+ Note that if there are any time series in v that match the data-label-selector (or the
+ default target_info if that argument is not specified), they will be treated as info series and
+ will be returned unchanged.
+
+
Limitations
From 8047b05b3c85b3fbb8d70f5b443ca5e3d25d9e69 Mon Sep 17 00:00:00 2001
From: George Krajcsovits
Date: Wed, 28 Jan 2026 08:20:06 +0100
Subject: [PATCH 085/165] chore(codeowners): promote matt-gp external github
user to owner of AWS SD (#17946)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Branch protection means they cannot merge PRs to main/release branches,
nor approve changes outside their area for such PRs.
Also add sysadmind (Joe) as owner of AWS SD, to make sure he gets notified.
Signed-off-by: György Krajcsovits
---
CODEOWNERS | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/CODEOWNERS b/CODEOWNERS
index 4982838376..2c5dedbffa 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -21,9 +21,8 @@
# Service discovery.
/discovery/kubernetes @prometheus/default-maintainers @brancz
/discovery/stackit @prometheus/default-maintainers @jkroepke
+/discovery/aws/ @prometheus/default-maintainers @matt-gp @sysadmind
# Pending
-# https://github.com/prometheus/prometheus/pull/17105#issuecomment-3248209452
-# /discovery/aws/ @prometheus/default-maintainers @matt-gp @sysadmind
# https://github.com/prometheus/prometheus/pull/15212#issuecomment-3575225179
# /discovery/aliyun @prometheus/default-maintainers @KeyOfSpectator
# https://github.com/prometheus/prometheus/pull/14108#issuecomment-2639515421
From 2597a120801f5e9bd573d43010325478c868a214 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Wed, 28 Jan 2026 09:05:54 +0000
Subject: [PATCH 086/165] st: Add a hidden 'st-storage' feature flag for
PROM-60 (#17907)
Signed-off-by: bwplotka
Signed-off-by: Bartlomiej Plotka
---
cmd/prometheus/main.go | 57 ++++++++++++++++++++++++++++++++++--------
tsdb/agent/db.go | 5 ++++
tsdb/db.go | 5 ++++
3 files changed, 56 insertions(+), 11 deletions(-)
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index e4f15f5cb8..02808bd652 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -265,13 +265,26 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
case "ooo-native-histograms":
logger.Warn("This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
case "created-timestamp-zero-ingestion":
+ // NOTE(bwplotka): Once AppendableV1 is removed, there will be only the TSDB and agent flags.
c.scrape.EnableStartTimestampZeroIngestion = true
c.web.STZeroIngestionEnabled = true
+ c.tsdb.EnableSTAsZeroSample = true
c.agent.EnableSTAsZeroSample = true
+
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
+ // This is to widen the ST support surface.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
- logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+ logger.Info("Experimental start timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+ case "st-storage":
+ // TODO(bwplotka): Implement ST Storage as per PROM-60 and document this hidden feature flag.
+ c.tsdb.EnableSTStorage = true
+ c.agent.EnableSTStorage = true
+
+ // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. This is to widen the ST support surface.
+ config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
+ config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
+ logger.Info("Experimental start timestamp storage enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
case "delayed-compaction":
c.tsdb.EnableDelayedCompaction = true
logger.Info("Experimental delayed compaction is enabled.")
@@ -872,16 +885,29 @@ func main() {
os.Exit(1)
}
- scrapeManager, err := scrape.NewManager(
- &cfg.scrape,
- logger.With("component", "scrape manager"),
- logging.NewJSONFileLogger,
- fanoutStorage, nil, // TODO(bwplotka): Switch to AppendableV2.
- prometheus.DefaultRegisterer,
- )
- if err != nil {
- logger.Error("failed to create a scrape manager", "err", err)
- os.Exit(1)
+ var scrapeManager *scrape.Manager
+ {
+ // TODO(bwplotka): Switch to AppendableV2 by default.
+ // See: https://github.com/prometheus/prometheus/issues/17632
+ var (
+ scrapeAppendable storage.Appendable = fanoutStorage
+ scrapeAppendableV2 storage.AppendableV2
+ )
+ if cfg.tsdb.EnableSTStorage {
+ scrapeAppendable = nil
+ scrapeAppendableV2 = fanoutStorage
+ }
+ scrapeManager, err = scrape.NewManager(
+ &cfg.scrape,
+ logger.With("component", "scrape manager"),
+ logging.NewJSONFileLogger,
+ scrapeAppendable, scrapeAppendableV2,
+ prometheus.DefaultRegisterer,
+ )
+ if err != nil {
+ logger.Error("failed to create a scrape manager", "err", err)
+ os.Exit(1)
+ }
}
var (
@@ -1368,6 +1394,8 @@ func main() {
"WALSegmentSize", cfg.tsdb.WALSegmentSize,
"WALCompressionType", cfg.tsdb.WALCompressionType,
"BlockReloadInterval", cfg.tsdb.BlockReloadInterval,
+ "EnableSTAsZeroSample", cfg.tsdb.EnableSTAsZeroSample,
+ "EnableSTStorage", cfg.tsdb.EnableSTStorage,
)
startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
@@ -1425,6 +1453,7 @@ func main() {
"MaxWALTime", cfg.agent.MaxWALTime,
"OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow,
"EnableSTAsZeroSample", cfg.agent.EnableSTAsZeroSample,
+ "EnableSTStorage", cfg.tsdb.EnableSTStorage,
)
localStorage.Set(db, 0)
@@ -1944,6 +1973,8 @@ type tsdbOptions struct {
UseUncachedIO bool
BlockCompactionExcludeFunc tsdb.BlockExcludeFilterFunc
BlockReloadInterval model.Duration
+ EnableSTAsZeroSample bool
+ EnableSTStorage bool
StaleSeriesCompactionThreshold float64
}
@@ -1971,6 +2002,8 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
BlockCompactionExcludeFunc: opts.BlockCompactionExcludeFunc,
BlockReloadInterval: time.Duration(opts.BlockReloadInterval),
FeatureRegistry: features.DefaultRegistry,
+ EnableSTAsZeroSample: opts.EnableSTAsZeroSample,
+ EnableSTStorage: opts.EnableSTStorage,
StaleSeriesCompactionThreshold: opts.StaleSeriesCompactionThreshold,
}
}
@@ -1986,6 +2019,7 @@ type agentOptions struct {
NoLockfile bool
OutOfOrderTimeWindow int64 // TODO(bwplotka): Unused option, fix it or remove.
EnableSTAsZeroSample bool
+ EnableSTStorage bool
}
func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
@@ -2002,6 +2036,7 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option
NoLockfile: opts.NoLockfile,
OutOfOrderTimeWindow: outOfOrderTimeWindow,
EnableSTAsZeroSample: opts.EnableSTAsZeroSample,
+ EnableSTStorage: opts.EnableSTStorage,
}
}
diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go
index 1b29b223d7..460ceb7c04 100644
--- a/tsdb/agent/db.go
+++ b/tsdb/agent/db.go
@@ -92,6 +92,11 @@ type Options struct {
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
EnableSTAsZeroSample bool
+
+ // EnableSTStorage determines whether the agent DB should write a Start Timestamp (ST)
+ // per sample to the WAL.
+ // TODO(bwplotka): Implement this option as per PROM-60; currently it's a no-op.
+ EnableSTStorage bool
}
// DefaultOptions used for the WAL storage. They are reasonable for setups using
diff --git a/tsdb/db.go b/tsdb/db.go
index 1dd524a76a..e8ab300397 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -235,6 +235,11 @@ type Options struct {
// is implemented.
EnableSTAsZeroSample bool
+ // EnableSTStorage determines whether the TSDB should write a Start Timestamp (ST)
+ // per sample to the WAL.
+ // TODO(bwplotka): Implement this option as per PROM-60; currently it's a no-op.
+ EnableSTStorage bool
+
// EnableMetadataWALRecords represents 'metadata-wal-records' feature flag.
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
From 97e7ef802cd2789aeb0feb42cb67a1579ec44b1a Mon Sep 17 00:00:00 2001
From: Callum Styan
Date: Wed, 28 Jan 2026 03:47:34 -0800
Subject: [PATCH 087/165] remote write: improve readability of timeseries
 filtering by using the slices package (#14318)
* improve readability of timeseries filtering by using the slices package
Signed-off-by: Callum Styan
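
For illustration, a minimal standalone sketch of the in-place filtering idiom
this change adopts (hypothetical data): slices.DeleteFunc keeps the backing
array, shifts kept elements forward, and since Go 1.22 zeroes the vacated
tail, so the slice can be reused for the next batch.

    package main

    import (
        "fmt"
        "slices"
        "strings"
    )

    func main() {
        series := []string{"keep-a", "drop-1", "keep-b", "drop-2"}
        // Filter in place: no new slice is allocated.
        series = slices.DeleteFunc(series, func(s string) bool {
            return strings.HasPrefix(s, "drop")
        })
        fmt.Println(series) // [keep-a keep-b]
    }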
* ensure that BenchmarkBuildTimeSeries doesn't account for building the
actual proto in the benchmark results; we only care about the
buildTimeSeries call
Signed-off-by: Callum Styan
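
For illustration, a minimal _test.go sketch of the benchmarking pattern
referenced above (hypothetical names): per-iteration setup is bracketed by
StopTimer/StartTimer inside b.Loop, so only the measured call contributes to
the reported results.

    package example

    import "testing"

    func benchmarkedCall(s []int) int { return len(s) } // hypothetical stand-in

    func BenchmarkFiltering(b *testing.B) {
        original := make([]int, 10000)
        b.ReportAllocs()
        for b.Loop() {
            b.StopTimer()
            // Setup excluded from timing: a fresh copy the call may mutate.
            input := make([]int, len(original))
            copy(input, original)
            b.StartTimer()
            _ = benchmarkedCall(input)
        }
    }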
---------
Signed-off-by: Callum Styan
---
storage/remote/queue_manager.go | 18 ++++++------------
storage/remote/queue_manager_test.go | 10 ++++++++--
2 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index 2b26179e58..63cdfb36f4 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -19,6 +19,7 @@ import (
"fmt"
"log/slog"
"math"
+ "slices"
"strconv"
"sync"
"time"
@@ -2105,12 +2106,11 @@ func setAtomicToNewer(value *atomic.Int64, newValue int64) (previous int64, upda
func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeries) bool) ([]prompb.TimeSeries, *timeSeriesStats) {
stats := newTimeSeriesStats()
- keepIdx := 0
- for i, ts := range timeSeries {
+ timeSeries = slices.DeleteFunc(timeSeries, func(ts prompb.TimeSeries) bool {
if filter != nil && filter(ts) {
stats.recordDropped(len(ts.Samples) > 0, len(ts.Exemplars) > 0, len(ts.Histograms) > 0)
- continue
+ return true
}
// At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
@@ -2123,16 +2123,10 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
if len(ts.Histograms) > 0 {
stats.updateTimestamp(ts.Histograms[0].Timestamp)
}
+ return false
+ })
- if i != keepIdx {
- // We have to swap the kept timeseries with the one which should be dropped.
- // Copying any elements within timeSeries could cause data corruptions when reusing the slice in a next batch (shards.populateTimeSeries).
- timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
- }
- keepIdx++
- }
-
- return timeSeries[:keepIdx], stats
+ return timeSeries, stats
}
func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, filter func(prompb.TimeSeries) bool, buf compression.EncodeBuffer, compr compression.Type) (_ []byte, highest, lowest int64, _ error) {
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index f1462b4406..a4b05d387a 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -871,7 +871,7 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([
return samples, series
}
-func createProtoTimeseriesWithOld(numSamples, baseTs int64, _ ...labels.Label) []prompb.TimeSeries {
+func createProtoTimeseriesWithOld(numSamples, baseTs int64) []prompb.TimeSeries {
samples := make([]prompb.TimeSeries, numSamples)
// use a fixed rand source so tests are consistent
r := rand.New(rand.NewSource(99))
@@ -2365,8 +2365,14 @@ func BenchmarkBuildTimeSeries(b *testing.B) {
// Send one sample per series, which is the typical remote_write case
const numSamples = 10000
filter := func(ts prompb.TimeSeries) bool { return filterTsLimit(99, ts) }
+ originalSamples := createProtoTimeseriesWithOld(numSamples, 100)
+
+ b.ReportAllocs()
for b.Loop() {
- samples := createProtoTimeseriesWithOld(numSamples, 100, extraLabels...)
+ b.StopTimer()
+ samples := make([]prompb.TimeSeries, len(originalSamples))
+ copy(samples, originalSamples)
+ b.StartTimer()
result, _ := buildTimeSeries(samples, filter)
require.NotNil(b, result)
}
From dc34b90f93bf0265187ccec1dabdcc3db1a87ce0 Mon Sep 17 00:00:00 2001
From: Arve Knudsen
Date: Wed, 28 Jan 2026 13:58:50 +0100
Subject: [PATCH 088/165] otlptranslator: fix silently swallowed error in
addSumNumberDataPoints (#17954)
On createAttributes failure, the function incorrectly returned nil instead
of err, causing errors to be silently discarded. This could lead to silent
data loss for sum metrics during OTLP ingestion.
Fixes #17953
Signed-off-by: Arve Knudsen
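
For illustration, a minimal standalone sketch of this bug class (hypothetical
names, not the translator code itself): inside the error branch, returning
nil instead of err makes the caller observe success.

    package main

    import (
        "errors"
        "fmt"
    )

    var errCreate = errors.New("createAttributes failed") // hypothetical stand-in

    func buggy() error {
        if err := errCreate; err != nil {
            return nil // BUG: swallows err; callers see success
        }
        return nil
    }

    func fixed() error {
        if err := errCreate; err != nil {
            return err // propagate, as in this patch
        }
        return nil
    }

    func main() {
        fmt.Println("buggy:", buggy()) // buggy: <nil>
        fmt.Println("fixed:", fixed()) // fixed: createAttributes failed
    }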
---
.../otlptranslator/prometheusremotewrite/number_data_points.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
index 65d4fd70b2..e681bb352b 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
@@ -86,7 +86,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
meta.MetricFamilyName,
)
if err != nil {
- return nil
+ return err
}
var val float64
switch pt.ValueType() {
From 00a7faa2e3cbef625faff9236cdabd877243f35b Mon Sep 17 00:00:00 2001
From: Arve Knudsen
Date: Thu, 29 Jan 2026 08:06:00 +0100
Subject: [PATCH 089/165] tsdb: fix division by zero in stale series compaction
(#17952)
Guard the stale series ratio calculation by checking numSeries > 0
before computing the ratio. This prevents division by zero when
the head has no series.
Fixes #17949
Signed-off-by: Arve Knudsen
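
For context, a minimal standalone sketch of the failure mode: in Go, float
division by zero does not panic, but 0/0 yields NaN while n/0 yields +Inf,
and +Inf always passes a >= threshold comparison, so the ratio must only be
computed when numSeries > 0.

    package main

    import "fmt"

    func main() {
        var numStaleSeries, numSeries uint64 = 0, 0
        ratio := float64(numStaleSeries) / float64(numSeries)
        fmt.Println(ratio, ratio >= 0.5) // NaN false

        numStaleSeries = 3 // hypothetical inconsistent state
        ratio = float64(numStaleSeries) / float64(numSeries)
        fmt.Println(ratio, ratio >= 0.5) // +Inf true: would trigger compaction
    }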
---
tsdb/db.go | 29 +++++++++++++++--------------
tsdb/db_test.go | 24 ++++++++++++++++++++++++
2 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/tsdb/db.go b/tsdb/db.go
index e8ab300397..c5da5b54a6 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -1172,22 +1172,23 @@ func (db *DB) run(ctx context.Context) {
db.head.mmapHeadChunks()
numStaleSeries, numSeries := db.Head().NumStaleSeries(), db.Head().NumSeries()
- staleSeriesRatio := float64(numStaleSeries) / float64(numSeries)
- if db.autoCompact && db.opts.staleSeriesCompactionThreshold.Load() > 0 &&
- staleSeriesRatio >= db.opts.staleSeriesCompactionThreshold.Load() {
- nextCompactionIsSoon := false
- if !db.lastHeadCompactionTime.IsZero() {
- compactionInterval := time.Duration(db.head.chunkRange.Load()) * time.Millisecond
- nextEstimatedCompactionTime := db.lastHeadCompactionTime.Add(compactionInterval)
- if time.Now().Add(10 * time.Minute).After(nextEstimatedCompactionTime) {
- // Next compaction is starting within next 10 mins.
- nextCompactionIsSoon = true
+ if db.autoCompact && numSeries > 0 && db.opts.staleSeriesCompactionThreshold.Load() > 0 {
+ staleSeriesRatio := float64(numStaleSeries) / float64(numSeries)
+ if staleSeriesRatio >= db.opts.staleSeriesCompactionThreshold.Load() {
+ nextCompactionIsSoon := false
+ if !db.lastHeadCompactionTime.IsZero() {
+ compactionInterval := time.Duration(db.head.chunkRange.Load()) * time.Millisecond
+ nextEstimatedCompactionTime := db.lastHeadCompactionTime.Add(compactionInterval)
+ if time.Now().Add(10 * time.Minute).After(nextEstimatedCompactionTime) {
+ // Next compaction is starting within next 10 mins.
+ nextCompactionIsSoon = true
+ }
}
- }
- if !nextCompactionIsSoon {
- if err := db.CompactStaleHead(); err != nil {
- db.logger.Error("immediate stale series compaction failed", "err", err)
+ if !nextCompactionIsSoon {
+ if err := db.CompactStaleHead(); err != nil {
+ db.logger.Error("immediate stale series compaction failed", "err", err)
+ }
}
}
}
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 2dbcb11645..403ce3636a 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -9561,3 +9561,27 @@ func TestStaleSeriesCompaction(t *testing.T) {
verifyHeadBlock()
}
}
+
+// TestStaleSeriesCompactionWithZeroSeries verifies that CompactStaleHead handles
+// an empty head (0 series) gracefully without division by zero or incorrectly
+// triggering compaction. This is a regression test for issue #17949.
+func TestStaleSeriesCompactionWithZeroSeries(t *testing.T) {
+ opts := DefaultOptions()
+ opts.MinBlockDuration = 1000
+ opts.MaxBlockDuration = 1000
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+
+ // Verify the head is empty.
+ require.Equal(t, uint64(0), db.Head().NumSeries())
+ require.Equal(t, uint64(0), db.Head().NumStaleSeries())
+
+ // CompactStaleHead should handle zero series gracefully (no panic, no error).
+ require.NoError(t, db.CompactStaleHead())
+
+ // Should still have no blocks since there was nothing to compact.
+ require.Empty(t, db.Blocks())
+}
From 020a0b30a0817e0027770ef324a8f4f30a577ba8 Mon Sep 17 00:00:00 2001
From: Arve Knudsen
Date: Thu, 29 Jan 2026 08:07:32 +0100
Subject: [PATCH 090/165] notifier: fix flaky TestStop_DrainingEnabled and
TestStop_DrainingDisabled race conditions (#17938)
Fix flaky TestStop_DrainingEnabled and TestStop_DrainingDisabled tests.
The tests used real HTTP servers and real time, making them susceptible to
race conditions and timing-dependent failures.
The solution is to convert both tests to use synctest for deterministic fake time.
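
For illustration, a minimal _test.go sketch (hypothetical test) of the
synctest pattern applied below, using Go 1.25's testing/synctest: inside the
bubble, time is fake, so time.Sleep advances the clock deterministically once
all goroutines in the bubble are blocked.

    package example

    import (
        "testing"
        "testing/synctest"
        "time"
    )

    func TestFakeTime(t *testing.T) {
        synctest.Test(t, func(t *testing.T) {
            start := time.Now()
            done := make(chan struct{})
            go func() {
                time.Sleep(time.Second) // fake clock: no real one-second wait
                close(done)
            }()
            <-done
            // Deterministic: exactly one second of fake time has passed.
            if time.Since(start) != time.Second {
                t.Fatalf("got %v", time.Since(start))
            }
        })
    }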
---------
Signed-off-by: Arve Knudsen
---
notifier/manager_test.go | 242 ++++++++++++++++++---------------------
1 file changed, 112 insertions(+), 130 deletions(-)
diff --git a/notifier/manager_test.go b/notifier/manager_test.go
index ba1d578d99..d7108c1628 100644
--- a/notifier/manager_test.go
+++ b/notifier/manager_test.go
@@ -831,171 +831,153 @@ func TestHangingNotifier(t *testing.T) {
}
func TestStop_DrainingDisabled(t *testing.T) {
- releaseReceiver := make(chan struct{})
- receiverReceivedRequest := make(chan struct{}, 2)
- alertsReceived := atomic.NewInt64(0)
+ synctest.Test(t, func(t *testing.T) {
+ const alertmanagerURL = "http://alertmanager:9093/api/v2/alerts"
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Let the test know we've received a request.
- receiverReceivedRequest <- struct{}{}
+ handlerStarted := make(chan struct{})
+ alertsReceived := atomic.NewInt64(0)
- var alerts []*Alert
+ // Fake Do function that simulates a hanging alertmanager that times out.
+ fakeDo := func(ctx context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
+ var alerts []*Alert
+ b, err := io.ReadAll(req.Body)
+ if err != nil {
+ return nil, fmt.Errorf("read request body: %w", err)
+ }
+ if err := json.Unmarshal(b, &alerts); err != nil {
+ return nil, fmt.Errorf("unmarshal request body: %w", err)
+ }
+ alertsReceived.Add(int64(len(alerts)))
- b, err := io.ReadAll(r.Body)
- require.NoError(t, err)
+ // Signal arrival, then block until context times out.
+ handlerStarted <- struct{}{}
+ <-ctx.Done()
- err = json.Unmarshal(b, &alerts)
- require.NoError(t, err)
+ return nil, ctx.Err()
+ }
- alertsReceived.Add(int64(len(alerts)))
+ reg := prometheus.NewRegistry()
+ m := NewManager(
+ &Options{
+ QueueCapacity: 10,
+ DrainOnShutdown: false,
+ Registerer: reg,
+ Do: fakeDo,
+ },
+ model.UTF8Validation,
+ nil,
+ )
- // Wait for the test to release us.
- <-releaseReceiver
+ m.alertmanagers = make(map[string]*alertmanagerSet)
- w.WriteHeader(http.StatusOK)
- }))
- defer func() {
- server.Close()
- }()
+ am1Cfg := config.DefaultAlertmanagerConfig
+ am1Cfg.Timeout = model.Duration(time.Second)
+ m.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, nil, m.opts, m.metrics, alertmanagerURL)
- reg := prometheus.NewRegistry()
- m := NewManager(
- &Options{
- QueueCapacity: 10,
- DrainOnShutdown: false,
- Registerer: reg,
- },
- model.UTF8Validation,
- nil,
- )
+ for _, ams := range m.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
- m.alertmanagers = make(map[string]*alertmanagerSet)
+ // This will be waited on automatically when synctest.Test exits.
+ go m.Run(nil)
- am1Cfg := config.DefaultAlertmanagerConfig
- am1Cfg.Timeout = model.Duration(time.Second)
- m.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, nil, m.opts, m.metrics, server.URL)
+ // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
+ m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
- for _, ams := range m.alertmanagers {
- ams.startSendLoops(ams.ams)
- }
+ // Wait for receiver to get the request.
+ <-handlerStarted
- notificationManagerStopped := make(chan struct{})
+ m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
- go func() {
- defer close(notificationManagerStopped)
- m.Run(nil)
- }()
+ // Stop the notification manager, then advance time to trigger the request timeout.
+ m.Stop()
+ time.Sleep(time.Second)
- // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
- m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+ // Allow goroutines to finish.
+ synctest.Wait()
- select {
- case <-receiverReceivedRequest:
- // Nothing more to do.
- case <-time.After(time.Second):
- require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
- }
-
- m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
-
- // Stop the notification manager, pause to allow the shutdown to be observed, and then allow the receiver to proceed.
- m.Stop()
- time.Sleep(time.Second)
- close(releaseReceiver)
-
- // Wait for the notification manager to stop and confirm only the first notification was sent.
- // The second notification should be dropped.
- select {
- case <-notificationManagerStopped:
- // Nothing more to do.
- case <-time.After(time.Second):
- require.FailNow(t, "gave up waiting for notification manager to stop")
- }
-
- require.Equal(t, int64(1), alertsReceived.Load())
+ // Confirm only the first notification was sent. The second notification should be dropped.
+ require.Equal(t, int64(1), alertsReceived.Load())
+ })
}
func TestStop_DrainingEnabled(t *testing.T) {
- releaseReceiver := make(chan struct{})
- receiverReceivedRequest := make(chan struct{}, 2)
- alertsReceived := atomic.NewInt64(0)
+ synctest.Test(t, func(t *testing.T) {
+ const alertmanagerURL = "http://alertmanager:9093/api/v2/alerts"
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- var alerts []*Alert
+ handlerStarted := make(chan struct{}, 1)
+ alertsReceived := atomic.NewInt64(0)
- // Let the test know we've received a request.
- receiverReceivedRequest <- struct{}{}
+ // Fake Do function that simulates alertmanager responding slowly but successfully.
+ fakeDo := func(_ context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
+ var alerts []*Alert
+ b, err := io.ReadAll(req.Body)
+ if err != nil {
+ return nil, fmt.Errorf("read request body: %w", err)
+ }
+ if err := json.Unmarshal(b, &alerts); err != nil {
+ return nil, fmt.Errorf("unmarshal request body: %w", err)
+ }
+ alertsReceived.Add(int64(len(alerts)))
- b, err := io.ReadAll(r.Body)
- require.NoError(t, err)
+ // Signal arrival.
+ handlerStarted <- struct{}{}
- err = json.Unmarshal(b, &alerts)
- require.NoError(t, err)
+ // Block to allow for alert-2 to be queued while this request is in-flight.
+ time.Sleep(100 * time.Millisecond)
- alertsReceived.Add(int64(len(alerts)))
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: io.NopCloser(bytes.NewBuffer(nil)),
+ }, nil
+ }
- // Wait for the test to release us.
- <-releaseReceiver
+ reg := prometheus.NewRegistry()
+ m := NewManager(
+ &Options{
+ QueueCapacity: 10,
+ DrainOnShutdown: true,
+ Registerer: reg,
+ Do: fakeDo,
+ },
+ model.UTF8Validation,
+ nil,
+ )
- w.WriteHeader(http.StatusOK)
- }))
- defer func() {
- server.Close()
- }()
+ m.alertmanagers = make(map[string]*alertmanagerSet)
- reg := prometheus.NewRegistry()
- m := NewManager(
- &Options{
- QueueCapacity: 10,
- DrainOnShutdown: true,
- Registerer: reg,
- },
- model.UTF8Validation,
- nil,
- )
+ am1Cfg := config.DefaultAlertmanagerConfig
+ am1Cfg.Timeout = model.Duration(time.Second)
+ m.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, nil, m.opts, m.metrics, alertmanagerURL)
- m.alertmanagers = make(map[string]*alertmanagerSet)
+ for _, ams := range m.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
- am1Cfg := config.DefaultAlertmanagerConfig
- am1Cfg.Timeout = model.Duration(time.Second)
- m.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, nil, m.opts, m.metrics, server.URL)
+ go m.Run(nil)
- for _, ams := range m.alertmanagers {
- ams.startSendLoops(ams.ams)
- }
+ // Queue two alerts. The first should be immediately sent to the receiver.
+ m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
- notificationManagerStopped := make(chan struct{})
+ // Wait for receiver to get the first request.
+ <-handlerStarted
- go func() {
- defer close(notificationManagerStopped)
- m.Run(nil)
- }()
+ // Send second alert while first is still being processed (fakeDo has 100ms delay).
+ m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
- // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
- m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+ // Stop the notification manager. With DrainOnShutdown=true, this should wait
+ // for the queue to drain, ensuring both alerts are sent.
+ m.Stop()
- select {
- case <-receiverReceivedRequest:
- // Nothing more to do.
- case <-time.After(time.Second):
- require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
- }
+ // Advance time so in-flight requests complete.
+ time.Sleep(time.Second)
- m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
+ // Allow goroutines to finish.
+ synctest.Wait()
- // Stop the notification manager and allow the receiver to proceed.
- m.Stop()
- close(releaseReceiver)
-
- // Wait for the notification manager to stop and confirm both notifications were sent.
- select {
- case <-notificationManagerStopped:
- // Nothing more to do.
- case <-time.After(200 * time.Millisecond):
- require.FailNow(t, "gave up waiting for notification manager to stop")
- }
-
- require.Equal(t, int64(2), alertsReceived.Load())
+ // Confirm both notifications were sent.
+ require.Equal(t, int64(2), alertsReceived.Load())
+ })
}
// TestQueuesDrainingOnApplyConfig ensures that when an alertmanagerSet disappears after an ApplyConfig(), its
From 36ea75d20336ef5b54a85e957c223c47e6f5783f Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Thu, 29 Jan 2026 10:50:17 +0000
Subject: [PATCH 091/165] scrape: fix flaky appender test (#17962)
Fixes https://github.com/prometheus/prometheus/issues/17941
Signed-off-by: bwplotka
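
For illustration, a minimal sketch of the stricter constructor contract
(argument order as in this repo; teststorage.NewAppendable is the helper the
updated tests use): exactly one of appendable/appendableV2 must be non-nil,
and passing neither now fails fast.

    package main

    import (
        "log"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/prometheus/scrape"
        "github.com/prometheus/prometheus/util/teststorage"
    )

    func main() {
        // Both sinks nil: rejected up front instead of failing later in a scrape.
        if _, err := scrape.NewManager(&scrape.Options{}, nil, nil, nil, nil, prometheus.NewRegistry()); err == nil {
            log.Fatal("expected an error when no appendable is provided")
        }

        // Exactly one sink (here the V2 test appendable) is accepted.
        m, err := scrape.NewManager(&scrape.Options{}, nil, nil, nil, teststorage.NewAppendable(), prometheus.NewRegistry())
        if err != nil {
            log.Fatal(err)
        }
        m.Stop()
    }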
---
scrape/manager.go | 3 +++
scrape/manager_test.go | 20 ++++++++++----------
2 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/scrape/manager.go b/scrape/manager.go
index aafd8c1931..24a63b056b 100644
--- a/scrape/manager.go
+++ b/scrape/manager.go
@@ -65,6 +65,9 @@ func NewManager(
if appendable != nil && appendableV2 != nil {
return nil, errors.New("scrape.NewManager: appendable and appendableV2 cannot be provided at the same time")
}
+ if appendable == nil && appendableV2 == nil {
+ return nil, errors.New("scrape.NewManager: provide either appendable or appendableV2")
+ }
sm, err := newScrapeMetrics(registerer)
if err != nil {
diff --git a/scrape/manager_test.go b/scrape/manager_test.go
index 288f1d678d..395cc98a82 100644
--- a/scrape/manager_test.go
+++ b/scrape/manager_test.go
@@ -522,7 +522,7 @@ scrape_configs:
)
opts := Options{}
- scrapeManager, err := NewManager(&opts, nil, nil, nil, nil, testRegistry)
+ scrapeManager, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), testRegistry)
require.NoError(t, err)
newLoop := func(scrapeLoopOptions) loop {
ch <- struct{}{}
@@ -578,7 +578,7 @@ scrape_configs:
func TestManagerTargetsUpdates(t *testing.T) {
opts := Options{}
testRegistry := prometheus.NewRegistry()
- m, err := NewManager(&opts, nil, nil, nil, nil, testRegistry)
+ m, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), testRegistry)
require.NoError(t, err)
targetSetsCh := make(chan map[string][]*targetgroup.Group)
@@ -631,7 +631,7 @@ global:
opts := Options{}
testRegistry := prometheus.NewRegistry()
- scrapeManager, err := NewManager(&opts, nil, nil, nil, nil, testRegistry)
+ scrapeManager, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), testRegistry)
require.NoError(t, err)
// Load the first config.
@@ -701,7 +701,7 @@ scrape_configs:
}
opts := Options{}
- scrapeManager, err := NewManager(&opts, nil, nil, nil, nil, testRegistry)
+ scrapeManager, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), testRegistry)
require.NoError(t, err)
reload(scrapeManager, cfg1)
@@ -1034,7 +1034,7 @@ func TestUnregisterMetrics(t *testing.T) {
// Check that all metrics can be unregistered, allowing a second manager to be created.
for range 2 {
opts := Options{}
- manager, err := NewManager(&opts, nil, nil, nil, nil, reg)
+ manager, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), reg)
require.NotNil(t, manager)
require.NoError(t, err)
// Unregister all metrics.
@@ -1255,7 +1255,7 @@ scrape_configs:
- files: ['%s']
`
- discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, nil)
+ discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, teststorage.NewAppendable())
defer scrapeManager.Stop()
applyConfig(
@@ -1354,7 +1354,7 @@ scrape_configs:
file_sd_configs:
- files: ['%s', '%s']
`
- discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, nil)
+ discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, teststorage.NewAppendable())
defer scrapeManager.Stop()
applyConfig(
@@ -1413,7 +1413,7 @@ scrape_configs:
file_sd_configs:
- files: ['%s']
`
- discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, nil)
+ discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, teststorage.NewAppendable())
defer scrapeManager.Stop()
applyConfig(
@@ -1479,7 +1479,7 @@ scrape_configs:
- targets: ['%s']
`
- discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, nil)
+ discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, teststorage.NewAppendable())
defer scrapeManager.Stop()
// Apply the initial config with an existing file
@@ -1563,7 +1563,7 @@ scrape_configs:
cfg := loadConfiguration(t, cfgText)
- m, err := NewManager(&Options{}, nil, nil, nil, nil, prometheus.NewRegistry())
+ m, err := NewManager(&Options{}, nil, nil, nil, teststorage.NewAppendable(), prometheus.NewRegistry())
require.NoError(t, err)
defer m.Stop()
require.NoError(t, m.ApplyConfig(cfg))
From 79b553499ac0d5a0b155e30dcb322a8a7ed2550e Mon Sep 17 00:00:00 2001
From: 1seal
Date: Thu, 29 Jan 2026 11:59:35 +0000
Subject: [PATCH 092/165] web/api: compute relabel_steps in a single pass
Signed-off-by: 1seal
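
For illustration, a minimal standalone sketch of the single-pass approach
(hypothetical rules): one labels.Builder is carried across rules and each
rule is applied once via relabel.ProcessBuilder, instead of re-running
relabel.Process over rules[:i+1] at every step.

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
        "github.com/prometheus/prometheus/model/labels"
        "github.com/prometheus/prometheus/model/relabel"
    )

    func main() {
        lb := labels.NewBuilder(labels.FromStrings("job", "test", "__address__", "example.com:443"))
        rules := []*relabel.Config{
            {Action: relabel.Replace, SourceLabels: []model.LabelName{"__address__"}, Regex: relabel.MustNewRegexp("(.*)"), Replacement: "$1", TargetLabel: "instance"},
            {Action: relabel.Drop, SourceLabels: []model.LabelName{"__address__"}, Regex: relabel.MustNewRegexp(`example\.com:.*`)},
        }
        keep := true
        for i, rule := range rules {
            if keep {
                keep = relabel.ProcessBuilder(lb, rule) // each rule applied exactly once
            }
            fmt.Printf("step %d: keep=%v\n", i, keep)
        }
    }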
---
web/api/v1/api.go | 16 ++++++++----
web/api/v1/api_test.go | 56 +++++++++++++++++++++++++++++++++++++++---
2 files changed, 64 insertions(+), 8 deletions(-)
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index f32fee19f8..07ce482a40 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -1346,13 +1346,19 @@ func (api *API) targetRelabelSteps(r *http.Request) apiFuncResult {
rules := scrapeConfig.RelabelConfigs
steps := make([]RelabelStep, len(rules))
+ lb := labels.NewBuilder(lbls)
+ keep := true
for i, rule := range rules {
- outLabels, keep := relabel.Process(lbls, rules[:i+1]...)
- steps[i] = RelabelStep{
- Rule: rule,
- Output: outLabels,
- Keep: keep,
+ if keep {
+ keep = relabel.ProcessBuilder(lb, rule)
}
+
+ outLabels := labels.EmptyLabels()
+ if keep {
+ outLabels = lb.Labels()
+ }
+
+ steps[i] = RelabelStep{Rule: rule, Output: outLabels, Keep: keep}
}
return apiFuncResult{&RelabelStepsResponse{Steps: steps}, nil, nil, nil}
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index 797182ce88..96d1cec531 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -166,8 +166,8 @@ func (t testTargetRetriever) TargetsDroppedCounts() map[string]int {
return r
}
-func (testTargetRetriever) ScrapePoolConfig(_ string) (*config.ScrapeConfig, error) {
- return &config.ScrapeConfig{
+func (testTargetRetriever) ScrapePoolConfig(pool string) (*config.ScrapeConfig, error) {
+ cfg := &config.ScrapeConfig{
RelabelConfigs: []*relabel.Config{
{
Action: relabel.Replace,
@@ -182,7 +182,16 @@ func (testTargetRetriever) ScrapePoolConfig(_ string) (*config.ScrapeConfig, err
Regex: relabel.MustNewRegexp(`example\.com:.*`),
},
},
- }, nil
+ }
+ if pool == "testpool3" {
+ cfg.RelabelConfigs = append(cfg.RelabelConfigs, &relabel.Config{
+ Action: relabel.Replace,
+ TargetLabel: "job",
+ Regex: relabel.MustNewRegexp(".*"),
+ Replacement: "should_not_apply",
+ })
+ }
+ return cfg, nil
}
func (t *testTargetRetriever) SetMetadataStoreForTargets(identifier string, metadata scrape.MetricMetadataStore) error {
@@ -1937,6 +1946,47 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, testLabelAPI
},
},
},
+ {
+ endpoint: api.targetRelabelSteps,
+ query: url.Values{"scrapePool": []string{"testpool3"}, "labels": []string{`{"job":"test","__address__":"localhost:9090"}`}},
+ response: &RelabelStepsResponse{
+ Steps: []RelabelStep{
+ {
+ Rule: &relabel.Config{
+ Action: relabel.Replace,
+ Replacement: "example.com:443",
+ TargetLabel: "__address__",
+ Regex: relabel.MustNewRegexp(""),
+ NameValidationScheme: model.LegacyValidation,
+ },
+ Output: labels.FromMap(map[string]string{
+ "job": "test",
+ "__address__": "example.com:443",
+ }),
+ Keep: true,
+ },
+ {
+ Rule: &relabel.Config{
+ Action: relabel.Drop,
+ SourceLabels: []model.LabelName{"__address__"},
+ Regex: relabel.MustNewRegexp(`example\.com:.*`),
+ },
+ Output: labels.EmptyLabels(),
+ Keep: false,
+ },
+ {
+ Rule: &relabel.Config{
+ Action: relabel.Replace,
+ TargetLabel: "job",
+ Regex: relabel.MustNewRegexp(".*"),
+ Replacement: "should_not_apply",
+ },
+ Output: labels.EmptyLabels(),
+ Keep: false,
+ },
+ },
+ },
+ },
// With a matching metric.
{
endpoint: api.targetMetadata,
From 75f94903b36d11b7b47dc3e0ebabab1ce3acabc7 Mon Sep 17 00:00:00 2001
From: Julien <291750+roidelapluie@users.noreply.github.com>
Date: Thu, 29 Jan 2026 13:36:13 +0100
Subject: [PATCH 093/165] Add OpenAPI 3.2 specification generation for
Prometheus HTTP API (#17825)
* Add OpenAPI 3.2 specification generation for Prometheus HTTP API
This commit introduces an OpenAPI specification for the Prometheus API.
After testing multiple code-generation servers with built-in APIs, this
implementation uses an independent spec file outside the critical path.
The spec file is exercised by a test framework included in this pull request.
The specification helps clients discover which parameters they can use and is
served at /api/v1/openapi.yaml. The spec file will evolve with the
Prometheus API and carries the same version number.
Downstream projects can tune which APIs the spec file presents via the
IncludePaths configuration setting for path filtering.
In the future, there is room to generate a server from this spec file
(e.g. with interfaces), but this is out of scope for this pull request.
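
For illustration, a hedged sketch (not part of this change; the server
address is assumed) of how a downstream client could consume the served spec
using the libopenapi dependency introduced here:

    package main

    import (
        "fmt"
        "io"
        "log"
        "net/http"

        "github.com/pb33f/libopenapi"
    )

    func main() {
        resp, err := http.Get("http://localhost:9090/api/v1/openapi.yaml?openapi_version=3.1")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        spec, err := io.ReadAll(resp.Body)
        if err != nil {
            log.Fatal(err)
        }
        doc, err := libopenapi.NewDocument(spec)
        if err != nil {
            log.Fatal(err)
        }
        model, errs := doc.BuildV3Model()
        if len(errs) > 0 {
            log.Fatal(errs[0])
        }
        // Count the endpoints the spec documents.
        fmt.Println("documented paths:", model.Model.Paths.PathItems.Len())
    }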
Architecture:
- Core OpenAPI infrastructure (openapi.go): Dynamic spec building,
caching, and thread-safe spec generation
- Schema definitions (openapi_schemas.go): Complete type definitions
for all API request and response types
- Path specifications (openapi_paths.go): Endpoint definitions with
parameters, request bodies, and response schemas
- Examples (openapi_examples.go): Realistic request/response examples
- Helper functions (openapi_helpers.go): Reusable builders for common
OpenAPI structures
Testing:
- Comprehensive test suite with golden file validation
- Test helpers package for API testing infrastructure
- OpenAPI compliance validation utilities
The golden file captures the complete specification for snapshot testing.
Update with: go test -run TestOpenAPIGolden -update-openapi-spec
REVIEWERS: The most important thing to check would be the OpenAPI golden
file (web/api/v1/testdata/openapi_golden.yaml). Test scenarios are important
as they test the actual OpenAPI spec validity.
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
* Add OpenAPI 3.1 support with version selection
Add support for both OpenAPI 3.1 and 3.2 specifications, with version
selection via the openapi_version query parameter. Defaults to 3.1 for
broader compatibility.
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
* Enhance OpenAPI examples and add helper functions
- Add timestampExamples helper for consistent time formatting
- Add exampleMap helper to simplify example creation
- Improve example summaries with query details
- Add matrix result example for range vector queries
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
* web/api: Add AtST method to test helper iterators
Implement the AtST() method required by the chunkenc.Iterator interface
for the FakeSeriesIterator and FakeHistogramSeriesIterator test helpers.
The method returns 0, as these test helpers don't use start timestamps.
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
* OpenAPI: Add minimum coverage test
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
* OpenAPI: Improve examples handling
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
---------
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
---
.gitattributes | 1 +
.golangci.yml | 2 +
.yamllint | 1 +
docs/querying/api.md | 16 +
documentation/examples/remote_storage/go.mod | 2 +-
go.mod | 15 +-
go.sum | 48 +
go.work | 2 +-
internal/tools/go.mod | 2 +-
web/api/testhelpers/api.go | 244 +
web/api/testhelpers/assertions.go | 252 +
web/api/testhelpers/fixtures.go | 178 +
web/api/testhelpers/mocks.go | 534 +++
web/api/testhelpers/openapi.go | 204 +
web/api/testhelpers/request.go | 145 +
web/api/v1/api.go | 8 +-
web/api/v1/api_scenarios_test.go | 419 ++
web/api/v1/errors_test.go | 1 +
web/api/v1/openapi.go | 320 ++
web/api/v1/openapi_coverage_test.go | 258 +
web/api/v1/openapi_examples.go | 1013 ++++
web/api/v1/openapi_golden_test.go | 176 +
web/api/v1/openapi_helpers.go | 343 ++
web/api/v1/openapi_paths.go | 626 +++
web/api/v1/openapi_schemas.go | 1223 +++++
web/api/v1/openapi_test.go | 289 ++
web/api/v1/test_helpers.go | 157 +
web/api/v1/testdata/openapi_3.1_golden.yaml | 4401 +++++++++++++++++
web/api/v1/testdata/openapi_3.2_golden.yaml | 4452 ++++++++++++++++++
web/ui/mantine-ui/src/promql/tools/go.mod | 2 +-
web/web.go | 9 +
web/web_test.go | 2 +
32 files changed, 15337 insertions(+), 8 deletions(-)
create mode 100644 .gitattributes
create mode 100644 web/api/testhelpers/api.go
create mode 100644 web/api/testhelpers/assertions.go
create mode 100644 web/api/testhelpers/fixtures.go
create mode 100644 web/api/testhelpers/mocks.go
create mode 100644 web/api/testhelpers/openapi.go
create mode 100644 web/api/testhelpers/request.go
create mode 100644 web/api/v1/api_scenarios_test.go
create mode 100644 web/api/v1/openapi.go
create mode 100644 web/api/v1/openapi_coverage_test.go
create mode 100644 web/api/v1/openapi_examples.go
create mode 100644 web/api/v1/openapi_golden_test.go
create mode 100644 web/api/v1/openapi_helpers.go
create mode 100644 web/api/v1/openapi_paths.go
create mode 100644 web/api/v1/openapi_schemas.go
create mode 100644 web/api/v1/openapi_test.go
create mode 100644 web/api/v1/test_helpers.go
create mode 100644 web/api/v1/testdata/openapi_3.1_golden.yaml
create mode 100644 web/api/v1/testdata/openapi_3.2_golden.yaml
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..432caee6f7
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+web/api/v1/testdata/openapi_golden.yaml linguist-generated
diff --git a/.golangci.yml b/.golangci.yml
index 0c866611e9..599a5e2b49 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -124,6 +124,8 @@ linters:
# Disable this check for now since it introduces too many changes in our existing codebase.
# See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
- omitzero
+ # Disable waitgroup check until we really move to Go 1.25.
+ - waitgroup
perfsprint:
# Optimizes even if it requires an int or uint type cast.
int-conversion: true
diff --git a/.yamllint b/.yamllint
index 8d09c375fd..b329f464fb 100644
--- a/.yamllint
+++ b/.yamllint
@@ -2,6 +2,7 @@
extends: default
ignore: |
**/node_modules
+ web/api/v1/testdata/openapi_*_golden.yaml
rules:
braces:
diff --git a/docs/querying/api.md b/docs/querying/api.md
index 4891db8980..7324669699 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -6,6 +6,22 @@ sort_rank: 7
The current stable HTTP API is reachable under `/api/v1` on a Prometheus
server. Any non-breaking additions will be added under that endpoint.
+## OpenAPI Specification
+
+An OpenAPI specification for the HTTP API is available at `/api/v1/openapi.yaml`.
+By default, it returns OpenAPI 3.1 for broader compatibility. Use `?openapi_version=3.2`
+for OpenAPI 3.2, which includes advanced features and endpoints like `/api/v1/notifications/live`.
+
+This machine-readable specification describes all available endpoints, request parameters,
+response formats, and schemas.
+
+The OpenAPI specification can be used to:
+
+- Generate client libraries in various programming languages.
+- Validate API requests and responses.
+- Generate interactive API documentation.
+- Test API endpoints.
+
## Format overview
The API response format is JSON. Every successful API request returns a `2xx`
diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod
index 17076faddd..5f2cd98037 100644
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
-go 1.24.0
+go 1.25.0
require (
github.com/alecthomas/kingpin/v2 v2.4.0
diff --git a/go.mod b/go.mod
index afc3f2740d..0aa3658177 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus
-go 1.24.0
+go 1.25.0
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
@@ -54,6 +54,8 @@ require (
github.com/oklog/ulid/v2 v2.1.1
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.142.0
github.com/ovh/go-ovh v1.9.0
+ github.com/pb33f/libopenapi v0.31.1
+ github.com/pb33f/libopenapi-validator v0.10.0
github.com/prometheus/alertmanager v0.30.0
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/client_golang/exp v0.0.0-20260101091701-2cd067eb23c9
@@ -85,6 +87,7 @@ require (
go.uber.org/goleak v1.3.0
go.yaml.in/yaml/v2 v2.4.3
go.yaml.in/yaml/v3 v3.0.4
+ go.yaml.in/yaml/v4 v4.0.0-rc.3
golang.org/x/oauth2 v0.34.0
golang.org/x/sync v0.19.0
golang.org/x/sys v0.39.0
@@ -93,6 +96,7 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b
google.golang.org/grpc v1.78.0
google.golang.org/protobuf v1.36.11
+ gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.34.3
k8s.io/apimachinery v0.34.3
k8s.io/client-go v0.34.3
@@ -102,6 +106,9 @@ require (
require (
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
+ github.com/bahlo/generic-list-go v0.2.0 // indirect
+ github.com/basgys/goxml2json v1.1.1-0.20231018121955-e66ee54ceaad // indirect
+ github.com/buger/jsonparser v1.1.1 // indirect
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
github.com/go-openapi/swag/conv v0.25.4 // indirect
github.com/go-openapi/swag/fileutils v0.25.4 // indirect
@@ -113,8 +120,10 @@ require (
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
+ github.com/pb33f/jsonpath v0.7.0 // indirect
+ github.com/pb33f/ordered-map/v2 v2.3.0 // indirect
+ github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
@@ -237,7 +246,7 @@ require (
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0
gotest.tools/v3 v3.0.3 // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
diff --git a/go.sum b/go.sum
index 6ac2105275..280724445a 100644
--- a/go.sum
+++ b/go.sum
@@ -81,6 +81,10 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
+github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
+github.com/basgys/goxml2json v1.1.1-0.20231018121955-e66ee54ceaad h1:3swAvbzgfaI6nKuDDU7BiKfZRdF+h2ZwKgMHd8Ha4t8=
+github.com/basgys/goxml2json v1.1.1-0.20231018121955-e66ee54ceaad/go.mod h1:9+nBLYNWkvPcq9ep0owWUsPTLgL9ZXTsZWcCSVGGLJ0=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -88,6 +92,10 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow=
+github.com/bitly/go-simplejson v0.5.1/go.mod h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -116,6 +124,8 @@ github.com/digitalocean/godo v1.171.0 h1:QwpkwWKr3v7yxc8D4NQG973NoR9APCEWjYnLOQe
github.com/digitalocean/godo v1.171.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
+github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -437,6 +447,14 @@ github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pb33f/jsonpath v0.7.0 h1:3oG6yu1RqNoMZpqnRjBMqi8fSIXWoDAKDrsB0QGTcoU=
+github.com/pb33f/jsonpath v0.7.0/go.mod h1:/+JlSIjWA2ijMVYGJ3IQPF4Q1nLMYbUTYNdk0exCDPQ=
+github.com/pb33f/libopenapi v0.31.1 h1:smGr45U2Y+hHWYKiEV13oS2tP9IUnscqNb5qsvT9+YI=
+github.com/pb33f/libopenapi v0.31.1/go.mod h1:oaebeA5l58AFbZ7qRKTtMnu15JEiPlaBas1vLDcw9vs=
+github.com/pb33f/libopenapi-validator v0.10.0 h1:9XhgxW2jTDd+1aDMuIjGUsWaeUaPi5ql2z1Y+WBltiE=
+github.com/pb33f/libopenapi-validator v0.10.0/go.mod h1:hW3wIpg4YCxLrJxyTrfrzP9Mtt9FvbD/nm0yemUcjSs=
+github.com/pb33f/ordered-map/v2 v2.3.0 h1:k2OhVEQkhTCQMhAicQ3Z6iInzoZNQ7L9MVomwKBZ5WQ=
+github.com/pb33f/ordered-map/v2 v2.3.0/go.mod h1:oe5ue+6ZNhy7QN9cPZvPA23Hx0vMHnNVeMg4fGdCANw=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
@@ -491,6 +509,8 @@ github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPK
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 h1:ObX9hZmK+VmijreZO/8x9pQ8/P/ToHD/bdSb4Eg4tUo=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36/go.mod h1:LEsDu4BubxK7/cWhtlQWfuxwL4rf/2UEpxXz1o1EMtM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
@@ -517,6 +537,7 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -533,6 +554,7 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
@@ -620,12 +642,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -638,6 +664,10 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
@@ -648,6 +678,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -667,23 +699,37 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
@@ -694,6 +740,8 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools/godoc v0.1.0-deprecated h1:o+aZ1BOj6Hsx/GBdJO/s815sqftjSnrZZwyYTHODvtk=
diff --git a/go.work b/go.work
index fbb73655e9..c5ba5dfad6 100644
--- a/go.work
+++ b/go.work
@@ -1,4 +1,4 @@
-go 1.24.0
+go 1.25.0
use (
.
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index c8b62b5ca7..5238fca024 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/internal/tools
-go 1.24.0
+go 1.25.0
require (
github.com/bufbuild/buf v1.62.1
diff --git a/web/api/testhelpers/api.go b/web/api/testhelpers/api.go
new file mode 100644
index 0000000000..07d7003b5c
--- /dev/null
+++ b/web/api/testhelpers/api.go
@@ -0,0 +1,244 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testhelpers provides utilities for testing the Prometheus HTTP API.
+// This file contains helper functions for creating test API instances and managing test lifecycles.
+package testhelpers
+
+import (
+ "context"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/promslog"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/promql/promqltest"
+ "github.com/prometheus/prometheus/rules"
+ "github.com/prometheus/prometheus/scrape"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb"
+ "github.com/prometheus/prometheus/util/notifications"
+)
+
+// RulesRetriever provides a list of active rules and alerts.
+type RulesRetriever interface {
+ RuleGroups() []*rules.Group
+ AlertingRules() []*rules.AlertingRule
+}
+
+// TargetRetriever provides the lists of active and dropped scrape targets.
+type TargetRetriever interface {
+ TargetsActive() map[string][]*scrape.Target
+ TargetsDropped() map[string][]*scrape.Target
+ TargetsDroppedCounts() map[string]int
+ ScrapePoolConfig(string) (*config.ScrapeConfig, error)
+}
+
+// ScrapePoolsRetriever provides the list of all scrape pools.
+type ScrapePoolsRetriever interface {
+ ScrapePools() []string
+}
+
+// AlertmanagerRetriever provides a list of all/dropped AlertManager URLs.
+type AlertmanagerRetriever interface {
+ Alertmanagers() []*url.URL
+ DroppedAlertmanagers() []*url.URL
+}
+
+// TSDBAdminStats provides TSDB admin statistics.
+type TSDBAdminStats interface {
+ CleanTombstones() error
+ Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error
+ Snapshot(dir string, withHead bool) error
+ Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
+ WALReplayStatus() (tsdb.WALReplayStatus, error)
+ BlockMetas() ([]tsdb.BlockMeta, error)
+}
+
+// APIConfig holds configuration for creating a test API instance.
+type APIConfig struct {
+ // Core dependencies.
+ QueryEngine *LazyLoader[promql.QueryEngine]
+ Queryable *LazyLoader[storage.SampleAndChunkQueryable]
+ ExemplarQueryable *LazyLoader[storage.ExemplarQueryable]
+
+ // Retrievers.
+ RulesRetriever *LazyLoader[RulesRetriever]
+ TargetRetriever *LazyLoader[TargetRetriever]
+ ScrapePoolsRetriever *LazyLoader[ScrapePoolsRetriever]
+ AlertmanagerRetriever *LazyLoader[AlertmanagerRetriever]
+
+ // Admin.
+ TSDBAdmin *LazyLoader[TSDBAdminStats]
+ DBDir string
+
+ // Optional overrides.
+ Config func() config.Config
+ FlagsMap map[string]string
+ Now func() time.Time
+}
+
+// APIWrapper wraps the API and provides a handler for testing.
+type APIWrapper struct {
+ Handler http.Handler
+}
+
+// PrometheusVersion contains build information about Prometheus.
+type PrometheusVersion struct {
+ Version string `json:"version"`
+ Revision string `json:"revision"`
+ Branch string `json:"branch"`
+ BuildUser string `json:"buildUser"`
+ BuildDate string `json:"buildDate"`
+ GoVersion string `json:"goVersion"`
+}
+
+// RuntimeInfo contains runtime information about Prometheus.
+type RuntimeInfo struct {
+ StartTime time.Time `json:"startTime"`
+ CWD string `json:"CWD"`
+ Hostname string `json:"hostname"`
+ ServerTime time.Time `json:"serverTime"`
+ ReloadConfigSuccess bool `json:"reloadConfigSuccess"`
+ LastConfigTime time.Time `json:"lastConfigTime"`
+ CorruptionCount int64 `json:"corruptionCount"`
+ GoroutineCount int `json:"goroutineCount"`
+ GOMAXPROCS int `json:"GOMAXPROCS"`
+ GOMEMLIMIT int64 `json:"GOMEMLIMIT"`
+ GOGC string `json:"GOGC"`
+ GODEBUG string `json:"GODEBUG"`
+ StorageRetention string `json:"storageRetention"`
+}
+
+// NewAPIParams holds all the parameters needed to create a v1.API instance.
+type NewAPIParams struct {
+ QueryEngine promql.QueryEngine
+ Queryable storage.SampleAndChunkQueryable
+ ExemplarQueryable storage.ExemplarQueryable
+ ScrapePoolsRetriever func(context.Context) ScrapePoolsRetriever
+ TargetRetriever func(context.Context) TargetRetriever
+ AlertmanagerRetriever func(context.Context) AlertmanagerRetriever
+ ConfigFunc func() config.Config
+ FlagsMap map[string]string
+ ReadyFunc func(http.HandlerFunc) http.HandlerFunc
+ TSDBAdmin TSDBAdminStats
+ DBDir string
+ Logger *slog.Logger
+ RulesRetriever func(context.Context) RulesRetriever
+ RuntimeInfoFunc func() (RuntimeInfo, error)
+ BuildInfo *PrometheusVersion
+ NotificationsGetter func() []notifications.Notification
+ NotificationsSub func() (<-chan notifications.Notification, func(), bool)
+ Gatherer prometheus.Gatherer
+ Registerer prometheus.Registerer
+}
+
+// PrepareAPI creates a NewAPIParams with sensible defaults for testing.
+func PrepareAPI(t *testing.T, cfg APIConfig) NewAPIParams {
+ t.Helper()
+
+ // Create defaults for unset lazy loaders.
+ if cfg.QueryEngine == nil {
+ cfg.QueryEngine = NewLazyLoader(func() promql.QueryEngine {
+ return promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{
+ Logger: nil,
+ Reg: nil,
+ MaxSamples: 10000,
+ Timeout: 100 * time.Second,
+ NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 },
+ EnableAtModifier: true,
+ EnableNegativeOffset: true,
+ EnablePerStepStats: true,
+ })
+ })
+ }
+
+ if cfg.Queryable == nil {
+ cfg.Queryable = NewLazyLoader(NewEmptyQueryable)
+ }
+
+ if cfg.ExemplarQueryable == nil {
+ cfg.ExemplarQueryable = NewLazyLoader(NewEmptyExemplarQueryable)
+ }
+
+ if cfg.RulesRetriever == nil {
+ cfg.RulesRetriever = NewLazyLoader(func() RulesRetriever {
+ return NewEmptyRulesRetriever()
+ })
+ }
+
+ if cfg.TargetRetriever == nil {
+ cfg.TargetRetriever = NewLazyLoader(func() TargetRetriever {
+ return NewEmptyTargetRetriever()
+ })
+ }
+
+ if cfg.ScrapePoolsRetriever == nil {
+ cfg.ScrapePoolsRetriever = NewLazyLoader(func() ScrapePoolsRetriever {
+ return NewEmptyScrapePoolsRetriever()
+ })
+ }
+
+ if cfg.AlertmanagerRetriever == nil {
+ cfg.AlertmanagerRetriever = NewLazyLoader(func() AlertmanagerRetriever {
+ return NewEmptyAlertmanagerRetriever()
+ })
+ }
+
+ if cfg.TSDBAdmin == nil {
+ cfg.TSDBAdmin = NewLazyLoader(func() TSDBAdminStats {
+ return NewEmptyTSDBAdminStats()
+ })
+ }
+
+ if cfg.Config == nil {
+ cfg.Config = func() config.Config { return config.Config{} }
+ }
+
+ if cfg.FlagsMap == nil {
+ cfg.FlagsMap = map[string]string{}
+ }
+
+ if cfg.DBDir == "" {
+ cfg.DBDir = t.TempDir()
+ }
+
+ return NewAPIParams{
+ QueryEngine: cfg.QueryEngine.Get(),
+ Queryable: cfg.Queryable.Get(),
+ ExemplarQueryable: cfg.ExemplarQueryable.Get(),
+ ScrapePoolsRetriever: func(context.Context) ScrapePoolsRetriever { return cfg.ScrapePoolsRetriever.Get() },
+ TargetRetriever: func(context.Context) TargetRetriever { return cfg.TargetRetriever.Get() },
+ AlertmanagerRetriever: func(context.Context) AlertmanagerRetriever { return cfg.AlertmanagerRetriever.Get() },
+ ConfigFunc: cfg.Config,
+ FlagsMap: cfg.FlagsMap,
+ ReadyFunc: func(f http.HandlerFunc) http.HandlerFunc { return f },
+ TSDBAdmin: cfg.TSDBAdmin.Get(),
+ DBDir: cfg.DBDir,
+ Logger: promslog.NewNopLogger(),
+ RulesRetriever: func(context.Context) RulesRetriever { return cfg.RulesRetriever.Get() },
+ RuntimeInfoFunc: func() (RuntimeInfo, error) { return RuntimeInfo{}, nil },
+ BuildInfo: &PrometheusVersion{},
+ NotificationsGetter: func() []notifications.Notification { return nil },
+ NotificationsSub: func() (<-chan notifications.Notification, func(), bool) { return nil, func() {}, false },
+ Gatherer: prometheus.NewRegistry(),
+ Registerer: prometheus.NewRegistry(),
+ }
+}
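+
+// A minimal usage sketch (hypothetical wiring; the step that turns the
+// returned NewAPIParams into an APIWrapper lives in the consuming test
+// package, e.g. a newTestAPI helper):
+//
+// params := PrepareAPI(t, APIConfig{
+// Queryable: NewLazyLoader(func() storage.SampleAndChunkQueryable {
+// return NewQueryableWithSeries(FixtureMultipleSeries())
+// }),
+// })
+// _ = params // Feed into the v1 API constructor under test.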
diff --git a/web/api/testhelpers/assertions.go b/web/api/testhelpers/assertions.go
new file mode 100644
index 0000000000..53010b08b5
--- /dev/null
+++ b/web/api/testhelpers/assertions.go
@@ -0,0 +1,252 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides assertion helpers for validating API responses in tests.
+package testhelpers
+
+import (
+ "fmt"
+ "slices"
+ "strings"
+
+ "github.com/stretchr/testify/require"
+)
+
+// RequireSuccess asserts that the response has status "success" and returns the response for chaining.
+func (r *Response) RequireSuccess() *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+ require.Equal(r.t, "success", r.JSON["status"], "expected status to be 'success'")
+ return r
+}
+
+// RequireError asserts that the response has status "error" and returns the response for chaining.
+func (r *Response) RequireError() *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+ require.Equal(r.t, "error", r.JSON["status"], "expected status to be 'error'")
+ return r
+}
+
+// RequireStatusCode asserts that the response has the given HTTP status code and returns the response for chaining.
+func (r *Response) RequireStatusCode(expectedCode int) *Response {
+ r.t.Helper()
+ require.Equal(r.t, expectedCode, r.StatusCode, "unexpected HTTP status code")
+ return r
+}
+
+// RequireJSONPathExists asserts that a JSON path exists and returns the response for chaining.
+func (r *Response) RequireJSONPathExists(path string) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ return r
+}
+
+// RequireEquals asserts that a JSON path equals the expected value and returns the response for chaining.
+func (r *Response) RequireEquals(path string, expected any) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ require.Equal(r.t, expected, value, "JSON path %q has unexpected value", path)
+ return r
+}
+
+// RequireJSONArray asserts that a JSON path contains an array and returns the response for chaining.
+func (r *Response) RequireJSONArray(path string) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ _, ok := value.([]any)
+ require.True(r.t, ok, "JSON path %q is not an array", path)
+ return r
+}
+
+// RequireLenAtLeast asserts that a JSON path contains an array with at least minLen elements and returns the response for chaining.
+func (r *Response) RequireLenAtLeast(path string, minLen int) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ arr, ok := value.([]any)
+ require.True(r.t, ok, "JSON path %q is not an array", path)
+ require.GreaterOrEqual(r.t, len(arr), minLen, "JSON path %q has fewer than %d elements", path, minLen)
+ return r
+}
+
+// RequireArrayContains asserts that a JSON path contains an array with the expected element and returns the response for chaining.
+func (r *Response) RequireArrayContains(path string, expected any) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ arr, ok := value.([]any)
+ require.True(r.t, ok, "JSON path %q is not an array", path)
+
+ found := slices.Contains(arr, expected)
+ require.True(r.t, found, "JSON path %q does not contain expected value %v", path, expected)
+ return r
+}
+
+// RequireSome asserts that at least one element in an array satisfies the predicate and returns the response for chaining.
+func (r *Response) RequireSome(path string, predicate func(any) bool) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ arr, ok := value.([]any)
+ require.True(r.t, ok, "JSON path %q is not an array", path)
+
+ found := slices.ContainsFunc(arr, predicate)
+ require.True(r.t, found, "no element in JSON path %q satisfies the predicate", path)
+ return r
+}
+
+// getJSONPath extracts a value from a JSON object using a simple path notation.
+// Supports dotted paths like "$.data" and "$.data.groups". Array indexing
+// (e.g. "$.data.groups[0]") is not implemented and resolves to nil.
+func getJSONPath(data map[string]any, path string) any {
+ // Remove leading "$." if present.
+ path = strings.TrimPrefix(path, "$.")
+
+ if path == "" {
+ return data
+ }
+
+ parts := strings.Split(path, ".")
+ current := any(data)
+
+ for _, part := range parts {
+ // Handle array indexing (e.g., "groups[0]").
+ if strings.Contains(part, "[") {
+ // Not implementing array indexing for simplicity.
+ // Tests should use direct field access or RequireSome.
+ return nil
+ }
+
+ // Navigate to the next level.
+ m, ok := current.(map[string]any)
+ if !ok {
+ return nil
+ }
+ current = m[part]
+ }
+
+ return current
+}
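+
+// For example, given obj = map[string]any{"data": map[string]any{"resultType": "vector"}}:
+//
+// getJSONPath(obj, "$.data.resultType") // "vector"
+// getJSONPath(obj, "$.data.missing") // nil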
+
+// RequireVectorResult is a convenience helper for checking vector query results.
+func (r *Response) RequireVectorResult() *Response {
+ r.t.Helper()
+ return r.RequireSuccess().RequireEquals("$.data.resultType", "vector")
+}
+
+// RequireMatrixResult is a convenience helper for checking matrix query results.
+func (r *Response) RequireMatrixResult() *Response {
+ r.t.Helper()
+ return r.RequireSuccess().RequireEquals("$.data.resultType", "matrix")
+}
+
+// RequireScalarResult is a convenience helper for checking scalar query results.
+func (r *Response) RequireScalarResult() *Response {
+ r.t.Helper()
+ return r.RequireSuccess().RequireEquals("$.data.resultType", "scalar")
+}
+
+// RequireRulesGroupNamed asserts that a rules response contains a group with the given name.
+func (r *Response) RequireRulesGroupNamed(name string) *Response {
+ r.t.Helper()
+ return r.RequireSuccess().RequireSome("$.data.groups", func(group any) bool {
+ if g, ok := group.(map[string]any); ok {
+ return g["name"] == name
+ }
+ return false
+ })
+}
+
+// RequireTargetCount asserts that a targets response contains at least minCount targets.
+func (r *Response) RequireTargetCount(minCount int) *Response {
+ r.t.Helper()
+ r.RequireSuccess()
+
+ // The targets endpoint returns activeTargets as an array of targets.
+ value := getJSONPath(r.JSON, "$.data.activeTargets")
+ require.NotNil(r.t, value, "JSON path $.data.activeTargets does not exist")
+
+ arr, ok := value.([]any)
+ require.True(r.t, ok, "$.data.activeTargets is not an array")
+ require.GreaterOrEqual(r.t, len(arr), minCount, "expected at least %d targets, got %d", minCount, len(arr))
+ return r
+}
+
+// DebugJSON is a helper for debugging JSON responses in tests.
+func (r *Response) DebugJSON() *Response {
+ r.t.Helper()
+ r.t.Logf("Response status code: %d", r.StatusCode)
+ r.t.Logf("Response body: %s", r.Body)
+ if r.JSON != nil {
+ r.t.Logf("Response JSON: %+v", r.JSON)
+ }
+ return r
+}
+
+// RequireContainsSubstring asserts that the response body contains the given substring.
+func (r *Response) RequireContainsSubstring(substring string) *Response {
+ r.t.Helper()
+ require.Contains(r.t, r.Body, substring, "response body does not contain expected substring")
+ return r
+}
+
+// RequireField asserts that a field exists at the given path and returns its value.
+// Note: This method cannot be chained further since it returns the field value, not the Response.
+func (r *Response) RequireField(path string) any {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ return value
+}
+
+// RequireFieldType asserts that a field exists and has the expected type.
+func (r *Response) RequireFieldType(path, expectedType string) *Response {
+ r.t.Helper()
+ value := r.RequireField(path)
+
+ var actualType string
+ switch value.(type) {
+ case string:
+ actualType = "string"
+ case float64:
+ actualType = "number"
+ case bool:
+ actualType = "bool"
+ case []any:
+ actualType = "array"
+ case map[string]any:
+ actualType = "object"
+ default:
+ actualType = fmt.Sprintf("%T", value)
+ }
+
+ require.Equal(r.t, expectedType, actualType, "JSON path %q has unexpected type", path)
+ return r
+}
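+
+// A sketch of the chaining style these helpers enable (assumes a Response
+// obtained from the request helpers in this package):
+//
+// resp.RequireStatusCode(200).
+// RequireVectorResult().
+// RequireLenAtLeast("$.data.result", 1).
+// RequireFieldType("$.data.resultType", "string")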
diff --git a/web/api/testhelpers/fixtures.go b/web/api/testhelpers/fixtures.go
new file mode 100644
index 0000000000..caa5afd59d
--- /dev/null
+++ b/web/api/testhelpers/fixtures.go
@@ -0,0 +1,178 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides test fixture data for API tests.
+package testhelpers
+
+import (
+ "time"
+
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/promql/parser"
+ "github.com/prometheus/prometheus/rules"
+ "github.com/prometheus/prometheus/storage"
+)
+
+// FixtureSeries creates a simple series with the "up" metric.
+func FixtureSeries() []storage.Series {
+ // Use timestamps relative to "now" so queries work.
+ now := time.Now().UnixMilli()
+ return []storage.Series{
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "localhost:9090"),
+ samples: []promql.FPoint{
+ {T: now - 120000, F: 1},
+ {T: now - 60000, F: 1},
+ {T: now, F: 1},
+ },
+ },
+ }
+}
+
+// FixtureMultipleSeries creates multiple series for testing.
+func FixtureMultipleSeries() []storage.Series {
+ // Use timestamps relative to "now" so queries work.
+ now := time.Now().UnixMilli()
+ return []storage.Series{
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "localhost:9090"),
+ samples: []promql.FPoint{
+ {T: now - 60000, F: 1},
+ {T: now, F: 1},
+ },
+ },
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", "up", "job", "node", "instance", "localhost:9100"),
+ samples: []promql.FPoint{
+ {T: now - 60000, F: 1},
+ {T: now, F: 0},
+ },
+ },
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", "http_requests_total", "job", "api", "instance", "localhost:8080"),
+ samples: []promql.FPoint{
+ {T: now - 60000, F: 100},
+ {T: now, F: 150},
+ },
+ },
+ }
+}
+
+// FixtureRuleGroups creates a simple set of rule groups for testing.
+func FixtureRuleGroups() []*rules.Group {
+ // Create a simple recording rule.
+ expr, _ := parser.ParseExpr("up == 1")
+ recordingRule := rules.NewRecordingRule(
+ "job:up:sum",
+ expr,
+ labels.EmptyLabels(),
+ )
+
+ // Create a simple alerting rule.
+ alertExpr, _ := parser.ParseExpr("up == 0")
+ alertingRule := rules.NewAlertingRule(
+ "InstanceDown",
+ alertExpr,
+ time.Minute,
+ 0,
+ labels.FromStrings("severity", "critical"),
+ labels.EmptyLabels(),
+ labels.EmptyLabels(),
+ "Instance {{ $labels.instance }} is down",
+ true,
+ nil,
+ )
+
+ // Create a rule group.
+ group := rules.NewGroup(rules.GroupOptions{
+ Name: "example",
+ File: "example.rules",
+ Interval: time.Minute,
+ Rules: []rules.Rule{
+ recordingRule,
+ alertingRule,
+ },
+ })
+
+ return []*rules.Group{group}
+}
+
+// FixtureEmptyRuleGroups returns an empty set of rule groups.
+func FixtureEmptyRuleGroups() []*rules.Group {
+ return []*rules.Group{}
+}
+
+// FixtureSingleSeries creates a single series for simple tests.
+func FixtureSingleSeries(metricName string, value float64) []storage.Series {
+ return []storage.Series{
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", metricName),
+ samples: []promql.FPoint{
+ {T: 0, F: value},
+ },
+ },
+ }
+}
+
+// FixtureHistogramSeries creates a series with native histogram data.
+func FixtureHistogramSeries() []storage.Series {
+ // Use timestamps relative to "now" so queries work.
+ now := time.Now().UnixMilli()
+ return []storage.Series{
+ &FakeHistogramSeries{
+ labels: labels.FromStrings("__name__", "test_histogram", "job", "prometheus", "instance", "localhost:9090"),
+ histograms: []promql.HPoint{
+ {
+ T: now - 60000,
+ H: &histogram.FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.001,
+ ZeroCount: 5,
+ Count: 50,
+ Sum: 100,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 2},
+ {Offset: 1, Length: 2},
+ },
+ NegativeSpans: []histogram.Span{
+ {Offset: 0, Length: 1},
+ },
+ PositiveBuckets: []float64{5, 10, 8, 7},
+ NegativeBuckets: []float64{3},
+ },
+ },
+ {
+ T: now,
+ H: &histogram.FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.001,
+ ZeroCount: 8,
+ Count: 60,
+ Sum: 120,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 2},
+ {Offset: 1, Length: 2},
+ },
+ NegativeSpans: []histogram.Span{
+ {Offset: 0, Length: 1},
+ },
+ PositiveBuckets: []float64{6, 12, 10, 9},
+ NegativeBuckets: []float64{4},
+ },
+ },
+ },
+ },
+ }
+}
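+
+// A short sketch of how these fixtures are typically fed into the fakes
+// defined in mocks.go (illustrative only):
+//
+// q := NewQueryableWithSeries(FixtureHistogramSeries())
+// rr := NewRulesRetrieverWithGroups(FixtureRuleGroups())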
diff --git a/web/api/testhelpers/mocks.go b/web/api/testhelpers/mocks.go
new file mode 100644
index 0000000000..527febb727
--- /dev/null
+++ b/web/api/testhelpers/mocks.go
@@ -0,0 +1,534 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains mock implementations of API dependencies for testing.
+package testhelpers
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/rules"
+ "github.com/prometheus/prometheus/scrape"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb"
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
+ "github.com/prometheus/prometheus/tsdb/chunks"
+ "github.com/prometheus/prometheus/util/annotations"
+)
+
+// LazyLoader allows lazy initialization of mocks per test.
+type LazyLoader[T any] struct {
+ loader func() T
+ value *T
+}
+
+// NewLazyLoader creates a new LazyLoader with the given loader function.
+func NewLazyLoader[T any](loader func() T) *LazyLoader[T] {
+ return &LazyLoader[T]{loader: loader}
+}
+
+// Get returns the loaded value, initializing it if necessary.
+func (l *LazyLoader[T]) Get() T {
+ if l.value == nil {
+ v := l.loader()
+ l.value = &v
+ }
+ return *l.value
+}
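+
+// For example (a sketch; buildFake is a hypothetical constructor):
+//
+// ll := NewLazyLoader(func() *FakeQueryable { return buildFake() })
+// q := ll.Get() // The loader runs once, on first Get; later Gets reuse the value.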
+
+// FakeQueryable implements storage.SampleAndChunkQueryable with configurable behavior.
+type FakeQueryable struct {
+ series []storage.Series
+}
+
+func (f *FakeQueryable) Querier(_, _ int64) (storage.Querier, error) {
+ return &FakeQuerier{series: f.series}, nil
+}
+
+func (f *FakeQueryable) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) {
+ return &FakeChunkQuerier{series: f.series}, nil
+}
+
+// FakeQuerier implements storage.Querier.
+type FakeQuerier struct {
+ series []storage.Series
+}
+
+func (f *FakeQuerier) Select(_ context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
+ return &FakeSeriesSet{series: f.series, idx: -1}
+}
+
+func (f *FakeQuerier) LabelValues(_ context.Context, name string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ valuesMap := make(map[string]struct{})
+ for _, s := range f.series {
+ lbls := s.Labels()
+ if val := lbls.Get(name); val != "" {
+ valuesMap[val] = struct{}{}
+ }
+ }
+ values := make([]string, 0, len(valuesMap))
+ for v := range valuesMap {
+ values = append(values, v)
+ }
+ return values, nil, nil
+}
+
+func (f *FakeQuerier) LabelNames(_ context.Context, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ namesMap := make(map[string]struct{})
+ for _, s := range f.series {
+ lbls := s.Labels()
+ lbls.Range(func(l labels.Label) {
+ namesMap[l.Name] = struct{}{}
+ })
+ }
+ names := make([]string, 0, len(namesMap))
+ for n := range namesMap {
+ names = append(names, n)
+ }
+ return names, nil, nil
+}
+
+func (*FakeQuerier) Close() error {
+ return nil
+}
+
+// FakeChunkQuerier implements storage.ChunkQuerier.
+type FakeChunkQuerier struct {
+ series []storage.Series
+}
+
+func (f *FakeChunkQuerier) Select(_ context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.ChunkSeriesSet {
+ return &FakeChunkSeriesSet{series: f.series, idx: -1}
+}
+
+func (f *FakeChunkQuerier) LabelValues(_ context.Context, name string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ valuesMap := make(map[string]struct{})
+ for _, s := range f.series {
+ lbls := s.Labels()
+ if val := lbls.Get(name); val != "" {
+ valuesMap[val] = struct{}{}
+ }
+ }
+ values := make([]string, 0, len(valuesMap))
+ for v := range valuesMap {
+ values = append(values, v)
+ }
+ return values, nil, nil
+}
+
+func (f *FakeChunkQuerier) LabelNames(_ context.Context, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ namesMap := make(map[string]struct{})
+ for _, s := range f.series {
+ lbls := s.Labels()
+ lbls.Range(func(l labels.Label) {
+ namesMap[l.Name] = struct{}{}
+ })
+ }
+ names := make([]string, 0, len(namesMap))
+ for n := range namesMap {
+ names = append(names, n)
+ }
+ return names, nil, nil
+}
+
+func (*FakeChunkQuerier) Close() error {
+ return nil
+}
+
+// FakeSeriesSet implements storage.SeriesSet.
+type FakeSeriesSet struct {
+ series []storage.Series
+ idx int
+}
+
+func (f *FakeSeriesSet) Next() bool {
+ f.idx++
+ return f.idx < len(f.series)
+}
+
+func (f *FakeSeriesSet) At() storage.Series {
+ return f.series[f.idx]
+}
+
+func (*FakeSeriesSet) Err() error {
+ return nil
+}
+
+func (*FakeSeriesSet) Warnings() annotations.Annotations {
+ return nil
+}
+
+// FakeChunkSeriesSet implements storage.ChunkSeriesSet.
+type FakeChunkSeriesSet struct {
+ series []storage.Series
+ idx int
+}
+
+func (f *FakeChunkSeriesSet) Next() bool {
+ f.idx++
+ return f.idx < len(f.series)
+}
+
+func (f *FakeChunkSeriesSet) At() storage.ChunkSeries {
+ return &FakeChunkSeries{series: f.series[f.idx]}
+}
+
+func (*FakeChunkSeriesSet) Err() error {
+ return nil
+}
+
+func (*FakeChunkSeriesSet) Warnings() annotations.Annotations {
+ return nil
+}
+
+// FakeChunkSeries implements storage.ChunkSeries.
+type FakeChunkSeries struct {
+ series storage.Series
+}
+
+func (f *FakeChunkSeries) Labels() labels.Labels {
+ return f.series.Labels()
+}
+
+func (*FakeChunkSeries) Iterator(_ chunks.Iterator) chunks.Iterator {
+ return &FakeChunkSeriesIterator{}
+}
+
+// FakeChunkSeriesIterator implements chunks.Iterator.
+type FakeChunkSeriesIterator struct{}
+
+func (*FakeChunkSeriesIterator) Next() bool {
+ return false
+}
+
+func (*FakeChunkSeriesIterator) At() chunks.Meta {
+ return chunks.Meta{}
+}
+
+func (*FakeChunkSeriesIterator) Err() error {
+ return nil
+}
+
+// FakeSeries implements storage.Series.
+type FakeSeries struct {
+ labels labels.Labels
+ samples []promql.FPoint
+}
+
+func (f *FakeSeries) Labels() labels.Labels {
+ return f.labels
+}
+
+func (f *FakeSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
+ return &FakeSeriesIterator{samples: f.samples, idx: -1}
+}
+
+// FakeSeriesIterator implements chunkenc.Iterator.
+type FakeSeriesIterator struct {
+ samples []promql.FPoint
+ idx int
+}
+
+func (f *FakeSeriesIterator) Next() chunkenc.ValueType {
+ f.idx++
+ if f.idx < len(f.samples) {
+ return chunkenc.ValFloat
+ }
+ return chunkenc.ValNone
+}
+
+func (f *FakeSeriesIterator) Seek(t int64) chunkenc.ValueType {
+ // Per the chunkenc.Iterator contract, Seek stays on the current sample
+ // if it already satisfies T >= t, instead of advancing past it.
+ if f.idx < 0 {
+ f.idx = 0
+ }
+ for f.idx < len(f.samples) {
+ if f.samples[f.idx].T >= t {
+ return chunkenc.ValFloat
+ }
+ f.idx++
+ }
+ return chunkenc.ValNone
+}
+
+func (f *FakeSeriesIterator) At() (int64, float64) {
+ s := f.samples[f.idx]
+ return s.T, s.F
+}
+
+func (*FakeSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+ panic("not implemented")
+}
+
+func (*FakeSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+ panic("not implemented")
+}
+
+func (f *FakeSeriesIterator) AtT() int64 {
+ return f.samples[f.idx].T
+}
+
+func (*FakeSeriesIterator) AtST() int64 {
+ return 0
+}
+
+func (*FakeSeriesIterator) Err() error {
+ return nil
+}
+
+// FakeHistogramSeries implements storage.Series for histogram data.
+type FakeHistogramSeries struct {
+ labels labels.Labels
+ histograms []promql.HPoint
+}
+
+func (f *FakeHistogramSeries) Labels() labels.Labels {
+ return f.labels
+}
+
+func (f *FakeHistogramSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
+ return &FakeHistogramSeriesIterator{histograms: f.histograms, idx: -1}
+}
+
+// FakeHistogramSeriesIterator implements chunkenc.Iterator for histogram data.
+type FakeHistogramSeriesIterator struct {
+ histograms []promql.HPoint
+ idx int
+}
+
+func (f *FakeHistogramSeriesIterator) Next() chunkenc.ValueType {
+ f.idx++
+ if f.idx < len(f.histograms) {
+ return chunkenc.ValFloatHistogram
+ }
+ return chunkenc.ValNone
+}
+
+func (f *FakeHistogramSeriesIterator) Seek(t int64) chunkenc.ValueType {
+ // As with FakeSeriesIterator, stay on the current histogram if it
+ // already satisfies T >= t.
+ if f.idx < 0 {
+ f.idx = 0
+ }
+ for f.idx < len(f.histograms) {
+ if f.histograms[f.idx].T >= t {
+ return chunkenc.ValFloatHistogram
+ }
+ f.idx++
+ }
+ return chunkenc.ValNone
+}
+
+func (*FakeHistogramSeriesIterator) At() (int64, float64) {
+ panic("not a float value")
+}
+
+func (*FakeHistogramSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+ panic("not implemented")
+}
+
+func (f *FakeHistogramSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+ h := f.histograms[f.idx]
+ return h.T, h.H
+}
+
+func (f *FakeHistogramSeriesIterator) AtT() int64 {
+ return f.histograms[f.idx].T
+}
+
+func (*FakeHistogramSeriesIterator) AtST() int64 {
+ return 0
+}
+
+func (*FakeHistogramSeriesIterator) Err() error {
+ return nil
+}
+
+// FakeExemplarQueryable implements storage.ExemplarQueryable.
+type FakeExemplarQueryable struct{}
+
+func (*FakeExemplarQueryable) ExemplarQuerier(_ context.Context) (storage.ExemplarQuerier, error) {
+ return &FakeExemplarQuerier{}, nil
+}
+
+// FakeExemplarQuerier implements storage.ExemplarQuerier.
+type FakeExemplarQuerier struct{}
+
+func (*FakeExemplarQuerier) Select(_, _ int64, _ ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
+ return nil, nil
+}
+
+// FakeRulesRetriever implements v1.RulesRetriever.
+type FakeRulesRetriever struct {
+ groups []*rules.Group
+}
+
+func (f *FakeRulesRetriever) RuleGroups() []*rules.Group {
+ return f.groups
+}
+
+func (f *FakeRulesRetriever) AlertingRules() []*rules.AlertingRule {
+ var alertingRules []*rules.AlertingRule
+ for _, g := range f.groups {
+ for _, r := range g.Rules() {
+ if ar, ok := r.(*rules.AlertingRule); ok {
+ alertingRules = append(alertingRules, ar)
+ }
+ }
+ }
+ return alertingRules
+}
+
+// FakeTargetRetriever implements v1.TargetRetriever.
+type FakeTargetRetriever struct {
+ active map[string][]*scrape.Target
+ dropped map[string][]*scrape.Target
+ droppedCounts map[string]int
+ scrapeConfig map[string]*config.ScrapeConfig
+}
+
+func (f *FakeTargetRetriever) TargetsActive() map[string][]*scrape.Target {
+ if f.active == nil {
+ return make(map[string][]*scrape.Target)
+ }
+ return f.active
+}
+
+func (f *FakeTargetRetriever) TargetsDropped() map[string][]*scrape.Target {
+ if f.dropped == nil {
+ return make(map[string][]*scrape.Target)
+ }
+ return f.dropped
+}
+
+func (f *FakeTargetRetriever) TargetsDroppedCounts() map[string]int {
+ if f.droppedCounts == nil {
+ return make(map[string]int)
+ }
+ return f.droppedCounts
+}
+
+func (f *FakeTargetRetriever) ScrapePoolConfig(name string) (*config.ScrapeConfig, error) {
+ if f.scrapeConfig == nil {
+ return nil, nil
+ }
+ return f.scrapeConfig[name], nil
+}
+
+// FakeScrapePoolsRetriever implements v1.ScrapePoolsRetriever.
+type FakeScrapePoolsRetriever struct {
+ pools []string
+}
+
+func (f *FakeScrapePoolsRetriever) ScrapePools() []string {
+ if f.pools == nil {
+ return []string{}
+ }
+ return f.pools
+}
+
+// FakeAlertmanagerRetriever implements v1.AlertmanagerRetriever.
+type FakeAlertmanagerRetriever struct{}
+
+func (*FakeAlertmanagerRetriever) Alertmanagers() []*url.URL {
+ return nil
+}
+
+func (*FakeAlertmanagerRetriever) DroppedAlertmanagers() []*url.URL {
+ return nil
+}
+
+// FakeTSDBAdminStats implements v1.TSDBAdminStats.
+type FakeTSDBAdminStats struct{}
+
+func (*FakeTSDBAdminStats) CleanTombstones() error {
+ return nil
+}
+
+func (*FakeTSDBAdminStats) Delete(_ context.Context, _, _ int64, _ ...*labels.Matcher) error {
+ return nil
+}
+
+func (*FakeTSDBAdminStats) Snapshot(_ string, _ bool) error {
+ return nil
+}
+
+func (*FakeTSDBAdminStats) Stats(_ string, _ int) (*tsdb.Stats, error) {
+ return &tsdb.Stats{}, nil
+}
+
+func (*FakeTSDBAdminStats) WALReplayStatus() (tsdb.WALReplayStatus, error) {
+ return tsdb.WALReplayStatus{}, nil
+}
+
+func (*FakeTSDBAdminStats) BlockMetas() ([]tsdb.BlockMeta, error) {
+ return []tsdb.BlockMeta{}, nil
+}
+
+// NewEmptyQueryable returns a queryable with no series.
+func NewEmptyQueryable() storage.SampleAndChunkQueryable {
+ return &FakeQueryable{series: []storage.Series{}}
+}
+
+// NewQueryableWithSeries returns a queryable with the given series.
+func NewQueryableWithSeries(series []storage.Series) storage.SampleAndChunkQueryable {
+ return &FakeQueryable{series: series}
+}
+
+// TSDBNotReadyQueryable implements storage.SampleAndChunkQueryable that returns tsdb.ErrNotReady.
+type TSDBNotReadyQueryable struct{}
+
+func (*TSDBNotReadyQueryable) Querier(_, _ int64) (storage.Querier, error) {
+ return nil, tsdb.ErrNotReady
+}
+
+func (*TSDBNotReadyQueryable) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) {
+ return nil, tsdb.ErrNotReady
+}
+
+// NewTSDBNotReadyQueryable returns a queryable that always returns tsdb.ErrNotReady.
+func NewTSDBNotReadyQueryable() storage.SampleAndChunkQueryable {
+ return &TSDBNotReadyQueryable{}
+}
+
+// NewEmptyExemplarQueryable returns an exemplar queryable with no exemplars.
+func NewEmptyExemplarQueryable() storage.ExemplarQueryable {
+ return &FakeExemplarQueryable{}
+}
+
+// NewEmptyRulesRetriever returns a rules retriever with no rules.
+func NewEmptyRulesRetriever() *FakeRulesRetriever {
+ return &FakeRulesRetriever{groups: []*rules.Group{}}
+}
+
+// NewRulesRetrieverWithGroups returns a rules retriever with the given groups.
+func NewRulesRetrieverWithGroups(groups []*rules.Group) *FakeRulesRetriever {
+ return &FakeRulesRetriever{groups: groups}
+}
+
+// NewEmptyTargetRetriever returns a target retriever with no targets.
+func NewEmptyTargetRetriever() *FakeTargetRetriever {
+ return &FakeTargetRetriever{}
+}
+
+// NewEmptyScrapePoolsRetriever returns a scrape pools retriever with no pools.
+func NewEmptyScrapePoolsRetriever() *FakeScrapePoolsRetriever {
+ return &FakeScrapePoolsRetriever{pools: []string{}}
+}
+
+// NewEmptyAlertmanagerRetriever returns an alertmanager retriever with no alertmanagers.
+func NewEmptyAlertmanagerRetriever() *FakeAlertmanagerRetriever {
+ return &FakeAlertmanagerRetriever{}
+}
+
+// NewEmptyTSDBAdminStats returns a TSDB admin stats with no-op implementations.
+func NewEmptyTSDBAdminStats() *FakeTSDBAdminStats {
+ return &FakeTSDBAdminStats{}
+}
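+
+// A sketch of a "storage not ready" scenario wired through APIConfig from
+// api.go (illustrative only):
+//
+// cfg := APIConfig{
+// Queryable: NewLazyLoader(NewTSDBNotReadyQueryable),
+// }
+// // Handlers hitting the queryable should then surface tsdb.ErrNotReady.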
diff --git a/web/api/testhelpers/openapi.go b/web/api/testhelpers/openapi.go
new file mode 100644
index 0000000000..d2e88943d2
--- /dev/null
+++ b/web/api/testhelpers/openapi.go
@@ -0,0 +1,204 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides OpenAPI-specific test utilities for validating spec compliance.
+package testhelpers
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/pb33f/libopenapi"
+ validator "github.com/pb33f/libopenapi-validator"
+ valerrors "github.com/pb33f/libopenapi-validator/errors"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ openAPIValidator31 validator.Validator
+ openAPIValidator32 validator.Validator
+ openAPIValidatorOnce sync.Once
+ openAPIValidatorErr error
+)
+
+// loadOpenAPIValidators loads and caches both OpenAPI 3.1 and 3.2 validators from golden files.
+func loadOpenAPIValidators() (validator.Validator, validator.Validator, error) {
+ openAPIValidatorOnce.Do(func() {
+ // Load OpenAPI 3.1 validator.
+ goldenPath31 := filepath.Join("testdata", "openapi_3.1_golden.yaml")
+ specBytes31, err := os.ReadFile(goldenPath31)
+ if err != nil {
+ openAPIValidatorErr = fmt.Errorf("failed to read OpenAPI 3.1 spec from %s: %w", goldenPath31, err)
+ return
+ }
+
+ doc31, err := libopenapi.NewDocument(specBytes31)
+ if err != nil {
+ openAPIValidatorErr = fmt.Errorf("failed to parse OpenAPI 3.1 document: %w", err)
+ return
+ }
+
+ v31, errs := validator.NewValidator(doc31)
+ if len(errs) > 0 {
+ openAPIValidatorErr = fmt.Errorf("failed to create OpenAPI 3.1 validator: %v", errs)
+ return
+ }
+
+ openAPIValidator31 = v31
+
+ // Load OpenAPI 3.2 validator.
+ goldenPath32 := filepath.Join("testdata", "openapi_3.2_golden.yaml")
+ specBytes32, err := os.ReadFile(goldenPath32)
+ if err != nil {
+ openAPIValidatorErr = fmt.Errorf("failed to read OpenAPI 3.2 spec from %s: %w", goldenPath32, err)
+ return
+ }
+
+ doc32, err := libopenapi.NewDocument(specBytes32)
+ if err != nil {
+ openAPIValidatorErr = fmt.Errorf("failed to parse OpenAPI 3.2 document: %w", err)
+ return
+ }
+
+ v32, errs := validator.NewValidator(doc32)
+ if len(errs) > 0 {
+ openAPIValidatorErr = fmt.Errorf("failed to create OpenAPI 3.2 validator: %v", errs)
+ return
+ }
+
+ openAPIValidator32 = v32
+ })
+
+ if openAPIValidatorErr != nil {
+ return nil, nil, openAPIValidatorErr
+ }
+
+ return openAPIValidator31, openAPIValidator32, nil
+}
+
+// ValidateOpenAPI validates the request and response against both OpenAPI 3.1 and 3.2 specifications.
+// This ensures API endpoints are compatible with both OpenAPI versions.
+// Returns the response for chaining.
+func (r *Response) ValidateOpenAPI() *Response {
+ r.t.Helper()
+
+ // Load both validators (cached after first call).
+ v31, v32, err := loadOpenAPIValidators()
+ require.NoError(r.t, err, "failed to load OpenAPI validators")
+
+ // Validate against OpenAPI 3.1 spec.
+ if r.request != nil {
+ r.validateRequestWithVersion(v31, "3.1")
+ }
+ r.validateResponseWithVersion(v31, "3.1")
+
+ // Validate against OpenAPI 3.2 spec.
+ if r.request != nil {
+ r.validateRequestWithVersion(v32, "3.2")
+ }
+ r.validateResponseWithVersion(v32, "3.2")
+
+ return r
+}
+
+// validateRequestWithVersion validates the HTTP request against a specific OpenAPI version's spec.
+func (r *Response) validateRequestWithVersion(v validator.Validator, version string) {
+ r.t.Helper()
+
+ // Create a validation request from the original request.
+ validationReq := &http.Request{
+ Method: r.request.Method,
+ URL: r.request.URL,
+ Header: r.request.Header,
+ Body: io.NopCloser(bytes.NewReader(r.requestBody)),
+ }
+
+ // Validate the request.
+ valid, errors := v.ValidateHttpRequest(validationReq)
+ if !valid {
+ // Check if the error is because the path doesn't exist in this version.
+ // Some endpoints (like /notifications/live) only exist in 3.2, not 3.1.
+ if isPathNotFoundError(errors) && version == "3.1" && strings.Contains(r.request.URL.Path, "/notifications/live") {
+ // Expected: /notifications/live is only in OpenAPI 3.2.
+ return
+ }
+
+ var errorMessages []string
+ for _, e := range errors {
+ errorMessages = append(errorMessages, e.Error())
+ }
+ require.Fail(r.t, fmt.Sprintf("OpenAPI %s request validation failed", version),
+ "Request to %s %s failed OpenAPI %s validation:\n%v",
+ r.request.Method, r.request.URL.Path, version, errorMessages)
+ }
+}
+
+// validateResponseWithVersion validates the HTTP response against a specific OpenAPI version's spec.
+func (r *Response) validateResponseWithVersion(v validator.Validator, version string) {
+ r.t.Helper()
+
+ // Create a validation request (needed for response validation context).
+ validationReq := &http.Request{
+ Method: r.request.Method,
+ URL: r.request.URL,
+ Header: r.request.Header,
+ }
+
+ // Create a response for validation.
+ validationResp := &http.Response{
+ StatusCode: r.StatusCode,
+ Header: r.responseHeader,
+ Body: io.NopCloser(bytes.NewReader([]byte(r.Body))),
+ Request: validationReq,
+ }
+
+ // Validate the response.
+ valid, errors := v.ValidateHttpResponse(validationReq, validationResp)
+ if !valid {
+ // Check if the error is because the path doesn't exist in this version.
+ // Some endpoints (like /notifications/live) only exist in 3.2, not 3.1.
+ if isPathNotFoundError(errors) && version == "3.1" && strings.Contains(r.request.URL.Path, "/notifications/live") {
+ // Expected: /notifications/live is only in OpenAPI 3.2.
+ return
+ }
+
+ var errorMessages []string
+ for _, e := range errors {
+ errorMessages = append(errorMessages, e.Error())
+ }
+ require.Fail(r.t, fmt.Sprintf("OpenAPI %s response validation failed", version),
+ "Response from %s %s (status %d) failed OpenAPI %s validation:\n%v",
+ r.request.Method, r.request.URL.Path, r.StatusCode, version, errorMessages)
+ }
+}
+
+// isPathNotFoundError checks if the validation errors indicate a path was not found in the spec.
+func isPathNotFoundError(errors []*valerrors.ValidationError) bool {
+ for _, err := range errors {
+ errStr := err.Error()
+ // Check for common "path not found" error messages from libopenapi-validator.
+ if strings.Contains(errStr, "path") && (strings.Contains(errStr, "not found") || strings.Contains(errStr, "does not exist")) {
+ return true
+ }
+ if strings.Contains(errStr, "GET /notifications/live") || strings.Contains(errStr, "/notifications/live not found") {
+ return true
+ }
+ }
+ return false
+}
diff --git a/web/api/testhelpers/request.go b/web/api/testhelpers/request.go
new file mode 100644
index 0000000000..81650e4c49
--- /dev/null
+++ b/web/api/testhelpers/request.go
@@ -0,0 +1,145 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides HTTP request builders for testing API endpoints.
+package testhelpers
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+ "testing"
+)
+
+// Response wraps an HTTP response with parsed JSON data.
+// It supports method chaining for assertions.
+//
+// Example usage:
+//
+// testhelpers.GET(t, api, "/api/v1/query", "query", "up").
+// ValidateOpenAPI().
+// RequireSuccess().
+// RequireEquals("$.data.resultType", "vector").
+// RequireLenAtLeast("$.data.result", 1)
+//
+// testhelpers.POST(t, api, "/api/v1/query", "query", "up").
+// ValidateOpenAPI().
+// RequireSuccess().
+// RequireArrayContains("$.data.result", expectedValue)
+type Response struct {
+ StatusCode int
+ Body string
+ JSON map[string]any
+ t *testing.T
+ request *http.Request
+ requestBody []byte
+ responseHeader http.Header
+}
+
+// GET sends a GET request to the API and returns a Response with parsed JSON.
+// queryParams should be pairs of key-value strings.
+func GET(t *testing.T, api *APIWrapper, path string, queryParams ...string) *Response {
+ t.Helper()
+
+ if len(queryParams)%2 != 0 {
+ t.Fatal("queryParams must be key-value pairs")
+ }
+
+ // Build query string.
+ values := url.Values{}
+ for i := 0; i < len(queryParams); i += 2 {
+ values.Add(queryParams[i], queryParams[i+1])
+ }
+
+ fullPath := path
+ if len(values) > 0 {
+ fullPath = path + "?" + values.Encode()
+ }
+
+ req := httptest.NewRequest(http.MethodGet, fullPath, nil)
+ return executeRequest(t, api, req)
+}
+
+// POST sends a POST request to the API with the given body and returns a Response with parsed JSON.
+// bodyParams should be pairs of key-value strings for form data.
+func POST(t *testing.T, api *APIWrapper, path string, bodyParams ...string) *Response {
+ t.Helper()
+
+ if len(bodyParams)%2 != 0 {
+ t.Fatal("bodyParams must be key-value pairs")
+ }
+
+ // Build form data.
+ values := url.Values{}
+ for i := 0; i < len(bodyParams); i += 2 {
+ values.Add(bodyParams[i], bodyParams[i+1])
+ }
+
+ req := httptest.NewRequest(http.MethodPost, path, strings.NewReader(values.Encode()))
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ return executeRequest(t, api, req)
+}
+
+// executeRequest executes an HTTP request and parses the response as JSON.
+func executeRequest(t *testing.T, api *APIWrapper, req *http.Request) *Response {
+ t.Helper()
+
+ // Capture the request body for validation.
+ var requestBody []byte
+ if req.Body != nil {
+ var err error
+ requestBody, err = io.ReadAll(req.Body)
+ if err != nil {
+ t.Fatalf("failed to read request body: %v", err)
+ }
+ // Restore the body for the actual request.
+ req.Body = io.NopCloser(strings.NewReader(string(requestBody)))
+ }
+
+ recorder := httptest.NewRecorder()
+ api.Handler.ServeHTTP(recorder, req)
+
+ result := recorder.Result()
+ defer result.Body.Close()
+
+ bodyBytes, err := io.ReadAll(result.Body)
+ if err != nil {
+ t.Fatalf("failed to read response body: %v", err)
+ }
+
+ resp := &Response{
+ StatusCode: result.StatusCode,
+ Body: string(bodyBytes),
+ t: t,
+ request: req,
+ requestBody: requestBody,
+ responseHeader: result.Header,
+ }
+
+ // Try to parse as JSON.
+ if strings.Contains(result.Header.Get("Content-Type"), "application/json") {
+ var jsonData map[string]any
+ if err := json.Unmarshal(bodyBytes, &jsonData); err != nil {
+ // If JSON parsing fails, leave JSON as nil.
+ // This allows tests to handle non-JSON responses.
+ resp.JSON = nil
+ } else {
+ resp.JSON = jsonData
+ }
+ }
+
+ return resp
+}
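+
+// A sketch for non-JSON endpoints: Response.JSON stays nil when the body is
+// not application/json, so assert on the raw body instead (the path below is
+// hypothetical):
+//
+// testhelpers.GET(t, api, "/api/v1/openapi.yaml").
+// RequireStatusCode(http.StatusOK).
+// RequireContainsSubstring("openapi:")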
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index f32fee19f8..456bafc97d 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -258,6 +258,7 @@ type API struct {
codecs []Codec
featureRegistry features.Collector
+ openAPIBuilder *OpenAPIBuilder
}
// NewAPI returns an initialized API type.
@@ -299,6 +300,7 @@ func NewAPI(
appendMetadata bool,
overrideErrorCode OverrideErrorCode,
featureRegistry features.Collector,
+ openAPIOptions OpenAPIOptions,
) *API {
a := &API{
QueryEngine: qe,
@@ -329,6 +331,7 @@ func NewAPI(
notificationsSub: notificationsSub,
overrideErrorCode: overrideErrorCode,
featureRegistry: featureRegistry,
+ openAPIBuilder: NewOpenAPIBuilder(openAPIOptions, logger),
remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
}
@@ -400,7 +403,7 @@ func (api *API) Register(r *route.Router) {
w.WriteHeader(http.StatusNoContent)
})
return api.ready(httputil.CompressionHandler{
- Handler: hf,
+ Handler: api.openAPIBuilder.WrapHandler(hf),
}.ServeHTTP)
}
@@ -469,6 +472,9 @@ func (api *API) Register(r *route.Router) {
r.Put("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries))
r.Put("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones))
r.Put("/admin/tsdb/snapshot", wrapAgent(api.snapshot))
+
+ // OpenAPI endpoint.
+ r.Get("/openapi.yaml", api.ready(api.openAPIBuilder.ServeOpenAPI))
}
type QueryData struct {
diff --git a/web/api/v1/api_scenarios_test.go b/web/api/v1/api_scenarios_test.go
new file mode 100644
index 0000000000..a707680c57
--- /dev/null
+++ b/web/api/v1/api_scenarios_test.go
@@ -0,0 +1,419 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/web/api/testhelpers"
+)
+
+// TODO: Generate automated tests from OpenAPI spec to validate API responses.
+
+// TestAPIEmpty tests the API with no metrics and no rules.
+func TestAPIEmpty(t *testing.T) {
+ // Create an API with empty defaults (no series, no rules).
+ api := newTestAPI(t, testhelpers.APIConfig{})
+
+ t.Run("GET /api/v1/labels returns success with empty array", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/labels").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data")
+ })
+
+ t.Run("GET /api/v1/query?query=up returns success (empty result ok)", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "up").
+ ValidateOpenAPI().
+ RequireSuccess().
+ RequireEquals("$.data.resultType", "vector")
+ })
+
+ t.Run("GET /api/v1/query_range?query=up returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query_range",
+ "query", "up",
+ "start", "0",
+ "end", "100",
+ "step", "10").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "matrix")
+ })
+
+ t.Run("GET /api/v1/series returns success with empty result", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/series",
+ "match[]", "up",
+ "start", "0",
+ "end", "100").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data")
+ })
+
+ t.Run("GET /api/v1/label/__name__/values returns success with empty array", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/label/__name__/values").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data")
+ })
+
+ t.Run("GET /api/v1/targets returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/targets").
+ RequireSuccess().
+ RequireJSONPathExists("$.data.activeTargets")
+ })
+
+ t.Run("GET /api/v1/rules returns success with empty groups", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/rules").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.groups")
+ })
+
+ t.Run("GET /api/v1/alerts returns success with empty alerts", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/alerts").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.alerts")
+ })
+
+ t.Run("GET /api/v1/alertmanagers returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/alertmanagers").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.activeAlertmanagers")
+ })
+
+ t.Run("GET /api/v1/metadata returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/metadata").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data")
+ })
+
+ t.Run("GET /api/v1/status/config returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/status/config").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.yaml")
+ })
+
+ t.Run("GET /api/v1/status/flags returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/status/flags").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data")
+ })
+
+ t.Run("GET /api/v1/status/runtimeinfo returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/status/runtimeinfo").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data")
+ })
+
+ t.Run("GET /api/v1/status/buildinfo returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/status/buildinfo").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data")
+ })
+
+ t.Run("POST /api/v1/query with form data returns success", func(t *testing.T) {
+ testhelpers.POST(t, api, "/api/v1/query", "query", "up").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector")
+ })
+}
+
+// TestAPIWithSeries tests the API with metrics/series data.
+func TestAPIWithSeries(t *testing.T) {
+ // Create an API with sample series data.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ Queryable: testhelpers.NewLazyLoader(func() storage.SampleAndChunkQueryable {
+ return testhelpers.NewQueryableWithSeries(testhelpers.FixtureMultipleSeries())
+ }),
+ })
+
+ t.Run("GET /api/v1/query returns vector with >= 1 sample", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "up").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1)
+ })
+
+ t.Run("GET /api/v1/query_range returns matrix result type", func(t *testing.T) {
+ // Use relative timestamps to match our fixtures.
+ now := time.Now().Unix()
+ testhelpers.GET(t, api, "/api/v1/query_range",
+ "query", "up",
+ "start", strconv.FormatInt(now-120, 10),
+ "end", strconv.FormatInt(now, 10),
+ "step", "60").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "matrix")
+ // Note: Result may be empty if timestamps don't align perfectly with samples.
+ })
+
+ t.Run("GET /api/v1/labels returns non-empty array", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/labels").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data").
+ RequireLenAtLeast("$.data", 1)
+ })
+
+ t.Run("GET /api/v1/label/__name__/values contains expected metric names", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/label/__name__/values").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireArrayContains("$.data", "up").
+ RequireArrayContains("$.data", "http_requests_total")
+ })
+
+ t.Run("GET /api/v1/label/job/values contains expected jobs", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/label/job/values").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data").
+ RequireArrayContains("$.data", "prometheus").
+ RequireArrayContains("$.data", "node").
+ RequireArrayContains("$.data", "api")
+ })
+
+ t.Run("GET /api/v1/series with match returns results", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/series",
+ "match[]", "up",
+ "start", "0",
+ "end", "120").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data").
+ RequireLenAtLeast("$.data", 1)
+ })
+
+ t.Run("GET /api/v1/query with specific job returns filtered results", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", `up{job="prometheus"}`).
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1)
+ })
+
+ t.Run("GET /api/v1/query with aggregation returns result", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "sum(up)").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector")
+ })
+
+ t.Run("POST /api/v1/query returns vector with data", func(t *testing.T) {
+ testhelpers.POST(t, api, "/api/v1/query", "query", "up").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1)
+ })
+}
+
+// TestAPIWithRules tests the API with rules configured.
+func TestAPIWithRules(t *testing.T) {
+ // Create an API with rule groups.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ RulesRetriever: testhelpers.NewLazyLoader(func() testhelpers.RulesRetriever {
+ return testhelpers.NewRulesRetrieverWithGroups(testhelpers.FixtureRuleGroups())
+ }),
+ })
+
+ t.Run("GET /api/v1/rules returns groups with rules", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/rules").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.groups").
+ RequireLenAtLeast("$.data.groups", 1).
+ RequireSome("$.data.groups", func(group any) bool {
+ if g, ok := group.(map[string]any); ok {
+ return g["name"] == "example"
+ }
+ return false
+ }).
+ RequireSome("$.data.groups", func(group any) bool {
+ if g, ok := group.(map[string]any); ok {
+ if g["name"] == "example" {
+ // Check that the group has rules.
+ if rules, ok := g["rules"].([]any); ok {
+ return len(rules) > 0
+ }
+ }
+ }
+ return false
+ })
+ })
+
+ t.Run("GET /api/v1/alerts returns alerts array", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/alerts").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.alerts").
+ RequireJSONArray("$.data.alerts")
+ })
+
+ t.Run("GET /api/v1/rules with rule_name filter", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/rules", "rule_name[]", "InstanceDown").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.groups")
+ })
+}
+
+// TestAPITSDBNotReady tests the API when TSDB is not ready (e.g., during WAL replay).
+// TSDB not ready errors are converted to errorUnavailable by setUnavailStatusOnTSDBNotReady,
+// which returns HTTP 500 Internal Server Error (the default for errorUnavailable).
+func TestAPITSDBNotReady(t *testing.T) {
+ // Create an API with a queryable that returns tsdb.ErrNotReady.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ Queryable: testhelpers.NewLazyLoader(testhelpers.NewTSDBNotReadyQueryable),
+ })
+
+ t.Run("GET /api/v1/query returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "up").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("POST /api/v1/query returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.POST(t, api, "/api/v1/query", "query", "up").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("GET /api/v1/query_range returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query_range",
+ "query", "up",
+ "start", "0",
+ "end", "100",
+ "step", "10").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("GET /api/v1/series returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/series",
+ "match[]", "up",
+ "start", "0",
+ "end", "100").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("GET /api/v1/labels returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/labels").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("GET /api/v1/label/{name}/values returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/label/__name__/values").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+}
+
+// TestAPIWithNativeHistograms tests the API with native histogram data.
+func TestAPIWithNativeHistograms(t *testing.T) {
+ // Create an API with histogram series data.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ Queryable: testhelpers.NewLazyLoader(func() storage.SampleAndChunkQueryable {
+ return testhelpers.NewQueryableWithSeries(testhelpers.FixtureHistogramSeries())
+ }),
+ })
+
+ t.Run("GET /api/v1/query returns vector with native histogram", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "test_histogram").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1).
+ RequireSome("$.data.result", func(item any) bool {
+ sample, ok := item.(map[string]any)
+ if !ok {
+ return false
+ }
+			// Check that the sample has a histogram field.
+ _, hasHistogram := sample["histogram"]
+ return hasHistogram
+ })
+ })
+
+ t.Run("POST /api/v1/query returns vector with native histogram", func(t *testing.T) {
+ testhelpers.POST(t, api, "/api/v1/query", "query", "test_histogram").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1).
+ RequireSome("$.data.result", func(item any) bool {
+ sample, ok := item.(map[string]any)
+ if !ok {
+ return false
+ }
+			// Check that the sample has a histogram field.
+ _, hasHistogram := sample["histogram"]
+ return hasHistogram
+ })
+ })
+
+ t.Run("GET /api/v1/query_range returns matrix with native histogram", func(t *testing.T) {
+ // Use relative timestamps to match our fixtures.
+ now := time.Now().Unix()
+ testhelpers.GET(t, api, "/api/v1/query_range",
+ "query", "test_histogram",
+ "start", strconv.FormatInt(now-120, 10),
+ "end", strconv.FormatInt(now, 10),
+ "step", "60").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "matrix")
+ })
+
+ t.Run("GET /api/v1/query with histogram selector", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", `test_histogram{job="prometheus"}`).
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1)
+ })
+
+ t.Run("GET /api/v1/series returns histogram metric series", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/series",
+ "match[]", "test_histogram",
+ "start", "0",
+ "end", strconv.FormatInt(time.Now().Unix(), 10)).
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data").
+ RequireLenAtLeast("$.data", 1)
+ })
+}
diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go
index 6e55089e16..850bedef17 100644
--- a/web/api/v1/errors_test.go
+++ b/web/api/v1/errors_test.go
@@ -169,6 +169,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable, overri
false,
overrideErrorCode,
nil,
+ OpenAPIOptions{},
)
promRouter := route.New().WithPrefix("/api/v1")
diff --git a/web/api/v1/openapi.go b/web/api/v1/openapi.go
new file mode 100644
index 0000000000..59fa8969ef
--- /dev/null
+++ b/web/api/v1/openapi.go
@@ -0,0 +1,320 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements OpenAPI 3.1 and 3.2 specification generation for the Prometheus HTTP API.
+// It provides dynamic spec building with optional path filtering.
+package v1
+
+import (
+ "log/slog"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "sync"
+
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/pb33f/libopenapi/orderedmap"
+)
+
+const (
+ // OpenAPI 3.1.0 is the default version with broader compatibility.
+ openAPIVersion31 = "3.1.0"
+ // OpenAPI 3.2.0 supports advanced features like itemSchema for SSE streams.
+ openAPIVersion32 = "3.2.0"
+)
+
+// OpenAPIOptions configures the OpenAPI spec builder.
+type OpenAPIOptions struct {
+ // IncludePaths filters which paths to include in the spec.
+ // If empty, all paths are included.
+ // Paths are matched by prefix (e.g., "/query" matches "/query" and "/query_range").
+ IncludePaths []string
+
+ // ExternalURL is the external URL of the Prometheus server (e.g., "http://prometheus.example.com:9090").
+ ExternalURL string
+
+ // Version is the API version to include in the OpenAPI spec.
+ // If empty, defaults to "0.0.1-undefined".
+ Version string
+}
+
+// OpenAPIBuilder builds and caches OpenAPI specifications.
+type OpenAPIBuilder struct {
+ mu sync.RWMutex
+ cachedYAML31 []byte // Cached OpenAPI 3.1 spec.
+ cachedYAML32 []byte // Cached OpenAPI 3.2 spec.
+ options OpenAPIOptions
+ logger *slog.Logger
+}
+
+// NewOpenAPIBuilder creates a new OpenAPI builder with the given options.
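+//
+// A minimal usage sketch (illustrative; the mux wiring and option values are assumed):
+//
+//	b := NewOpenAPIBuilder(OpenAPIOptions{
+//		IncludePaths: []string{"/query", "/labels"},
+//		ExternalURL:  "http://prometheus.example.com:9090",
+//		Version:      "3.7.3",
+//	}, promslog.NewNopLogger())
+//	http.HandleFunc("/api/v1/openapi.yaml", b.ServeOpenAPI)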
+func NewOpenAPIBuilder(opts OpenAPIOptions, logger *slog.Logger) *OpenAPIBuilder {
+ b := &OpenAPIBuilder{
+ options: opts,
+ logger: logger,
+ }
+
+ b.rebuild()
+ return b
+}
+
+// rebuild constructs the OpenAPI specs for both 3.1 and 3.2 versions based on current options.
+func (b *OpenAPIBuilder) rebuild() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // Build OpenAPI 3.1 spec.
+ doc31 := b.buildDocument(openAPIVersion31)
+ yamlBytes31, err := doc31.Render()
+ if err != nil {
+ b.logger.Error("failed to render OpenAPI 3.1 spec - this is a bug, please report it", "err", err)
+ return
+ }
+ b.cachedYAML31 = yamlBytes31
+
+ // Build OpenAPI 3.2 spec.
+ doc32 := b.buildDocument(openAPIVersion32)
+ yamlBytes32, err := doc32.Render()
+ if err != nil {
+ b.logger.Error("failed to render OpenAPI 3.2 spec - this is a bug, please report it", "err", err)
+ return
+ }
+ b.cachedYAML32 = yamlBytes32
+}
+
+// ServeOpenAPI returns the OpenAPI specification as YAML.
+// By default, serves OpenAPI 3.1.0. Use ?openapi_version=3.2 for OpenAPI 3.2.0.
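+//
+// Illustrative requests:
+//
+//	GET /api/v1/openapi.yaml                      -> OpenAPI 3.1.0 (default)
+//	GET /api/v1/openapi.yaml?openapi_version=3.2  -> OpenAPI 3.2.0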
+func (b *OpenAPIBuilder) ServeOpenAPI(w http.ResponseWriter, r *http.Request) {
+ // Parse query parameter to determine which version to serve.
+ requestedVersion := r.URL.Query().Get("openapi_version")
+
+ b.mu.RLock()
+ var yamlData []byte
+ switch requestedVersion {
+ case "3.2", "3.2.0":
+ yamlData = b.cachedYAML32
+ case "3.1", "3.1.0":
+ yamlData = b.cachedYAML31
+ default:
+ // Default to OpenAPI 3.1.0 for broader compatibility.
+ yamlData = b.cachedYAML31
+ }
+ b.mu.RUnlock()
+
+ w.Header().Set("Content-Type", "application/yaml; charset=utf-8")
+ w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
+ w.WriteHeader(http.StatusOK)
+ w.Write(yamlData)
+}
+
+// WrapHandler returns the handler unchanged (no validation).
+func (*OpenAPIBuilder) WrapHandler(next http.HandlerFunc) http.HandlerFunc {
+ return next
+}
+
+// shouldIncludePath checks if a path should be included based on options.
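+// For example (illustrative): with IncludePaths = ["/query"], both "/query" and
+// "/query_range" are included; with no IncludePaths, every path is.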
+func (b *OpenAPIBuilder) shouldIncludePath(path string) bool {
+ if len(b.options.IncludePaths) == 0 {
+ return true
+ }
+ for _, include := range b.options.IncludePaths {
+		if strings.HasPrefix(path, include) {
+ return true
+ }
+ }
+ return false
+}
+
+// shouldIncludePathForVersion checks if a path should be included for a specific OpenAPI version.
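+// For example (illustrative): "/notifications/live" is excluded for
+// openAPIVersion31 but included for openAPIVersion32.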
+func (b *OpenAPIBuilder) shouldIncludePathForVersion(path, version string) bool {
+ // First check IncludePaths filter.
+ if !b.shouldIncludePath(path) {
+ return false
+ }
+
+ // OpenAPI 3.1 excludes paths that require 3.2 features.
+ // The /notifications/live endpoint uses itemSchema which is a 3.2-only feature.
+ if version == openAPIVersion31 && path == "/notifications/live" {
+ return false
+ }
+
+ return true
+}
+
+// buildDocument creates the OpenAPI document for the specified version using high-level structs.
+func (b *OpenAPIBuilder) buildDocument(version string) *v3.Document {
+ return &v3.Document{
+ Version: version,
+ Info: b.buildInfo(),
+ Servers: b.buildServers(),
+ Tags: b.buildTags(version),
+ Paths: b.buildPaths(version),
+ Components: b.buildComponents(),
+ }
+}
+
+// buildInfo constructs the info section.
+func (b *OpenAPIBuilder) buildInfo() *base.Info {
+ apiVersion := b.options.Version
+ if apiVersion == "" {
+ apiVersion = "0.0.1-undefined"
+ }
+ return &base.Info{
+ Title: "Prometheus API",
+		Description: "Prometheus is an open-source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach.",
+ Version: apiVersion,
+ Contact: &base.Contact{
+ Name: "Prometheus Community",
+ URL: "https://prometheus.io/community/",
+ },
+ }
+}
+
+// buildServers constructs the servers section.
+func (b *OpenAPIBuilder) buildServers() []*v3.Server {
+ // ExternalURL is always set by computeExternalURL in main.go.
+ // It includes scheme, host, port, and optional path prefix (without trailing slash).
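+	// For example (illustrative), an ExternalURL of "http://prom.example.com:9090/prom"
+	// yields the server URL "http://prom.example.com:9090/prom/api/v1".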
+ serverURL := "/api/v1"
+ if b.options.ExternalURL != "" {
+ baseURL, err := url.Parse(b.options.ExternalURL)
+ if err == nil {
+			// Use path.Join to properly append /api/v1 to the existing path,
+			// then render the modified URL back to a string.
+ baseURL.Path = path.Join(baseURL.Path, "/api/v1")
+ serverURL = baseURL.String()
+ }
+ }
+ return []*v3.Server{
+ {URL: serverURL},
+ }
+}
+
+// buildTags constructs the global tags list.
+// Tag summary is an OpenAPI 3.2 feature, excluded from 3.1.
+// Tag description is supported in both 3.1 and 3.2.
+func (*OpenAPIBuilder) buildTags(version string) []*base.Tag {
+ // Define tags with all metadata.
+ tagData := []struct {
+ name string
+ summary string
+ description string
+ }{
+ {"query", "Query", "Query and evaluate PromQL expressions."},
+ {"metadata", "Metadata", "Retrieve metric metadata such as type and unit."},
+ {"labels", "Labels", "Query label names and values."},
+ {"series", "Series", "Query and manage time series."},
+ {"targets", "Targets", "Retrieve target and scrape pool information."},
+ {"rules", "Rules", "Query recording and alerting rules."},
+ {"alerts", "Alerts", "Query active alerts and alertmanager discovery."},
+ {"status", "Status", "Retrieve server status and configuration."},
+ {"admin", "Admin", "Administrative operations for TSDB management."},
+ {"features", "Features", "Query enabled features."},
+ {"remote", "Remote Storage", "Remote read and write endpoints."},
+ {"otlp", "OTLP", "OpenTelemetry Protocol metrics ingestion."},
+ {"notifications", "Notifications", "Server notifications and events."},
+ }
+
+ tags := make([]*base.Tag, 0, len(tagData))
+ for _, td := range tagData {
+ tag := &base.Tag{
+ Name: td.name,
+ Description: td.description, // Description is supported in both 3.1 and 3.2.
+ }
+
+ // Summary is an OpenAPI 3.2 feature only.
+ if version == openAPIVersion32 {
+ tag.Summary = td.summary
+ }
+
+ tags = append(tags, tag)
+ }
+
+ return tags
+}
+
+// buildPaths constructs all API path definitions.
+func (b *OpenAPIBuilder) buildPaths(version string) *v3.Paths {
+ pathItems := orderedmap.New[string, *v3.PathItem]()
+
+ allPaths := b.getAllPathDefinitions()
+ for pair := allPaths.First(); pair != nil; pair = pair.Next() {
+ if b.shouldIncludePathForVersion(pair.Key(), version) {
+ pathItems.Set(pair.Key(), pair.Value())
+ }
+ }
+
+ return &v3.Paths{PathItems: pathItems}
+}
+
+// getAllPathDefinitions returns all path definitions.
+func (b *OpenAPIBuilder) getAllPathDefinitions() *orderedmap.Map[string, *v3.PathItem] {
+ paths := orderedmap.New[string, *v3.PathItem]()
+
+ // Query endpoints.
+ paths.Set("/query", b.queryPath())
+ paths.Set("/query_range", b.queryRangePath())
+ paths.Set("/query_exemplars", b.queryExemplarsPath())
+ paths.Set("/format_query", b.formatQueryPath())
+ paths.Set("/parse_query", b.parseQueryPath())
+
+ // Label endpoints.
+ paths.Set("/labels", b.labelsPath())
+ paths.Set("/label/{name}/values", b.labelValuesPath())
+
+ // Series endpoints.
+ paths.Set("/series", b.seriesPath())
+
+ // Metadata endpoints.
+ paths.Set("/metadata", b.metadataPath())
+
+ // Target endpoints.
+ paths.Set("/scrape_pools", b.scrapePoolsPath())
+ paths.Set("/targets", b.targetsPath())
+ paths.Set("/targets/metadata", b.targetsMetadataPath())
+ paths.Set("/targets/relabel_steps", b.targetsRelabelStepsPath())
+
+ // Rules and alerts endpoints.
+ paths.Set("/rules", b.rulesPath())
+ paths.Set("/alerts", b.alertsPath())
+ paths.Set("/alertmanagers", b.alertmanagersPath())
+
+ // Status endpoints.
+ paths.Set("/status/config", b.statusConfigPath())
+ paths.Set("/status/runtimeinfo", b.statusRuntimeInfoPath())
+ paths.Set("/status/buildinfo", b.statusBuildInfoPath())
+ paths.Set("/status/flags", b.statusFlagsPath())
+ paths.Set("/status/tsdb", b.statusTSDBPath())
+ paths.Set("/status/tsdb/blocks", b.statusTSDBBlocksPath())
+ paths.Set("/status/walreplay", b.statusWALReplayPath())
+
+ // Admin endpoints.
+ paths.Set("/admin/tsdb/delete_series", b.adminDeleteSeriesPath())
+ paths.Set("/admin/tsdb/clean_tombstones", b.adminCleanTombstonesPath())
+ paths.Set("/admin/tsdb/snapshot", b.adminSnapshotPath())
+
+ // Remote endpoints.
+ paths.Set("/read", b.remoteReadPath())
+ paths.Set("/write", b.remoteWritePath())
+ paths.Set("/otlp/v1/metrics", b.otlpWritePath())
+
+ // Notifications endpoints.
+ paths.Set("/notifications", b.notificationsPath())
+ paths.Set("/notifications/live", b.notificationsLivePath())
+
+ // Features endpoint.
+ paths.Set("/features", b.featuresPath())
+
+ return paths
+}
diff --git a/web/api/v1/openapi_coverage_test.go b/web/api/v1/openapi_coverage_test.go
new file mode 100644
index 0000000000..103f82e08e
--- /dev/null
+++ b/web/api/v1/openapi_coverage_test.go
@@ -0,0 +1,258 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ _ "embed"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "strconv"
+ "strings"
+ "testing"
+
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+)
+
+//go:embed api.go
+var apiGoSource string
+
+// routeInfo represents a route extracted from the Register function.
+type routeInfo struct {
+ method string
+ path string
+}
+
+// extractRoutesFromRegister parses the api.go source and extracts all routes
+// registered in the (*API).Register method by walking the Go AST.
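+// For example (illustrative), a registration of the form r.Get("/query", ...)
+// yields routeInfo{method: "GET", path: "/query"}.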
+func extractRoutesFromRegister(t *testing.T, source string) []routeInfo {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "api.go", source, parser.ParseComments)
+ require.NoError(t, err, "failed to parse api.go")
+
+ var registerFunc *ast.FuncDecl
+
+ // Find the Register method on *API.
+ ast.Inspect(f, func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok || fn.Body == nil {
+ return true
+ }
+
+ if fn.Name.Name != "Register" {
+ return true
+ }
+
+ // Ensure it's a method on *API.
+ if fn.Recv == nil || len(fn.Recv.List) != 1 {
+ return true
+ }
+
+ star, ok := fn.Recv.List[0].Type.(*ast.StarExpr)
+ if !ok {
+ return true
+ }
+
+ ident, ok := star.X.(*ast.Ident)
+ if !ok || ident.Name != "API" {
+ return true
+ }
+
+ registerFunc = fn
+ return false // Stop walking once found.
+ })
+
+ require.NotNil(t, registerFunc, "Register method not found")
+
+ var routes []routeInfo
+
+ // Extract all r.Get, r.Post, r.Put, r.Delete, r.Options calls.
+ ast.Inspect(registerFunc.Body, func(n ast.Node) bool {
+ call, ok := n.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+
+ // Check if it's a router method call.
+ method := sel.Sel.Name
+ if method != "Get" && method != "Post" && method != "Put" && method != "Delete" && method != "Del" && method != "Options" {
+ return true
+ }
+
+ // Ensure the receiver is 'r'.
+ if x, ok := sel.X.(*ast.Ident); !ok || x.Name != "r" {
+ return true
+ }
+
+ if len(call.Args) == 0 {
+ return true
+ }
+
+ // Extract the path from the first argument.
+ lit, ok := call.Args[0].(*ast.BasicLit)
+ if !ok || lit.Kind != token.STRING {
+ return true
+ }
+
+ path, err := strconv.Unquote(lit.Value)
+ if err != nil {
+ return true
+ }
+
+	// Normalize Del to Delete; the method name is upper-cased below.
+ if method == "Del" {
+ method = "Delete"
+ }
+
+ routes = append(routes, routeInfo{
+ method: strings.ToUpper(method),
+ path: path,
+ })
+ return true
+ })
+
+ return routes
+}
+
+// normalizePathForOpenAPI converts route paths with colon parameters to OpenAPI format.
+// e.g., "/label/:name/values" -> "/label/{name}/values".
+func normalizePathForOpenAPI(path string) string {
+ // Replace :param with {param}.
+ parts := strings.Split(path, "/")
+ for i, part := range parts {
+ if trimmed, ok := strings.CutPrefix(part, ":"); ok {
+ parts[i] = "{" + trimmed + "}"
+ }
+ }
+ return strings.Join(parts, "/")
+}
+
+// TestOpenAPICoverage verifies that all routes registered in the Register function
+// are documented in the OpenAPI specification.
+func TestOpenAPICoverage(t *testing.T) {
+ // Extract routes from api.go using AST.
+ routes := extractRoutesFromRegister(t, apiGoSource)
+ require.NotEmpty(t, routes, "no routes found in Register function")
+
+ // Build OpenAPI spec.
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+ allPaths := builder.getAllPathDefinitions()
+
+ // Create a map of OpenAPI paths for quick lookup.
+	// Keys are "<path>:<METHOD>"; presence marks a documented operation.
+ openAPIPaths := make(map[string]bool)
+ for pair := allPaths.First(); pair != nil; pair = pair.Next() {
+ pathItem := pair.Value()
+ path := pair.Key()
+
+ // Track which methods are defined for this path.
+ if pathItem.Get != nil {
+ openAPIPaths[path+":GET"] = true
+ }
+ if pathItem.Post != nil {
+ openAPIPaths[path+":POST"] = true
+ }
+ if pathItem.Put != nil {
+ openAPIPaths[path+":PUT"] = true
+ }
+ if pathItem.Delete != nil {
+ openAPIPaths[path+":DELETE"] = true
+ }
+ if pathItem.Options != nil {
+ openAPIPaths[path+":OPTIONS"] = true
+ }
+ }
+
+ // Check coverage for each route.
+ var missingRoutes []string
+ ignoredRoutes := map[string]bool{
+ "/*path:OPTIONS": true, // Wildcard OPTIONS handler.
+ "/openapi.yaml:GET": true, // Self-referential endpoint.
+ "/notifications/live:GET": true, // SSE endpoint (version-specific).
+ }
+
+ for _, route := range routes {
+ normalizedPath := normalizePathForOpenAPI(route.path)
+ key := normalizedPath + ":" + route.method
+
+ // Skip ignored routes.
+ if ignoredRoutes[key] {
+ continue
+ }
+
+ if !openAPIPaths[key] {
+ missingRoutes = append(missingRoutes, key)
+ }
+ }
+
+ if len(missingRoutes) > 0 {
+ t.Errorf("The following routes are registered but not documented in OpenAPI spec:\n%s",
+ strings.Join(missingRoutes, "\n"))
+ }
+}
+
+// TestOpenAPIHasNoExtraRoutes verifies that the OpenAPI spec doesn't document
+// routes that aren't actually registered.
+func TestOpenAPIHasNoExtraRoutes(t *testing.T) {
+ // Extract routes from api.go using AST.
+ routes := extractRoutesFromRegister(t, apiGoSource)
+ require.NotEmpty(t, routes, "no routes found in Register function")
+
+ // Create a map of registered routes.
+ registeredRoutes := make(map[string]bool)
+ for _, route := range routes {
+ normalizedPath := normalizePathForOpenAPI(route.path)
+ key := normalizedPath + ":" + route.method
+ registeredRoutes[key] = true
+ }
+
+ // Build OpenAPI spec.
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+ allPaths := builder.getAllPathDefinitions()
+
+ // Check if any OpenAPI paths are not registered.
+ var extraRoutes []string
+
+ for pair := allPaths.First(); pair != nil; pair = pair.Next() {
+ pathItem := pair.Value()
+ path := pair.Key()
+
+ checkMethod := func(method string, op *v3.Operation) {
+ if op != nil {
+ key := path + ":" + method
+ if !registeredRoutes[key] {
+ extraRoutes = append(extraRoutes, key)
+ }
+ }
+ }
+
+ checkMethod("GET", pathItem.Get)
+ checkMethod("POST", pathItem.Post)
+ checkMethod("PUT", pathItem.Put)
+ checkMethod("DELETE", pathItem.Delete)
+ checkMethod("OPTIONS", pathItem.Options)
+ }
+
+ if len(extraRoutes) > 0 {
+ t.Errorf("The following routes are documented in OpenAPI but not registered:\n%s",
+ strings.Join(extraRoutes, "\n"))
+ }
+}
diff --git a/web/api/v1/openapi_examples.go b/web/api/v1/openapi_examples.go
new file mode 100644
index 0000000000..50e155b184
--- /dev/null
+++ b/web/api/v1/openapi_examples.go
@@ -0,0 +1,1013 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains example request bodies and response data for OpenAPI documentation.
+// Examples are included in the generated spec to provide realistic usage scenarios for API consumers.
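+//
+// Each named example renders under an operation's content -> examples map,
+// roughly like this (illustrative):
+//
+//	examples:
+//	  simpleQuery:
+//	    summary: Simple instant query
+//	    value:
+//	      query: up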
+package v1
+
+import (
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ "github.com/pb33f/libopenapi/orderedmap"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+)
+
+// Example builders for request bodies.
+
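+// queryPostExamples returns examples for POST /query endpoint.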
+func queryPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("simpleQuery", &base.Example{
+ Summary: "Simple instant query",
+ Value: createYAMLNode(map[string]any{"query": "up"}),
+ })
+
+ examples.Set("queryWithTime", &base.Example{
+ Summary: "Query with specific timestamp",
+ Value: createYAMLNode(map[string]any{
+ "query": "up{job=\"prometheus\"}",
+ "time": "2026-01-02T13:37:00.000Z",
+ }),
+ })
+
+ examples.Set("queryWithLimit", &base.Example{
+ Summary: "Query with limit and statistics",
+ Value: createYAMLNode(map[string]any{
+ "query": "rate(prometheus_http_requests_total{handler=\"/api/v1/query\"}[5m])",
+ "limit": 100,
+ "stats": "all",
+ }),
+ })
+
+ return examples
+}
+
+// queryRangePostExamples returns examples for POST /query_range endpoint.
+func queryRangePostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("basicRange", &base.Example{
+ Summary: "Basic range query",
+ Value: createYAMLNode(map[string]any{
+ "query": "up",
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ "step": "15s",
+ }),
+ })
+
+ examples.Set("rateQuery", &base.Example{
+ Summary: "Rate calculation over time range",
+ Value: createYAMLNode(map[string]any{
+ "query": "rate(prometheus_http_requests_total{handler=\"/api/v1/query\"}[5m])",
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ "step": "30s",
+ "timeout": "30s",
+ }),
+ })
+
+ return examples
+}
+
+// queryExemplarsPostExamples returns examples for POST /query_exemplars endpoint.
+func queryExemplarsPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("basicExemplar", &base.Example{
+ Summary: "Query exemplars for a metric",
+ Value: createYAMLNode(map[string]any{"query": "prometheus_http_requests_total"}),
+ })
+
+ examples.Set("exemplarWithTimeRange", &base.Example{
+ Summary: "Exemplars within specific time range",
+ Value: createYAMLNode(map[string]any{
+ "query": "prometheus_http_requests_total{job=\"prometheus\"}",
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ }),
+ })
+
+ return examples
+}
+
+// formatQueryPostExamples returns examples for POST /format_query endpoint.
+func formatQueryPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("simpleFormat", &base.Example{
+ Summary: "Format a simple query",
+ Value: createYAMLNode(map[string]any{"query": "up{job=\"prometheus\"}"}),
+ })
+
+ examples.Set("complexFormat", &base.Example{
+ Summary: "Format a complex query",
+ Value: createYAMLNode(map[string]any{"query": "sum(rate(http_requests_total[5m])) by (job, status)"}),
+ })
+
+ return examples
+}
+
+// parseQueryPostExamples returns examples for POST /parse_query endpoint.
+func parseQueryPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("simpleParse", &base.Example{
+ Summary: "Parse a simple query",
+ Value: createYAMLNode(map[string]any{"query": "up"}),
+ })
+
+ examples.Set("complexParse", &base.Example{
+ Summary: "Parse a complex query",
+ Value: createYAMLNode(map[string]any{"query": "rate(http_requests_total{job=\"api\"}[5m])"}),
+ })
+
+ return examples
+}
+
+// labelsPostExamples returns examples for POST /labels endpoint.
+func labelsPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("allLabels", &base.Example{
+ Summary: "Get all label names",
+ Value: createYAMLNode(map[string]any{}),
+ })
+
+ examples.Set("labelsWithTimeRange", &base.Example{
+ Summary: "Get label names within time range",
+ Value: createYAMLNode(map[string]any{
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ }),
+ })
+
+ examples.Set("labelsWithMatch", &base.Example{
+ Summary: "Get label names matching series selector",
+ Value: createYAMLNode(map[string]any{
+ "match[]": []string{"up", "process_start_time_seconds{job=\"prometheus\"}"},
+ }),
+ })
+
+ return examples
+}
+
+// seriesPostExamples returns examples for POST /series endpoint.
+func seriesPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("seriesMatch", &base.Example{
+ Summary: "Find series by label matchers",
+ Value: createYAMLNode(map[string]any{
+ "match[]": []string{"up"},
+ }),
+ })
+
+ examples.Set("seriesWithTimeRange", &base.Example{
+ Summary: "Find series with time range",
+ Value: createYAMLNode(map[string]any{
+ "match[]": []string{"up", "process_cpu_seconds_total{job=\"prometheus\"}"},
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ }),
+ })
+
+ return examples
+}
+
+// Example builders for response bodies.
+
+// queryResponseExamples returns examples for /query response.
+func queryResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ vectorResult := promql.Vector{
+ promql.Sample{
+ Metric: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "demo.prometheus.io:9090"),
+ T: 1767436620000,
+ F: 1,
+ },
+ promql.Sample{
+ Metric: labels.FromStrings("__name__", "up", "env", "demo", "job", "alertmanager", "instance", "demo.prometheus.io:9093"),
+ T: 1767436620000,
+ F: 1,
+ },
+ }
+
+ examples.Set("vectorResult", &base.Example{
+ Summary: "Instant vector query: up",
+ Value: vectorExample(vectorResult),
+ })
+
+ examples.Set("scalarResult", &base.Example{
+ Summary: "Scalar query: scalar(42)",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "resultType": "scalar",
+ "result": []any{1767436620, "42"},
+ },
+ }),
+ })
+
+ matrixResult := promql.Matrix{
+ promql.Series{
+ Metric: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "demo.prometheus.io:9090"),
+ Floats: []promql.FPoint{
+ {T: 1767436320000, F: 1},
+ {T: 1767436620000, F: 1},
+ },
+ },
+ }
+
+ examples.Set("matrixResult", &base.Example{
+ Summary: "Range vector query: up[5m]",
+ Value: matrixExample(matrixResult),
+ })
+
+ // TODO: Add native histogram example.
+
+ return examples
+}
+
+// queryRangeResponseExamples returns examples for /query_range response.
+func queryRangeResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ matrixResult := promql.Matrix{
+ promql.Series{
+ Metric: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "demo.prometheus.io:9090"),
+ Floats: []promql.FPoint{
+ {T: 1767433020000, F: 1},
+ {T: 1767434820000, F: 1},
+ {T: 1767436620000, F: 1},
+ },
+ },
+ }
+
+ examples.Set("matrixResult", &base.Example{
+ Summary: "Range query: rate(prometheus_http_requests_total[5m])",
+ Value: matrixExample(matrixResult),
+ })
+
+ // TODO: Add native histogram example.
+
+ return examples
+}
+
+// labelsResponseExamples returns examples for /labels response.
+func labelsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("labelNames", &base.Example{
+ Summary: "List of label names",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []string{
+ "__name__", "active", "address", "alertmanager", "alertname", "alertstate",
+ "backend", "branch", "code", "collector", "component", "device",
+ "env", "endpoint", "fstype", "handler", "instance", "job",
+ "le", "method", "mode", "name",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// seriesResponseExamples returns examples for /series response.
+func seriesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("seriesList", &base.Example{
+ Summary: "List of series matching the selector",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []map[string]string{
+ {
+ "__name__": "up",
+ "env": "demo",
+ "instance": "demo.prometheus.io:8080",
+ "job": "cadvisor",
+ },
+ {
+ "__name__": "up",
+ "env": "demo",
+ "instance": "demo.prometheus.io:9093",
+ "job": "alertmanager",
+ },
+ {
+ "__name__": "up",
+ "env": "demo",
+ "instance": "demo.prometheus.io:9100",
+ "job": "node",
+ },
+ {
+ "__name__": "up",
+ "instance": "demo.prometheus.io:3000",
+ "job": "grafana",
+ },
+ {
+ "__name__": "up",
+ "instance": "demo.prometheus.io:8996",
+ "job": "random",
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// targetsResponseExamples returns examples for /targets response.
+func targetsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("targetsList", &base.Example{
+ Summary: "Active and dropped targets",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "activeTargets": []map[string]any{
+ {
+ "discoveredLabels": map[string]string{
+ "__address__": "demo.prometheus.io:9093",
+ "__meta_filepath": "/etc/prometheus/file_sd/alertmanager.yml",
+ "__metrics_path__": "/metrics",
+ "__scheme__": "http",
+ "env": "demo",
+ "job": "alertmanager",
+ },
+ "labels": map[string]string{
+ "env": "demo",
+ "instance": "demo.prometheus.io:9093",
+ "job": "alertmanager",
+ },
+ "scrapePool": "alertmanager",
+ "scrapeUrl": "http://demo.prometheus.io:9093/metrics",
+ "globalUrl": "http://demo.prometheus.io:9093/metrics",
+ "lastError": "",
+ "lastScrape": "2026-01-02T13:36:40.200Z",
+ "lastScrapeDuration": 0.006576866,
+ "health": "up",
+ "scrapeInterval": "15s",
+ "scrapeTimeout": "10s",
+ },
+ },
+ "droppedTargets": []map[string]any{},
+ "droppedTargetCounts": map[string]int{
+ "alertmanager": 0,
+ "blackbox": 0,
+ "caddy": 0,
+ "cadvisor": 0,
+ "grafana": 0,
+ "node": 0,
+ "prometheus": 0,
+ "random": 0,
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// rulesResponseExamples returns examples for /rules response.
+func rulesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("ruleGroups", &base.Example{
+ Summary: "Alerting and recording rules",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "groups": []map[string]any{
+ {
+ "name": "ansible managed alert rules",
+ "file": "/etc/prometheus/rules/ansible_managed.yml",
+ "interval": 15,
+ "limit": 0,
+ "rules": []map[string]any{
+ {
+ "state": "firing",
+ "name": "Watchdog",
+ "query": "vector(1)",
+ "duration": 600,
+ "keepFiringFor": 0,
+ "labels": map[string]string{"severity": "warning"},
+ "annotations": map[string]string{"description": "This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the \"DeadMansSnitch\" integration in PagerDuty.", "summary": "Ensure entire alerting pipeline is functional"},
+ "health": "ok",
+ "evaluationTime": 0.000356688,
+ "lastEvaluation": "2026-01-02T13:36:56.874Z",
+ "type": "alerting",
+ },
+ },
+ "evaluationTime": 0.000561635,
+ "lastEvaluation": "2026-01-02T13:36:56.874Z",
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// alertsResponseExamples returns examples for /alerts response.
+func alertsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("activeAlerts", &base.Example{
+ Summary: "Currently active alerts",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "alerts": []map[string]any{
+ {
+ "labels": map[string]string{
+ "alertname": "Watchdog",
+ "severity": "warning",
+ },
+ "annotations": map[string]string{
+ "description": "This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the \"DeadMansSnitch\" integration in PagerDuty.",
+ "summary": "Ensure entire alerting pipeline is functional",
+ },
+ "state": "firing",
+ "activeAt": "2026-01-02T13:30:00.000Z",
+ "value": "1e+00",
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// queryExemplarsResponseExamples returns examples for /query_exemplars response.
+func queryExemplarsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("exemplarsResult", &base.Example{
+ Summary: "Exemplars for a metric with trace IDs",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []map[string]any{
+ {
+ "seriesLabels": map[string]string{
+ "__name__": "http_requests_total",
+ "job": "api-server",
+ "method": "GET",
+ },
+ "exemplars": []map[string]any{
+ {
+ "labels": map[string]string{
+ "traceID": "abc123def456",
+ },
+ "value": "1.5",
+ "timestamp": 1689956451.781,
+ },
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// formatQueryResponseExamples returns examples for /format_query response.
+func formatQueryResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("formattedQuery", &base.Example{
+ Summary: "Formatted PromQL query",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": "sum by(job, status) (rate(http_requests_total[5m]))",
+ }),
+ })
+
+ return examples
+}
+
+// parseQueryResponseExamples returns examples for /parse_query response.
+func parseQueryResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("parsedQuery", &base.Example{
+ Summary: "Parsed PromQL expression tree",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "resultType": "vector",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// labelValuesResponseExamples returns examples for /label/{name}/values response.
+func labelValuesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("labelValues", &base.Example{
+ Summary: "List of values for a label",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []string{"alertmanager", "blackbox", "caddy", "cadvisor", "grafana", "node", "prometheus", "random"},
+ }),
+ })
+
+ return examples
+}
+
+// metadataResponseExamples returns examples for /metadata response.
+func metadataResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("metricMetadata", &base.Example{
+ Summary: "Metadata for metrics",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string][]map[string]any{
+ "prometheus_rule_group_iterations_missed_total": {
+ {
+ "type": "counter",
+ "help": "The total number of rule group evaluations missed due to slow rule group evaluation.",
+ "unit": "",
+ },
+ },
+ "prometheus_sd_updates_total": {
+ {
+ "type": "counter",
+ "help": "Total number of update events sent to the SD consumers.",
+ "unit": "",
+ },
+ },
+ "go_gc_stack_starting_size_bytes": {
+ {
+ "type": "gauge",
+ "help": "The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes.",
+ "unit": "",
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// scrapePoolsResponseExamples returns examples for /scrape_pools response.
+func scrapePoolsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("scrapePoolsList", &base.Example{
+ Summary: "List of scrape pool names",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "scrapePools": []string{"alertmanager", "blackbox", "caddy", "cadvisor", "grafana", "node", "prometheus", "random"},
+ },
+ }),
+ })
+
+ return examples
+}
+
+// targetsMetadataResponseExamples returns examples for /targets/metadata response.
+func targetsMetadataResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("targetMetadata", &base.Example{
+ Summary: "Metadata for targets",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []map[string]any{
+ {
+ "target": map[string]string{
+ "instance": "localhost:9090",
+ "job": "prometheus",
+ },
+ "type": "gauge",
+ "help": "The current health status of the target",
+ "unit": "",
+ "metric": "up",
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// targetsRelabelStepsResponseExamples returns examples for /targets/relabel_steps response.
+func targetsRelabelStepsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("relabelSteps", &base.Example{
+ Summary: "Relabel steps for a target",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "steps": []map[string]any{
+ {
+ "rule": map[string]any{
+ "source_labels": []string{"__address__"},
+ "target_label": "instance",
+ "action": "replace",
+ "regex": "(.*)",
+ "replacement": "$1",
+ },
+ "output": map[string]string{
+ "__address__": "localhost:9090",
+ "instance": "localhost:9090",
+ "job": "prometheus",
+ },
+ "keep": true,
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// alertmanagersResponseExamples returns examples for /alertmanagers response.
+func alertmanagersResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("alertmanagerDiscovery", &base.Example{
+ Summary: "Alertmanager discovery results",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "activeAlertmanagers": []map[string]any{
+ {
+ "url": "http://demo.prometheus.io:9093/api/v2/alerts",
+ },
+ },
+ "droppedAlertmanagers": []map[string]any{},
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusConfigResponseExamples returns examples for /status/config response.
+func statusConfigResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("configYAML", &base.Example{
+ Summary: "Prometheus configuration",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "yaml": "global:\n scrape_interval: 15s\n scrape_timeout: 10s\n evaluation_interval: 15s\n external_labels:\n environment: demo-prometheus-io\nalerting:\n alertmanagers:\n - scheme: http\n static_configs:\n - targets:\n - demo.prometheus.io:9093\nrule_files:\n- /etc/prometheus/rules/*.yml\n",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusRuntimeInfoResponseExamples returns examples for /status/runtimeinfo response.
+func statusRuntimeInfoResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("runtimeInfo", &base.Example{
+ Summary: "Runtime information",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "startTime": "2026-01-01T13:37:00.000Z",
+ "CWD": "/",
+ "hostname": "demo-prometheus-io",
+ "serverTime": "2026-01-02T13:37:00.000Z",
+ "reloadConfigSuccess": true,
+ "lastConfigTime": "2026-01-01T13:37:00.000Z",
+ "corruptionCount": 0,
+ "goroutineCount": 88,
+ "GOMAXPROCS": 2,
+ "GOMEMLIMIT": int64(3703818240),
+ "GOGC": "75",
+ "GODEBUG": "",
+ "storageRetention": "31d",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusBuildInfoResponseExamples returns examples for /status/buildinfo response.
+func statusBuildInfoResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("buildInfo", &base.Example{
+ Summary: "Build information",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "version": "3.7.3",
+ "revision": "0a41f0000705c69ab8e0f9a723fc73e39ed62b07",
+ "branch": "HEAD",
+ "buildUser": "root@08c890a84441",
+ "buildDate": "20251030-07:26:10",
+ "goVersion": "go1.25.3",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusFlagsResponseExamples returns examples for /status/flags response.
+func statusFlagsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("flags", &base.Example{
+ Summary: "Command-line flags",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]string{
+ "agent": "false",
+ "alertmanager.notification-queue-capacity": "10000",
+ "config.file": "/etc/prometheus/prometheus.yml",
+ "enable-feature": "exemplar-storage,native-histograms",
+ "query.max-concurrency": "20",
+ "query.timeout": "2m",
+ "storage.tsdb.path": "/prometheus",
+ "storage.tsdb.retention.time": "15d",
+ "web.console.libraries": "/usr/share/prometheus/console_libraries",
+ "web.console.templates": "/usr/share/prometheus/consoles",
+ "web.enable-admin-api": "true",
+ "web.enable-lifecycle": "true",
+ "web.listen-address": "0.0.0.0:9090",
+ "web.page-title": "Prometheus Time Series Collection and Processing Server",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusTSDBResponseExamples returns examples for /status/tsdb response.
+func statusTSDBResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("tsdbStats", &base.Example{
+ Summary: "TSDB statistics",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "headStats": map[string]any{
+ "numSeries": 9925,
+ "numLabelPairs": 2512,
+ "chunkCount": 37525,
+ "minTime": int64(1767362400712),
+ "maxTime": int64(1767436620000),
+ },
+ "seriesCountByMetricName": []map[string]any{
+ {
+ "name": "up",
+ "value": 100,
+ },
+ {
+ "name": "http_requests_total",
+ "value": 500,
+ },
+ },
+ "labelValueCountByLabelName": []map[string]any{
+ {
+ "name": "__name__",
+ "value": 5,
+ },
+ {
+ "name": "job",
+ "value": 3,
+ },
+ },
+ "memoryInBytesByLabelName": []map[string]any{
+ {
+ "name": "__name__",
+ "value": 1024,
+ },
+ {
+ "name": "job",
+ "value": 512,
+ },
+ },
+ "seriesCountByLabelValuePair": []map[string]any{
+ {
+ "name": "job=prometheus",
+ "value": 100,
+ },
+ {
+ "name": "instance=localhost:9090",
+ "value": 100,
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusTSDBBlocksResponseExamples returns examples for /status/tsdb/blocks response.
+func statusTSDBBlocksResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("tsdbBlocks", &base.Example{
+ Summary: "TSDB block information",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "blocks": []map[string]any{
+ {
+ "ulid": "01KC4D6GXQA4CRHYKV78NEBVAE",
+ "minTime": int64(1764568801099),
+ "maxTime": int64(1764763200000),
+ "stats": map[string]any{
+ "numSamples": 129505582,
+ "numSeries": 10661,
+ "numChunks": 1073962,
+ },
+ "compaction": map[string]any{
+ "level": 4,
+ "sources": []string{
+ "01KBCJ7TR8A4QAJ3AA1J651P5S",
+ "01KBCS3J0E34567YPB8Y5W0E24",
+ "01KBCZZ9KRTYGG3E7HVQFGC3S3",
+ },
+ },
+ "version": 1,
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusWALReplayResponseExamples returns examples for /status/walreplay response.
+func statusWALReplayResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("walReplay", &base.Example{
+ Summary: "WAL replay status",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "min": 3209,
+ "max": 3214,
+ "current": 3214,
+ },
+ }),
+ })
+
+ return examples
+}
+
+// deleteSeriesResponseExamples returns examples for /admin/tsdb/delete_series response.
+func deleteSeriesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("deletionSuccess", &base.Example{
+ Summary: "Successful series deletion",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ }),
+ })
+
+ return examples
+}
+
+// cleanTombstonesResponseExamples returns examples for /admin/tsdb/clean_tombstones response.
+func cleanTombstonesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("tombstonesCleaned", &base.Example{
+ Summary: "Tombstones cleaned successfully",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ }),
+ })
+
+ return examples
+}
+
+// seriesDeleteResponseExamples returns examples for DELETE /series response.
+func seriesDeleteResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("seriesDeleted", &base.Example{
+ Summary: "Series marked for deletion",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ }),
+ })
+
+ return examples
+}
+
+// snapshotResponseExamples returns examples for /admin/tsdb/snapshot response.
+func snapshotResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("snapshotCreated", &base.Example{
+ Summary: "Snapshot created successfully",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "name": "20260102T133700Z-a1b2c3d4e5f67890",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// notificationsResponseExamples returns examples for /notifications response.
+func notificationsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("notifications", &base.Example{
+ Summary: "Server notifications",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []map[string]any{
+ {
+ "text": "Configuration reload has failed.",
+ "date": "2026-01-02T16:14:50.046Z",
+ "active": true,
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// notificationLiveExamples provides example SSE messages for the live notifications endpoint.
+func notificationLiveExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("activeNotification", &base.Example{
+ Summary: "Active notification SSE message",
+ Description: "An SSE message containing an active server notification.",
+ Value: createYAMLNode(map[string]any{
+ "data": "{\"text\":\"Configuration reload has failed.\",\"date\":\"2026-01-02T16:14:50.046Z\",\"active\":true}",
+ }),
+ })
+
+ return examples
+}
+
+// featuresResponseExamples returns examples for /features response.
+func featuresResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("enabledFeatures", &base.Example{
+ Summary: "Enabled feature flags",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []string{"exemplar-storage", "remote-write-receiver"},
+ }),
+ })
+
+ return examples
+}
+
+// errorResponseExamples returns examples for error responses.
+func errorResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("tsdbNotReady", &base.Example{
+ Summary: "TSDB not ready",
+ Value: createYAMLNode(map[string]any{
+ "status": "error",
+ "errorType": "internal",
+ "error": "TSDB not ready",
+ }),
+ })
+
+ return examples
+}
diff --git a/web/api/v1/openapi_golden_test.go b/web/api/v1/openapi_golden_test.go
new file mode 100644
index 0000000000..6207fda81b
--- /dev/null
+++ b/web/api/v1/openapi_golden_test.go
@@ -0,0 +1,176 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "flag"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v3"
+
+ "github.com/prometheus/prometheus/web/api/testhelpers"
+)
+
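+// The golden files can be (re)generated by running the tests with the flag
+// below, for example `go test -update-openapi-spec` from this package
+// directory (illustrative invocation, mirroring the hint printed when a
+// golden-file comparison fails).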
+var updateOpenAPISpec = flag.Bool("update-openapi-spec", false, "update openapi golden files with the current specs")
+
+// TestOpenAPIGolden_3_1 verifies that the OpenAPI 3.1 spec matches the golden file.
+func TestOpenAPIGolden_3_1(t *testing.T) {
+ // Create an API instance to serve the OpenAPI spec.
+ api := newTestAPI(t, testhelpers.APIConfig{})
+
+ // Fetch the OpenAPI 3.1 spec from the API (default, no query param).
+ resp := testhelpers.GET(t, api, "/api/v1/openapi.yaml")
+ require.Equal(t, 200, resp.StatusCode, "expected HTTP 200 for OpenAPI spec endpoint")
+ require.NotEmpty(t, resp.Body, "OpenAPI spec should not be empty")
+
+ goldenPath := filepath.Join("testdata", "openapi_3.1_golden.yaml")
+
+ if *updateOpenAPISpec {
+ // Update mode: write the current spec to the golden file.
+ t.Logf("Updating golden file: %s", goldenPath)
+
+ // Ensure the testdata directory exists.
+ err := os.MkdirAll(filepath.Dir(goldenPath), 0o755)
+ require.NoError(t, err, "failed to create testdata directory")
+
+ // Write the golden file.
+ err = os.WriteFile(goldenPath, []byte(resp.Body), 0o644)
+ require.NoError(t, err, "failed to write golden file")
+
+ t.Logf("Golden file updated successfully")
+ return
+ }
+
+ // Comparison mode: verify the spec matches the golden file.
+ goldenData, err := os.ReadFile(goldenPath)
+ require.NoError(t, err, "failed to read golden file (run with -update-openapi-spec to generate it)")
+
+ require.Equal(t, string(goldenData), resp.Body,
+ "OpenAPI 3.1 spec does not match golden file. Run 'go test -update-openapi-spec' to update.")
+
+ // Verify version field is 3.1.0.
+ var spec map[string]any
+ err = yaml.Unmarshal([]byte(resp.Body), &spec)
+ require.NoError(t, err)
+ require.Equal(t, "3.1.0", spec["openapi"], "OpenAPI version should be 3.1.0")
+
+ // Verify /notifications/live is NOT present in 3.1 spec.
+ paths := spec["paths"].(map[string]any)
+ _, found := paths["/notifications/live"]
+ require.False(t, found, "/notifications/live should not be in OpenAPI 3.1 spec")
+}
+
+// TestOpenAPIGolden_3_2 verifies that the OpenAPI 3.2 spec matches the golden file.
+func TestOpenAPIGolden_3_2(t *testing.T) {
+ // Create an API instance to serve the OpenAPI spec.
+ api := newTestAPI(t, testhelpers.APIConfig{})
+
+ // Fetch the OpenAPI 3.2 spec from the API with query parameter.
+ resp := testhelpers.GET(t, api, "/api/v1/openapi.yaml?openapi_version=3.2")
+ require.Equal(t, 200, resp.StatusCode, "expected HTTP 200 for OpenAPI spec endpoint")
+ require.NotEmpty(t, resp.Body, "OpenAPI spec should not be empty")
+
+ goldenPath := filepath.Join("testdata", "openapi_3.2_golden.yaml")
+
+ if *updateOpenAPISpec {
+ // Update mode: write the current spec to the golden file.
+ t.Logf("Updating golden file: %s", goldenPath)
+
+ // Ensure the testdata directory exists.
+ err := os.MkdirAll(filepath.Dir(goldenPath), 0o755)
+ require.NoError(t, err, "failed to create testdata directory")
+
+ // Write the golden file.
+ err = os.WriteFile(goldenPath, []byte(resp.Body), 0o644)
+ require.NoError(t, err, "failed to write golden file")
+
+ t.Logf("Golden file updated successfully")
+ return
+ }
+
+ // Comparison mode: verify the spec matches the golden file.
+ goldenData, err := os.ReadFile(goldenPath)
+ require.NoError(t, err, "failed to read golden file (run with -update-openapi-spec to generate it)")
+
+ require.Equal(t, string(goldenData), resp.Body,
+ "OpenAPI 3.2 spec does not match golden file. Run 'go test -update-openapi-spec' to update.")
+
+ // Verify version field is 3.2.0.
+ var spec map[string]any
+ err = yaml.Unmarshal([]byte(resp.Body), &spec)
+ require.NoError(t, err)
+ require.Equal(t, "3.2.0", spec["openapi"], "OpenAPI version should be 3.2.0")
+
+ // Verify /notifications/live IS present in 3.2 spec.
+ paths := spec["paths"].(map[string]any)
+ _, found := paths["/notifications/live"]
+ require.True(t, found, "/notifications/live should be in OpenAPI 3.2 spec")
+}
+
+// TestOpenAPIVersionSelection verifies version query parameter handling.
+func TestOpenAPIVersionSelection(t *testing.T) {
+ api := newTestAPI(t, testhelpers.APIConfig{})
+
+ tests := []struct {
+ name string
+ url string
+ expectedVersion string
+ expectLivePath bool
+ }{
+ {
+ name: "default to 3.1.0",
+ url: "/api/v1/openapi.yaml",
+ expectedVersion: "3.1.0",
+ expectLivePath: false,
+ },
+ {
+ name: "explicit 3.1",
+ url: "/api/v1/openapi.yaml?openapi_version=3.1",
+ expectedVersion: "3.1.0",
+ expectLivePath: false,
+ },
+ {
+ name: "explicit 3.2",
+ url: "/api/v1/openapi.yaml?openapi_version=3.2",
+ expectedVersion: "3.2.0",
+ expectLivePath: true,
+ },
+ {
+ name: "invalid version defaults to 3.1.0",
+ url: "/api/v1/openapi.yaml?openapi_version=4.0",
+ expectedVersion: "3.1.0",
+ expectLivePath: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ resp := testhelpers.GET(t, api, tc.url)
+ require.Equal(t, 200, resp.StatusCode)
+
+ var spec map[string]any
+ err := yaml.Unmarshal([]byte(resp.Body), &spec)
+ require.NoError(t, err)
+
+ require.Equal(t, tc.expectedVersion, spec["openapi"])
+
+ paths := spec["paths"].(map[string]any)
+ _, found := paths["/notifications/live"]
+ require.Equal(t, tc.expectLivePath, found)
+ })
+ }
+}
diff --git a/web/api/v1/openapi_helpers.go b/web/api/v1/openapi_helpers.go
new file mode 100644
index 0000000000..76f6001693
--- /dev/null
+++ b/web/api/v1/openapi_helpers.go
@@ -0,0 +1,343 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "time"
+
+ jsoniter "github.com/json-iterator/go"
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/pb33f/libopenapi/orderedmap"
+ yaml "go.yaml.in/yaml/v4"
+
+ "github.com/prometheus/prometheus/promql"
+)
+
+// Helper functions for building common structures.
+
+// exampleTime is a reference time used for timestamp examples.
+var exampleTime = time.Date(2026, 1, 2, 13, 37, 0, 0, time.UTC)
+
+func boolPtr(b bool) *bool {
+ return &b
+}
+
+func int64Ptr(i int64) *int64 {
+ return &i
+}
+
+type example struct {
+ name string
+ value any
+}
+
+// exampleMap creates an Examples map from the provided examples.
+func exampleMap(exs []example) *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+ for _, ex := range exs {
+ examples.Set(ex.name, &base.Example{
+ Value: createYAMLNode(ex.value),
+ })
+ }
+ return examples
+}
+
+func schemaRef(ref string) *base.SchemaProxy {
+ return base.CreateSchemaProxyRef(ref)
+}
+
+func schemaFromType(t string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{Type: []string{t}})
+}
+
+func stringSchema() *base.SchemaProxy {
+ return schemaFromType("string")
+}
+
+func integerSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Format: "int64",
+ })
+}
+
+func stringSchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Description: description,
+ })
+}
+
+func stringSchemaWithDescriptionAndExample(description string, example any) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Description: description,
+ Example: createYAMLNode(example),
+ })
+}
+
+func integerSchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Format: "int64",
+ Description: description,
+ })
+}
+
+func integerSchemaWithDescriptionAndExample(description string, example any) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Format: "int64",
+ Description: description,
+ Example: createYAMLNode(example),
+ })
+}
+
+func stringArraySchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Description: description,
+ })
+}
+
+func stringArraySchemaWithDescriptionAndExample(description string, example any) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Description: description,
+ Example: createYAMLNode(example),
+ })
+}
+
+func statusSchema() *base.SchemaProxy {
+ successNode := &yaml.Node{Kind: yaml.ScalarNode, Value: "success"}
+ errorNode := &yaml.Node{Kind: yaml.ScalarNode, Value: "error"}
+ exampleNode := &yaml.Node{Kind: yaml.ScalarNode, Value: "success"}
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Enum: []*yaml.Node{successNode, errorNode},
+ Description: "Response status.",
+ Example: exampleNode,
+ })
+}
+
+func warningsSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Description: "Only set if there were warnings while executing the request. There will still be data in the data field.",
+ })
+}
+
+func infosSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Description: "Only set if there were info-level annotations while executing the request.",
+ })
+}
+
+func timestampSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Format: "date-time",
+ Description: "RFC3339 timestamp.",
+ }),
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Format: "unixtime",
+ Description: "Unix timestamp in seconds.",
+ }),
+ },
+ Description: "Timestamp in RFC3339 format or Unix timestamp in seconds.",
+ })
+}
+
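+// stringSchemaWithConstValue returns a string schema constrained to a single
+// value. A one-element enum models the constant, which keeps the schema
+// portable across the OpenAPI 3.1 and 3.2 renderings of the spec.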
+func stringSchemaWithConstValue(value string) *base.SchemaProxy {
+ node := &yaml.Node{Kind: yaml.ScalarNode, Value: value}
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Enum: []*yaml.Node{node},
+ })
+}
+
+func dateTimeSchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Format: "date-time",
+ Description: description,
+ })
+}
+
+func numberSchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Format: "double",
+ Description: description,
+ })
+}
+
+func errorResponse() *v3.Response {
+ content := orderedmap.New[string, *v3.MediaType]()
+ content.Set("application/json", &v3.MediaType{
+ Schema: schemaRef("#/components/schemas/Error"),
+ })
+ return &v3.Response{
+ Description: "Error",
+ Content: content,
+ }
+}
+
+func noContentResponse() *v3.Response {
+ return &v3.Response{Description: "No Content"}
+}
+
+func responsesNoContent() *v3.Responses {
+ codes := orderedmap.New[string, *v3.Response]()
+ codes.Set("204", noContentResponse())
+ codes.Set("default", errorResponse())
+ return &v3.Responses{Codes: codes}
+}
+
+func pathParam(name, description string, schema *base.SchemaProxy) *v3.Parameter {
+ return &v3.Parameter{
+ Name: name,
+ In: "path",
+ Description: description,
+ Required: boolPtr(true),
+ Schema: schema,
+ }
+}
+
+// createYAMLNode converts Go data to yaml.Node for use in examples.
+func createYAMLNode(data any) *yaml.Node {
+ node := &yaml.Node{}
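+	// Round-trip the value through YAML to obtain a node. Errors are ignored
+	// on the assumption that inputs are static, well-formed example values
+	// defined in this package.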
+	out, _ := yaml.Marshal(data)
+	_ = yaml.Unmarshal(out, node)
+ return node
+}
+
+// formRequestBodyWithExamples creates a form-encoded request body with examples.
+func formRequestBodyWithExamples(schemaRef string, examples *orderedmap.Map[string, *base.Example], description string) *v3.RequestBody {
+ content := orderedmap.New[string, *v3.MediaType]()
+ mediaType := &v3.MediaType{
+ Schema: base.CreateSchemaProxyRef("#/components/schemas/" + schemaRef),
+ }
+ if examples != nil {
+ mediaType.Examples = examples
+ }
+ content.Set("application/x-www-form-urlencoded", mediaType)
+ return &v3.RequestBody{
+ Required: boolPtr(true),
+ Description: description,
+ Content: content,
+ }
+}
+
+// jsonResponseWithExamples creates a JSON response with examples.
+func jsonResponseWithExamples(schemaRef string, examples *orderedmap.Map[string, *base.Example], description string) *v3.Response {
+ content := orderedmap.New[string, *v3.MediaType]()
+ mediaType := &v3.MediaType{
+ Schema: base.CreateSchemaProxyRef("#/components/schemas/" + schemaRef),
+ }
+ if examples != nil {
+ mediaType.Examples = examples
+ }
+ content.Set("application/json", mediaType)
+ return &v3.Response{
+ Description: description,
+ Content: content,
+ }
+}
+
+// responsesWithErrorExamples creates responses with both success and error examples.
+func responsesWithErrorExamples(okSchemaRef string, successExamples, errorExamples *orderedmap.Map[string, *base.Example], successDescription, errorDescription string) *v3.Responses {
+ codes := orderedmap.New[string, *v3.Response]()
+ codes.Set("200", jsonResponseWithExamples(okSchemaRef, successExamples, successDescription))
+ codes.Set("default", jsonResponseWithExamples("Error", errorExamples, errorDescription))
+ return &v3.Responses{Codes: codes}
+}
+
+// timestampExamples returns examples for timestamp parameters (RFC3339 and epoch).
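+// For exampleTime (2026-01-02T13:37:00Z) this yields the RFC3339 string
+// "2026-01-02T13:37:00Z" and the epoch value 1767361020.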
+func timestampExamples(t time.Time) []example {
+ return []example{
+ {"RFC3339", t.Format(time.RFC3339Nano)},
+ {"epoch", t.Unix()},
+ }
+}
+
+// queryParamWithExample creates a query parameter with examples.
+func queryParamWithExample(name, description string, required bool, schema *base.SchemaProxy, examples []example) *v3.Parameter {
+ param := &v3.Parameter{
+ Name: name,
+ In: "query",
+ Description: description,
+ Required: &required,
+ Explode: boolPtr(false),
+ Schema: schema,
+ }
+ if len(examples) > 0 {
+ param.Examples = exampleMap(examples)
+ }
+ return param
+}
+
+// marshalToYAMLNode marshals a value using jsoniter (production marshaling) and converts to yaml.Node.
+// The result is an inline JSON representation that preserves integer types for timestamps.
+func marshalToYAMLNode(v any) *yaml.Node {
+ jsonAPI := jsoniter.ConfigCompatibleWithStandardLibrary
+ jsonBytes, err := jsonAPI.Marshal(v)
+ if err != nil {
+ panic(err)
+ }
+ node := &yaml.Node{}
+ if err := yaml.Unmarshal(jsonBytes, node); err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// vectorExample creates an example for a vector query response using production marshaling.
+func vectorExample(v promql.Vector) *yaml.Node {
+ type response struct {
+ Status string `json:"status"`
+ Data struct {
+ ResultType string `json:"resultType"`
+ Result promql.Vector `json:"result"`
+ } `json:"data"`
+ }
+ resp := response{Status: "success"}
+ resp.Data.ResultType = "vector"
+ resp.Data.Result = v
+ return marshalToYAMLNode(resp)
+}
+
+// matrixExample creates an example for a matrix query response using production marshaling.
+func matrixExample(m promql.Matrix) *yaml.Node {
+ type response struct {
+ Status string `json:"status"`
+ Data struct {
+ ResultType string `json:"resultType"`
+ Result promql.Matrix `json:"result"`
+ } `json:"data"`
+ }
+ resp := response{Status: "success"}
+ resp.Data.ResultType = "matrix"
+ resp.Data.Result = m
+ return marshalToYAMLNode(resp)
+}
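+
+// Both helpers above render the same JSON shape the production API emits,
+// for instance {"status":"success","data":{"resultType":"vector","result":[...]}}.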
diff --git a/web/api/v1/openapi_paths.go b/web/api/v1/openapi_paths.go
new file mode 100644
index 0000000000..2f5ab592f7
--- /dev/null
+++ b/web/api/v1/openapi_paths.go
@@ -0,0 +1,626 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines all API path specifications including parameters, request bodies,
+// and response schemas. Each path definition corresponds to an endpoint registered in api.go.
+package v1
+
+import (
+ "time"
+
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/pb33f/libopenapi/orderedmap"
+)
+
+// Path definition methods for API endpoints.
+
+func (*OpenAPIBuilder) queryPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("limit", "The maximum number of metrics to return.", false, integerSchema(), []example{{"example", 100}}),
+ queryParamWithExample("time", "The evaluation timestamp (optional, defaults to current time).", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("query", "The PromQL query to execute.", true, stringSchema(), []example{{"example", "up"}}),
+ queryParamWithExample("timeout", "Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.", false, stringSchema(), []example{{"example", "30s"}}),
+ queryParamWithExample("lookback_delta", "Override the lookback period for this query. Optional.", false, stringSchema(), []example{{"example", "5m"}}),
+ queryParamWithExample("stats", "When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.", false, stringSchema(), []example{{"example", "all"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "query",
+ Summary: "Evaluate an instant query",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("QueryOutputBody", queryResponseExamples(), errorResponseExamples(), "Query executed successfully.", "Error executing query."),
+ },
+ Post: &v3.Operation{
+ OperationId: "query-post",
+ Summary: "Evaluate an instant query",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("QueryPostInputBody", queryPostExamples(), "Submit an instant query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("QueryOutputBody", queryResponseExamples(), errorResponseExamples(), "Instant query executed successfully.", "Error executing instant query."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) queryRangePath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("limit", "The maximum number of metrics to return.", false, integerSchema(), []example{{"example", 100}}),
+ queryParamWithExample("start", "The start time of the query.", true, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "The end time of the query.", true, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("step", "The step size of the query.", true, stringSchema(), []example{{"example", "15s"}}),
+ queryParamWithExample("query", "The query to execute.", true, stringSchema(), []example{{"example", "rate(prometheus_http_requests_total{handler=\"/api/v1/query\"}[5m])"}}),
+ queryParamWithExample("timeout", "Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.", false, stringSchema(), []example{{"example", "30s"}}),
+ queryParamWithExample("lookback_delta", "Override the lookback period for this query. Optional.", false, stringSchema(), []example{{"example", "5m"}}),
+ queryParamWithExample("stats", "When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.", false, stringSchema(), []example{{"example", "all"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "query-range",
+ Summary: "Evaluate a range query",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("QueryRangeOutputBody", queryRangeResponseExamples(), errorResponseExamples(), "Range query executed successfully.", "Error executing range query."),
+ },
+ Post: &v3.Operation{
+ OperationId: "query-range-post",
+ Summary: "Evaluate a range query",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("QueryRangePostInputBody", queryRangePostExamples(), "Submit a range query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("QueryRangeOutputBody", queryRangeResponseExamples(), errorResponseExamples(), "Range query executed successfully.", "Error executing range query."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) queryExemplarsPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("start", "Start timestamp for exemplars query.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for exemplars query.", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("query", "PromQL query to extract exemplars for.", true, stringSchema(), []example{{"example", "prometheus_http_requests_total"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "query-exemplars",
+ Summary: "Query exemplars",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("QueryExemplarsOutputBody", queryExemplarsResponseExamples(), errorResponseExamples(), "Exemplars retrieved successfully.", "Error retrieving exemplars."),
+ },
+ Post: &v3.Operation{
+ OperationId: "query-exemplars-post",
+ Summary: "Query exemplars",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("QueryExemplarsPostInputBody", queryExemplarsPostExamples(), "Submit an exemplars query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("QueryExemplarsOutputBody", queryExemplarsResponseExamples(), errorResponseExamples(), "Exemplars query completed successfully.", "Error processing exemplars query."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) formatQueryPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("query", "PromQL expression to format.", true, stringSchema(), []example{{"example", "sum(rate(http_requests_total[5m])) by (job)"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "format-query",
+ Summary: "Format a PromQL query",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("FormatQueryOutputBody", formatQueryResponseExamples(), errorResponseExamples(), "Query formatted successfully.", "Error formatting query."),
+ },
+ Post: &v3.Operation{
+ OperationId: "format-query-post",
+ Summary: "Format a PromQL query",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("FormatQueryPostInputBody", formatQueryPostExamples(), "Submit a PromQL query to format. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("FormatQueryOutputBody", formatQueryResponseExamples(), errorResponseExamples(), "Query formatting completed successfully.", "Error formatting query."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) parseQueryPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("query", "PromQL expression to parse.", true, stringSchema(), []example{{"example", "up{job=\"prometheus\"}"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "parse-query",
+ Summary: "Parse a PromQL query",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("ParseQueryOutputBody", parseQueryResponseExamples(), errorResponseExamples(), "Query parsed successfully.", "Error parsing query."),
+ },
+ Post: &v3.Operation{
+ OperationId: "parse-query-post",
+ Summary: "Parse a PromQL query",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("ParseQueryPostInputBody", parseQueryPostExamples(), "Submit a PromQL query to parse. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("ParseQueryOutputBody", parseQueryResponseExamples(), errorResponseExamples(), "Query parsed successfully via POST.", "Error parsing query via POST."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) labelsPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("start", "Start timestamp for label names query.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for label names query.", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("match[]", "Series selector argument.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{job=\"prometheus\"}"}}}),
+ queryParamWithExample("limit", "Maximum number of label names to return.", false, integerSchema(), []example{{"example", 100}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "labels",
+ Summary: "Get label names",
+ Tags: []string{"labels"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("LabelsOutputBody", labelsResponseExamples(), errorResponseExamples(), "Label names retrieved successfully.", "Error retrieving label names."),
+ },
+ Post: &v3.Operation{
+ OperationId: "labels-post",
+ Summary: "Get label names",
+ Tags: []string{"labels"},
+ RequestBody: formRequestBodyWithExamples("LabelsPostInputBody", labelsPostExamples(), "Submit a label names query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("LabelsOutputBody", labelsResponseExamples(), errorResponseExamples(), "Label names retrieved successfully via POST.", "Error retrieving label names via POST."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) labelValuesPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ pathParam("name", "Label name.", stringSchema()),
+ queryParamWithExample("start", "Start timestamp for label values query.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for label values query.", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("match[]", "Series selector argument.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{job=\"prometheus\"}"}}}),
+ queryParamWithExample("limit", "Maximum number of label values to return.", false, integerSchema(), []example{{"example", 1000}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "label-values",
+ Summary: "Get label values",
+ Tags: []string{"labels"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("LabelValuesOutputBody", labelValuesResponseExamples(), errorResponseExamples(), "Label values retrieved successfully.", "Error retrieving label values."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) seriesPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("start", "Start timestamp for series query.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for series query.", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("match[]", "Series selector argument.", true, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{job=\"prometheus\"}"}}}),
+ queryParamWithExample("limit", "Maximum number of series to return.", false, integerSchema(), []example{{"example", 100}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "series",
+ Summary: "Find series by label matchers",
+ Tags: []string{"series"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("SeriesOutputBody", seriesResponseExamples(), errorResponseExamples(), "Series returned matching the provided label matchers.", "Error retrieving series."),
+ },
+ Post: &v3.Operation{
+ OperationId: "series-post",
+ Summary: "Find series by label matchers",
+ Tags: []string{"series"},
+ RequestBody: formRequestBodyWithExamples("SeriesPostInputBody", seriesPostExamples(), "Submit a series query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("SeriesOutputBody", seriesResponseExamples(), errorResponseExamples(), "Series returned matching the provided label matchers via POST.", "Error retrieving series via POST."),
+ },
+ Delete: &v3.Operation{
+ OperationId: "delete-series",
+ Summary: "Delete series",
+			Description: "Delete series matching selectors. Note: this method is deprecated; use POST /admin/tsdb/delete_series instead.",
+ Tags: []string{"series"},
+ Responses: responsesWithErrorExamples("SeriesDeleteOutputBody", seriesDeleteResponseExamples(), errorResponseExamples(), "Series marked for deletion.", "Error deleting series."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) metadataPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("limit", "The maximum number of metrics to return.", false, integerSchema(), []example{{"example", 100}}),
+ queryParamWithExample("limit_per_metric", "The maximum number of metadata entries per metric.", false, integerSchema(), []example{{"example", 10}}),
+ queryParamWithExample("metric", "A metric name to filter metadata for.", false, stringSchema(), []example{{"example", "http_requests_total"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-metadata",
+ Summary: "Get metadata",
+ Tags: []string{"metadata"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("MetadataOutputBody", metadataResponseExamples(), errorResponseExamples(), "Metric metadata retrieved successfully.", "Error retrieving metadata."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) scrapePoolsPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-scrape-pools",
+ Summary: "Get scrape pools",
+ Tags: []string{"targets"},
+ Responses: responsesWithErrorExamples("ScrapePoolsOutputBody", scrapePoolsResponseExamples(), errorResponseExamples(), "Scrape pools retrieved successfully.", "Error retrieving scrape pools."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) targetsPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("scrapePool", "Filter targets by scrape pool name.", false, stringSchema(), []example{{"example", "prometheus"}}),
+ queryParamWithExample("state", "Filter by state: active, dropped, or any.", false, stringSchema(), []example{{"example", "active"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-targets",
+ Summary: "Get targets",
+ Tags: []string{"targets"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("TargetsOutputBody", targetsResponseExamples(), errorResponseExamples(), "Target discovery information retrieved successfully.", "Error retrieving targets."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) targetsMetadataPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("match_target", "Label selector to filter targets.", false, stringSchema(), []example{{"example", "{job=\"prometheus\"}"}}),
+ queryParamWithExample("metric", "Metric name to retrieve metadata for.", false, stringSchema(), []example{{"example", "http_requests_total"}}),
+ queryParamWithExample("limit", "Maximum number of targets to match.", false, integerSchema(), []example{{"example", 10}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-targets-metadata",
+ Summary: "Get targets metadata",
+ Tags: []string{"targets"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("TargetMetadataOutputBody", targetsMetadataResponseExamples(), errorResponseExamples(), "Target metadata retrieved successfully.", "Error retrieving target metadata."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) targetsRelabelStepsPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("scrapePool", "Name of the scrape pool.", true, stringSchema(), []example{{"example", "prometheus"}}),
+ queryParamWithExample("labels", "JSON-encoded labels to apply relabel rules to.", true, stringSchema(), []example{{"example", "{\"__address__\":\"localhost:9090\",\"job\":\"prometheus\"}"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-targets-relabel-steps",
+ Summary: "Get targets relabel steps",
+ Tags: []string{"targets"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("TargetRelabelStepsOutputBody", targetsRelabelStepsResponseExamples(), errorResponseExamples(), "Relabel steps retrieved successfully.", "Error retrieving relabel steps."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) rulesPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("type", "Filter by rule type: alert or record.", false, stringSchema(), []example{{"example", "alert"}}),
+ queryParamWithExample("rule_name[]", "Filter by rule name.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"HighErrorRate"}}}),
+ queryParamWithExample("rule_group[]", "Filter by rule group name.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"example_alerts"}}}),
+ queryParamWithExample("file[]", "Filter by file path.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"/etc/prometheus/rules.yml"}}}),
+ queryParamWithExample("match[]", "Label matchers to filter rules.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{severity=\"critical\"}"}}}),
+ queryParamWithExample("exclude_alerts", "Exclude active alerts from response.", false, stringSchema(), []example{{"example", "false"}}),
+ queryParamWithExample("group_limit", "Maximum number of rule groups to return.", false, integerSchema(), []example{{"example", 100}}),
+ queryParamWithExample("group_next_token", "Pagination token for next page.", false, stringSchema(), []example{{"example", "abc123"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "rules",
+ Summary: "Get alerting and recording rules",
+ Tags: []string{"rules"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("RulesOutputBody", rulesResponseExamples(), errorResponseExamples(), "Rules retrieved successfully.", "Error retrieving rules."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) alertsPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "alerts",
+ Summary: "Get active alerts",
+ Tags: []string{"alerts"},
+ Responses: responsesWithErrorExamples("AlertsOutputBody", alertsResponseExamples(), errorResponseExamples(), "Active alerts retrieved successfully.", "Error retrieving alerts."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) alertmanagersPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "alertmanagers",
+ Summary: "Get Alertmanager discovery",
+ Tags: []string{"alerts"},
+ Responses: responsesWithErrorExamples("AlertmanagersOutputBody", alertmanagersResponseExamples(), errorResponseExamples(), "Alertmanager targets retrieved successfully.", "Error retrieving Alertmanager targets."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusConfigPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-config",
+ Summary: "Get status config",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusConfigOutputBody", statusConfigResponseExamples(), errorResponseExamples(), "Configuration retrieved successfully.", "Error retrieving configuration."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusRuntimeInfoPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-runtimeinfo",
+ Summary: "Get status runtimeinfo",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusRuntimeInfoOutputBody", statusRuntimeInfoResponseExamples(), errorResponseExamples(), "Runtime information retrieved successfully.", "Error retrieving runtime information."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusBuildInfoPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-buildinfo",
+ Summary: "Get status buildinfo",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusBuildInfoOutputBody", statusBuildInfoResponseExamples(), errorResponseExamples(), "Build information retrieved successfully.", "Error retrieving build information."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusFlagsPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-flags",
+ Summary: "Get status flags",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusFlagsOutputBody", statusFlagsResponseExamples(), errorResponseExamples(), "Command-line flags retrieved successfully.", "Error retrieving flags."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusTSDBPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("limit", "The maximum number of items to return per category.", false, integerSchema(), []example{{"example", 10}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "status-tsdb",
+ Summary: "Get TSDB status",
+ Tags: []string{"status"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("StatusTSDBOutputBody", statusTSDBResponseExamples(), errorResponseExamples(), "TSDB status retrieved successfully.", "Error retrieving TSDB status."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusTSDBBlocksPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "status-tsdb-blocks",
+ Summary: "Get TSDB blocks information",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusTSDBBlocksOutputBody", statusTSDBBlocksResponseExamples(), errorResponseExamples(), "TSDB blocks information retrieved successfully.", "Error retrieving TSDB blocks."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusWALReplayPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-walreplay",
+ Summary: "Get status walreplay",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusWALReplayOutputBody", statusWALReplayResponseExamples(), errorResponseExamples(), "WAL replay status retrieved successfully.", "Error retrieving WAL replay status."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) adminDeleteSeriesPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("match[]", "Series selectors to identify series to delete.", true, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{__name__=~\"test.*\"}"}}}),
+ queryParamWithExample("start", "Start timestamp for deletion.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for deletion.", false, timestampSchema(), timestampExamples(exampleTime)),
+ }
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "deleteSeriesPost",
+ Summary: "Delete series matching selectors",
+ Description: "Deletes data for a selection of series in a time range.",
+ Tags: []string{"admin"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("DeleteSeriesOutputBody", deleteSeriesResponseExamples(), errorResponseExamples(), "Series deleted successfully.", "Error deleting series."),
+ },
+ Put: &v3.Operation{
+ OperationId: "deleteSeriesPut",
+ Summary: "Delete series matching selectors via PUT",
+			Description: "Deletes data for a selection of series in a time range using the PUT method.",
+ Tags: []string{"admin"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("DeleteSeriesOutputBody", deleteSeriesResponseExamples(), errorResponseExamples(), "Series deleted successfully via PUT.", "Error deleting series via PUT."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) adminCleanTombstonesPath() *v3.PathItem {
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "cleanTombstonesPost",
+ Summary: "Clean tombstones in the TSDB",
+ Description: "Removes deleted data from disk and cleans up existing tombstones.",
+ Tags: []string{"admin"},
+ Responses: responsesWithErrorExamples("CleanTombstonesOutputBody", cleanTombstonesResponseExamples(), errorResponseExamples(), "Tombstones cleaned successfully.", "Error cleaning tombstones."),
+ },
+ Put: &v3.Operation{
+ OperationId: "cleanTombstonesPut",
+ Summary: "Clean tombstones in the TSDB via PUT",
+			Description: "Removes deleted data from disk and cleans up existing tombstones using the PUT method.",
+ Tags: []string{"admin"},
+ Responses: responsesWithErrorExamples("CleanTombstonesOutputBody", cleanTombstonesResponseExamples(), errorResponseExamples(), "Tombstones cleaned successfully via PUT.", "Error cleaning tombstones via PUT."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) adminSnapshotPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("skip_head", "If true, do not snapshot data in the head block.", false, stringSchema(), []example{{"example", "false"}}),
+ }
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "snapshotPost",
+ Summary: "Create a snapshot of the TSDB",
+ Description: "Creates a snapshot of all current data.",
+ Tags: []string{"admin"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("SnapshotOutputBody", snapshotResponseExamples(), errorResponseExamples(), "Snapshot created successfully.", "Error creating snapshot."),
+ },
+ Put: &v3.Operation{
+ OperationId: "snapshotPut",
+ Summary: "Create a snapshot of the TSDB via PUT",
+			Description: "Creates a snapshot of all current data using the PUT method.",
+ Tags: []string{"admin"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("SnapshotOutputBody", snapshotResponseExamples(), errorResponseExamples(), "Snapshot created successfully via PUT.", "Error creating snapshot via PUT."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) remoteReadPath() *v3.PathItem {
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "remoteRead",
+ Summary: "Remote read endpoint",
+			Description: "Prometheus remote read endpoint, allowing compatible remote systems to read raw series data. Accepts and returns Protocol Buffer encoded data.",
+ Tags: []string{"remote"},
+ Responses: responsesNoContent(),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) remoteWritePath() *v3.PathItem {
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "remoteWrite",
+ Summary: "Remote write endpoint",
+ Description: "Prometheus remote write endpoint for sending metrics. Accepts Protocol Buffer encoded write requests.",
+ Tags: []string{"remote"},
+ Responses: responsesNoContent(),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) otlpWritePath() *v3.PathItem {
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "otlpWrite",
+ Summary: "OTLP metrics write endpoint",
+ Description: "OpenTelemetry Protocol metrics ingestion endpoint. Accepts OTLP/HTTP metrics in Protocol Buffer format.",
+ Tags: []string{"otlp"},
+ Responses: responsesNoContent(),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) notificationsPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-notifications",
+ Summary: "Get notifications",
+ Tags: []string{"notifications"},
+ Responses: responsesWithErrorExamples("NotificationsOutputBody", notificationsResponseExamples(), errorResponseExamples(), "Notifications retrieved successfully.", "Error retrieving notifications."),
+ },
+ }
+}
+
+// notificationsLivePath defines the /notifications/live endpoint.
+// This endpoint uses OpenAPI 3.2's itemSchema feature for documenting SSE streams.
+// It is excluded from the OpenAPI 3.1 specification.
+func (*OpenAPIBuilder) notificationsLivePath() *v3.PathItem {
+ codes := orderedmap.New[string, *v3.Response]()
+ content := orderedmap.New[string, *v3.MediaType]()
+
+ // Create a schema for the SSE message structure.
+ // Each SSE message has a 'data' field containing JSON.
+ sseItemProps := orderedmap.New[string, *base.SchemaProxy]()
+ sseItemProps.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Description: "SSE data field containing JSON-encoded notification.",
+ ContentMediaType: "application/json",
+ ContentSchema: schemaRef("#/components/schemas/Notification"),
+ }))
+
+ content.Set("text/event-stream", &v3.MediaType{
+ // Use ItemSchema (OpenAPI 3.2) instead of Schema to describe each SSE message.
+ ItemSchema: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+			Title:                "Server-Sent Event Message",
+ Description: "A single SSE message. The data field contains a JSON-encoded Notification object.",
+ Properties: sseItemProps,
+ Required: []string{"data"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ }),
+ Examples: notificationLiveExamples(),
+ })
+
+ codes.Set("200", &v3.Response{
+ Description: "Server-sent events stream established.",
+ Content: content,
+ })
+ codes.Set("default", errorResponse())
+
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "notifications-live",
+ Summary: "Stream live notifications via Server-Sent Events",
+ Description: "Subscribe to real-time server notifications using SSE. Each event contains a JSON-encoded Notification object in the data field.",
+ Tags: []string{"notifications"},
+ Responses: &v3.Responses{Codes: codes},
+ },
+ }
+}
+
+func (*OpenAPIBuilder) featuresPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-features",
+ Summary: "Get features",
+ Tags: []string{"features"},
+ Responses: responsesWithErrorExamples("FeaturesOutputBody", featuresResponseExamples(), errorResponseExamples(), "Feature flags retrieved successfully.", "Error retrieving features."),
+ },
+ }
+}
diff --git a/web/api/v1/openapi_schemas.go b/web/api/v1/openapi_schemas.go
new file mode 100644
index 0000000000..3a567983f4
--- /dev/null
+++ b/web/api/v1/openapi_schemas.go
@@ -0,0 +1,1223 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines all OpenAPI schema definitions for API request and response types.
+// Schemas are organized by functional area: query, labels, series, metadata, targets,
+// rules, alerts, and status endpoints.
+package v1
+
+import (
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/pb33f/libopenapi/orderedmap"
+)
+
+// Schema definitions and components builder.
+
+func (b *OpenAPIBuilder) buildComponents() *v3.Components {
+ schemas := orderedmap.New[string, *base.SchemaProxy]()
+
+ // Core schemas.
+ schemas.Set("Error", b.errorSchema())
+ schemas.Set("Labels", b.labelsSchema())
+
+ // Query schemas.
+ schemas.Set("QueryOutputBody", b.responseBodySchema("QueryData", "Response body for instant query."))
+ schemas.Set("QueryRangeOutputBody", b.responseBodySchema("QueryData", "Response body for range query."))
+ schemas.Set("QueryPostInputBody", b.queryPostInputBodySchema())
+ schemas.Set("QueryRangePostInputBody", b.queryRangePostInputBodySchema())
+ schemas.Set("QueryExemplarsOutputBody", b.simpleResponseBodySchema())
+ schemas.Set("QueryExemplarsPostInputBody", b.queryExemplarsPostInputBodySchema())
+ schemas.Set("FormatQueryOutputBody", b.formatQueryOutputBodySchema())
+ schemas.Set("FormatQueryPostInputBody", b.formatQueryPostInputBodySchema())
+ schemas.Set("ParseQueryOutputBody", b.simpleResponseBodySchema())
+ schemas.Set("ParseQueryPostInputBody", b.parseQueryPostInputBodySchema())
+ schemas.Set("QueryData", b.queryDataSchema())
+ schemas.Set("FloatSample", b.floatSampleSchema())
+ schemas.Set("HistogramSample", b.histogramSampleSchema())
+ schemas.Set("FloatSeries", b.floatSeriesSchema())
+ schemas.Set("HistogramSeries", b.histogramSeriesSchema())
+ schemas.Set("HistogramValue", b.histogramValueSchema())
+
+ // Label schemas.
+ schemas.Set("LabelsOutputBody", b.stringArrayResponseBodySchema())
+ schemas.Set("LabelsPostInputBody", b.labelsPostInputBodySchema())
+ schemas.Set("LabelValuesOutputBody", b.stringArrayResponseBodySchema())
+
+ // Series schemas.
+ schemas.Set("SeriesOutputBody", b.labelsArrayResponseBodySchema())
+ schemas.Set("SeriesPostInputBody", b.seriesPostInputBodySchema())
+ schemas.Set("SeriesDeleteOutputBody", b.simpleResponseBodySchema())
+
+ // Metadata schemas.
+ schemas.Set("Metadata", b.metadataSchema())
+ schemas.Set("MetadataOutputBody", b.metadataOutputBodySchema())
+ schemas.Set("MetricMetadata", b.metricMetadataSchema())
+
+ // Target schemas.
+ schemas.Set("Target", b.targetSchema())
+ schemas.Set("DroppedTarget", b.droppedTargetSchema())
+ schemas.Set("TargetDiscovery", b.targetDiscoverySchema())
+ schemas.Set("TargetsOutputBody", b.refResponseBodySchema("TargetDiscovery", "Response body for targets endpoint."))
+ schemas.Set("TargetMetadataOutputBody", b.metricMetadataArrayResponseBodySchema())
+ schemas.Set("ScrapePoolsDiscovery", b.scrapePoolsDiscoverySchema())
+ schemas.Set("ScrapePoolsOutputBody", b.refResponseBodySchema("ScrapePoolsDiscovery", "Response body for scrape pools endpoint."))
+
+ // Relabel schemas.
+ schemas.Set("Config", b.configSchema())
+ schemas.Set("RelabelStep", b.relabelStepSchema())
+ schemas.Set("RelabelStepsResponse", b.relabelStepsResponseSchema())
+ schemas.Set("TargetRelabelStepsOutputBody", b.refResponseBodySchema("RelabelStepsResponse", "Response body for target relabel steps endpoint."))
+
+ // Rule schemas.
+ schemas.Set("RuleGroup", b.ruleGroupSchema())
+ schemas.Set("RuleDiscovery", b.ruleDiscoverySchema())
+ schemas.Set("RulesOutputBody", b.refResponseBodySchema("RuleDiscovery", "Response body for rules endpoint."))
+
+ // Alert schemas.
+ schemas.Set("Alert", b.alertSchema())
+ schemas.Set("AlertDiscovery", b.alertDiscoverySchema())
+ schemas.Set("AlertsOutputBody", b.refResponseBodySchema("AlertDiscovery", "Response body for alerts endpoint."))
+ schemas.Set("AlertmanagerTarget", b.alertmanagerTargetSchema())
+ schemas.Set("AlertmanagerDiscovery", b.alertmanagerDiscoverySchema())
+ schemas.Set("AlertmanagersOutputBody", b.refResponseBodySchema("AlertmanagerDiscovery", "Response body for alertmanagers endpoint."))
+
+ // Status schemas.
+ schemas.Set("StatusConfigData", b.statusConfigDataSchema())
+ schemas.Set("StatusConfigOutputBody", b.refResponseBodySchema("StatusConfigData", "Response body for status config endpoint."))
+ schemas.Set("RuntimeInfo", b.runtimeInfoSchema())
+ schemas.Set("StatusRuntimeInfoOutputBody", b.refResponseBodySchema("RuntimeInfo", "Response body for status runtime info endpoint."))
+ schemas.Set("PrometheusVersion", b.prometheusVersionSchema())
+ schemas.Set("StatusBuildInfoOutputBody", b.refResponseBodySchema("PrometheusVersion", "Response body for status build info endpoint."))
+ schemas.Set("StatusFlagsOutputBody", b.statusFlagsOutputBodySchema())
+ schemas.Set("HeadStats", b.headStatsSchema())
+ schemas.Set("TSDBStat", b.tsdbStatSchema())
+ schemas.Set("TSDBStatus", b.tsdbStatusSchema())
+ schemas.Set("StatusTSDBOutputBody", b.refResponseBodySchema("TSDBStatus", "Response body for status TSDB endpoint."))
+ schemas.Set("BlockDesc", b.blockDescSchema())
+ schemas.Set("BlockStats", b.blockStatsSchema())
+ schemas.Set("BlockMetaCompaction", b.blockMetaCompactionSchema())
+ schemas.Set("BlockMeta", b.blockMetaSchema())
+ schemas.Set("StatusTSDBBlocksData", b.statusTSDBBlocksDataSchema())
+ schemas.Set("StatusTSDBBlocksOutputBody", b.refResponseBodySchema("StatusTSDBBlocksData", "Response body for status TSDB blocks endpoint."))
+ schemas.Set("StatusWALReplayData", b.statusWALReplayDataSchema())
+ schemas.Set("StatusWALReplayOutputBody", b.refResponseBodySchema("StatusWALReplayData", "Response body for status WAL replay endpoint."))
+
+ // Admin schemas.
+ schemas.Set("DeleteSeriesOutputBody", b.statusOnlyResponseBodySchema())
+ schemas.Set("CleanTombstonesOutputBody", b.statusOnlyResponseBodySchema())
+ schemas.Set("DataStruct", b.dataStructSchema())
+ schemas.Set("SnapshotOutputBody", b.refResponseBodySchema("DataStruct", "Response body for snapshot endpoint."))
+
+ // Notification schemas.
+ schemas.Set("Notification", b.notificationSchema())
+ schemas.Set("NotificationsOutputBody", b.notificationArrayResponseBodySchema())
+
+ // Features schema.
+ schemas.Set("FeaturesOutputBody", b.simpleResponseBodySchema())
+
+ return &v3.Components{Schemas: schemas}
+}
+
+// Schema definitions using high-level structs.
+
+func (*OpenAPIBuilder) errorSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("errorType", stringSchemaWithDescriptionAndExample("Type of error that occurred.", "bad_data"))
+ props.Set("error", stringSchemaWithDescriptionAndExample("Human-readable error message.", "invalid parameter"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Error response.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "errorType", "error"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) labelsSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Label set represented as a key-value map.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: true},
+ })
+}
+
+func (*OpenAPIBuilder) responseBodySchema(dataSchemaRef, description string) *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", schemaRef("#/components/schemas/"+dataSchemaRef))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: description,
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
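+// refResponseBodySchema is a thin alias over responseBodySchema, kept so call
+// sites read as "envelope around a referenced data schema".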
+func (b *OpenAPIBuilder) refResponseBodySchema(dataSchemaRef, description string) *base.SchemaProxy {
+ return b.responseBodySchema(dataSchemaRef, description)
+}
+
+func (*OpenAPIBuilder) simpleResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Description: "Response data (structure varies by endpoint).",
+ Example: createYAMLNode(map[string]any{"result": "ok"}),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Generic response body.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusOnlyResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body containing only status.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) stringArrayResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Example: createYAMLNode([]string{"__name__", "job", "instance"}),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body with an array of strings.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) labelsArrayResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Labels")},
+ Example: createYAMLNode([]map[string]string{{"__name__": "up", "job": "prometheus", "instance": "localhost:9090"}}),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body with an array of label sets.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) metricMetadataArrayResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/MetricMetadata")},
+ Example: createYAMLNode([]map[string]any{
+ {
+ "target": map[string]string{
+ "instance": "localhost:9090",
+ "job": "prometheus",
+ },
+ "metric": "up",
+ "type": "gauge",
+ "help": "The current health status of the target",
+ "unit": "",
+ },
+ }),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body with an array of metric metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) notificationArrayResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Notification")},
+ Example: createYAMLNode([]map[string]any{
+ {"text": "Server is running", "date": "2023-07-21T20:00:00.000Z", "active": true},
+ }),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body with an array of notifications.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
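+// floatSampleSchema models one instant-vector sample, e.g.
+// {"metric": {"__name__": "up"}, "value": [1767436620, "1"]}.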
+func (*OpenAPIBuilder) floatSampleSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("metric", schemaRef("#/components/schemas/Labels"))
+ props.Set("value", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Timestamp and float value as [unixTimestamp, stringValue].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ Example: createYAMLNode([]any{1767436620, "1"}),
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "A sample with a float value.",
+ Required: []string{"metric", "value"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
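+// histogramValueSchema models a native histogram value. Each bucket is encoded
+// as [boundary_rule, lower, upper, count]; per the Prometheus HTTP API format,
+// boundary_rule is an integer from 0 to 3 indicating which bucket boundaries
+// are inclusive, while lower, upper, and count are strings.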
+func (*OpenAPIBuilder) histogramValueSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("count", stringSchemaWithDescription("Total count of observations."))
+ props.Set("sum", stringSchemaWithDescription("Sum of all observed values."))
+ props.Set("buckets", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Histogram buckets as [boundary_rule, lower, upper, count].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ })},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Native histogram value representation.",
+ Required: []string{"count", "sum"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) histogramSampleSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("metric", schemaRef("#/components/schemas/Labels"))
+ props.Set("histogram", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Timestamp and histogram value as [unixTimestamp, histogramObject].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ schemaRef("#/components/schemas/HistogramValue"),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ Example: createYAMLNode([]any{1767436620, map[string]any{"count": "60", "sum": "120", "buckets": []any{}}}),
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "A sample with a native histogram value.",
+ Required: []string{"metric", "histogram"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) floatSeriesSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("metric", schemaRef("#/components/schemas/Labels"))
+ props.Set("values", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Array of [timestamp, stringValue] pairs for float values.",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ })},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "A time series with float values.",
+ Required: []string{"metric", "values"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) histogramSeriesSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("metric", schemaRef("#/components/schemas/Labels"))
+ props.Set("histograms", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Array of [timestamp, histogramObject] pairs for histogram values.",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ schemaRef("#/components/schemas/HistogramValue"),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ })},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "A time series with native histogram values.",
+ Required: []string{"metric", "histograms"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
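+// queryDataSchema models the PromQL result union: "resultType" selects the
+// shape of "result" (vector, matrix, scalar, or string), expressed as an anyOf
+// over four inline object variants.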
+func (*OpenAPIBuilder) queryDataSchema() *base.SchemaProxy {
+ // Vector query result.
+ vectorProps := orderedmap.New[string, *base.SchemaProxy]()
+ vectorProps.Set("resultType", stringSchemaWithConstValue("vector"))
+ vectorProps.Set("result", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Array of samples (either float or histogram).",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ AnyOf: []*base.SchemaProxy{
+ schemaRef("#/components/schemas/FloatSample"),
+ schemaRef("#/components/schemas/HistogramSample"),
+ },
+ })},
+ }))
+
+ // Matrix query result.
+ matrixProps := orderedmap.New[string, *base.SchemaProxy]()
+ matrixProps.Set("resultType", stringSchemaWithConstValue("matrix"))
+ matrixProps.Set("result", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Array of time series (either float or histogram).",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ AnyOf: []*base.SchemaProxy{
+ schemaRef("#/components/schemas/FloatSeries"),
+ schemaRef("#/components/schemas/HistogramSeries"),
+ },
+ })},
+ }))
+
+ // Scalar query result.
+ scalarProps := orderedmap.New[string, *base.SchemaProxy]()
+ scalarProps.Set("resultType", stringSchemaWithConstValue("scalar"))
+ scalarProps.Set("result", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Scalar value as [timestamp, stringValue].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ }))
+
+ // String query result. As with the scalar result above, the first element is
+ // a numeric timestamp and the second the string value, so items allow both types.
+ stringResultProps := orderedmap.New[string, *base.SchemaProxy]()
+ stringResultProps.Set("resultType", stringSchemaWithConstValue("string"))
+ stringResultProps.Set("result", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "String value as [timestamp, stringValue].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Description: "Query result data. The structure of 'result' depends on 'resultType'.",
+ AnyOf: []*base.SchemaProxy{
+ // resultType: vector -> result: array of samples.
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Required: []string{"resultType", "result"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: vectorProps,
+ }),
+ // resultType: matrix -> result: array of series.
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Required: []string{"resultType", "result"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: matrixProps,
+ }),
+ // resultType: scalar -> result: [timestamp, value].
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Required: []string{"resultType", "result"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: scalarProps,
+ }),
+ // resultType: string -> result: [timestamp, stringValue].
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Required: []string{"resultType", "result"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: stringResultProps,
+ }),
+ },
+ Example: createYAMLNode(map[string]any{
+ "resultType": "vector",
+ "result": []map[string]any{
+ {
+ "metric": map[string]string{"__name__": "up", "job": "prometheus"},
+ "value": []any{1627845600, "1"},
+ },
+ },
+ }),
+ })
+}
+
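+// The *PostInputBodySchema helpers below describe form-encoded POST bodies.
+// For instance, an instant query could be issued as (host is illustrative):
+//
+//    curl -s http://localhost:9090/api/v1/query -d 'query=up' -d 'time=2023-07-21T20:10:51.781Z'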
+func (*OpenAPIBuilder) queryPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The PromQL query to execute.", "up"))
+ props.Set("time", stringSchemaWithDescriptionAndExample("Form field: The evaluation timestamp (optional, defaults to current time).", "2023-07-21T20:10:51.781Z"))
+ props.Set("limit", integerSchemaWithDescriptionAndExample("Form field: The maximum number of metrics to return.", 100))
+ props.Set("timeout", stringSchemaWithDescriptionAndExample("Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).", "30s"))
+ props.Set("lookback_delta", stringSchemaWithDescriptionAndExample("Form field: Override the lookback period for this query (optional).", "5m"))
+ props.Set("stats", stringSchemaWithDescriptionAndExample("Form field: When provided, include query statistics in the response (the special value 'all' enables more comprehensive statistics).", "all"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for instant query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) queryRangePostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The query to execute.", "rate(http_requests_total[5m])"))
+ props.Set("start", stringSchemaWithDescriptionAndExample("Form field: The start time of the query.", "2023-07-21T20:10:30.781Z"))
+ props.Set("end", stringSchemaWithDescriptionAndExample("Form field: The end time of the query.", "2023-07-21T20:20:30.781Z"))
+ props.Set("step", stringSchemaWithDescriptionAndExample("Form field: The step size of the query.", "15s"))
+ props.Set("limit", integerSchemaWithDescriptionAndExample("Form field: The maximum number of metrics to return.", 100))
+ props.Set("timeout", stringSchemaWithDescriptionAndExample("Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).", "30s"))
+ props.Set("lookback_delta", stringSchemaWithDescriptionAndExample("Form field: Override the lookback period for this query (optional).", "5m"))
+ props.Set("stats", stringSchemaWithDescriptionAndExample("Form field: When provided, include query statistics in the response (the special value 'all' enables more comprehensive statistics).", "all"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for range query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query", "start", "end", "step"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) queryExemplarsPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The query to execute.", "http_requests_total"))
+ props.Set("start", stringSchemaWithDescriptionAndExample("Form field: The start time of the query.", "2023-07-21T20:00:00.000Z"))
+ props.Set("end", stringSchemaWithDescriptionAndExample("Form field: The end time of the query.", "2023-07-21T21:00:00.000Z"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for exemplars query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) formatQueryOutputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", stringSchemaWithDescriptionAndExample("Formatted query string.", "sum by(status) (rate(http_requests_total[5m]))"))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body for format query endpoint.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) formatQueryPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The query to format.", "sum(rate(http_requests_total[5m])) by (status)"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for format query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) parseQueryPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The query to parse.", "sum(rate(http_requests_total[5m]))"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for parse query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) labelsPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("start", stringSchemaWithDescriptionAndExample("Form field: The start time of the query.", "2023-07-21T20:00:00.000Z"))
+ props.Set("end", stringSchemaWithDescriptionAndExample("Form field: The end time of the query.", "2023-07-21T21:00:00.000Z"))
+ props.Set("match[]", stringArraySchemaWithDescriptionAndExample("Form field: Series selector argument that selects the series from which to read the label names.", []string{"{job=\"prometheus\"}"}))
+ props.Set("limit", integerSchemaWithDescriptionAndExample("Form field: The maximum number of label names to return.", 100))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for labels query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) seriesPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("start", stringSchemaWithDescriptionAndExample("Form field: The start time of the query.", "2023-07-21T20:00:00.000Z"))
+ props.Set("end", stringSchemaWithDescriptionAndExample("Form field: The end time of the query.", "2023-07-21T21:00:00.000Z"))
+ props.Set("match[]", stringArraySchemaWithDescriptionAndExample("Form field: Series selector argument that selects the series to return.", []string{"{job=\"prometheus\"}"}))
+ props.Set("limit", integerSchemaWithDescriptionAndExample("Form field: The maximum number of series to return.", 100))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for series query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"match[]"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) metadataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("type", stringSchemaWithDescription("Metric type (counter, gauge, histogram, summary, or untyped)."))
+ props.Set("unit", stringSchemaWithDescription("Unit of the metric."))
+ props.Set("help", stringSchemaWithDescription("Help text describing the metric."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Metric metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"type", "unit", "help"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) metadataOutputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{
+ A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Metadata")},
+ }),
+ },
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body for metadata endpoint.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) metricMetadataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("target", schemaRef("#/components/schemas/Labels"))
+ props.Set("metric", stringSchemaWithDescription("Metric name."))
+ props.Set("type", stringSchemaWithDescription("Metric type (counter, gauge, histogram, summary, or untyped)."))
+ props.Set("help", stringSchemaWithDescription("Help text describing the metric."))
+ props.Set("unit", stringSchemaWithDescription("Unit of the metric."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Target metric metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"target", "type", "help", "unit"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) targetSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("discoveredLabels", schemaRef("#/components/schemas/Labels"))
+ props.Set("labels", schemaRef("#/components/schemas/Labels"))
+ props.Set("scrapePool", stringSchemaWithDescription("Name of the scrape pool."))
+ props.Set("scrapeUrl", stringSchemaWithDescription("URL of the target."))
+ props.Set("globalUrl", stringSchemaWithDescription("Global URL of the target."))
+ props.Set("lastError", stringSchemaWithDescription("Last error message from scraping."))
+ props.Set("lastScrape", dateTimeSchemaWithDescription("Timestamp of the last scrape."))
+ props.Set("lastScrapeDuration", numberSchemaWithDescription("Duration of the last scrape in seconds."))
+ props.Set("health", stringSchemaWithDescription("Health status of the target (up, down, or unknown)."))
+ props.Set("scrapeInterval", stringSchemaWithDescription("Scrape interval for this target."))
+ props.Set("scrapeTimeout", stringSchemaWithDescription("Scrape timeout for this target."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Scrape target information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"discoveredLabels", "labels", "scrapePool", "scrapeUrl", "globalUrl", "lastError", "lastScrape", "lastScrapeDuration", "health", "scrapeInterval", "scrapeTimeout"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) droppedTargetSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("discoveredLabels", schemaRef("#/components/schemas/Labels"))
+ props.Set("scrapePool", stringSchemaWithDescription("Name of the scrape pool."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Dropped target information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"discoveredLabels", "scrapePool"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) targetDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("activeTargets", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Target")},
+ }))
+ props.Set("droppedTargets", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/DroppedTarget")},
+ }))
+ props.Set("droppedTargetCounts", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{A: integerSchema()},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Target discovery information including active and dropped targets.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"activeTargets", "droppedTargets", "droppedTargetCounts"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) scrapePoolsDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("scrapePools", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "List of all configured scrape pools.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"scrapePools"},
+ Properties: props,
+ })
+}
+
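+// configSchema models a single relabel_config rule; it is referenced as
+// "Config" from the RelabelStep schema below.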
+func (*OpenAPIBuilder) configSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("source_labels", stringArraySchemaWithDescription("Source labels for relabeling."))
+ props.Set("separator", stringSchemaWithDescription("Separator for source label values."))
+ props.Set("regex", stringSchemaWithDescription("Regular expression for matching."))
+ props.Set("modulus", integerSchemaWithDescription("Modulus for hash-based relabeling."))
+ props.Set("target_label", stringSchemaWithDescription("Target label name."))
+ props.Set("replacement", stringSchemaWithDescription("Replacement value."))
+ props.Set("action", stringSchemaWithDescription("Relabel action."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Relabel configuration.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) relabelStepSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("rule", schemaRef("#/components/schemas/Config"))
+ props.Set("output", schemaRef("#/components/schemas/Labels"))
+ props.Set("keep", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Relabel step showing the rule, output, and whether the target was kept.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"rule", "output", "keep"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) relabelStepsResponseSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("steps", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/RelabelStep")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Relabeling steps response.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"steps"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) ruleGroupSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("name", stringSchemaWithDescription("Name of the rule group."))
+ props.Set("file", stringSchemaWithDescription("File containing the rule group."))
+ props.Set("rules", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Rules in this group.",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{Type: []string{"object"}, Description: "Rule definition."})},
+ }))
+ props.Set("interval", numberSchemaWithDescription("Evaluation interval in seconds."))
+ props.Set("limit", integerSchemaWithDescription("Maximum number of alerts for this group."))
+ props.Set("evaluationTime", numberSchemaWithDescription("Time taken to evaluate the group in seconds."))
+ props.Set("lastEvaluation", dateTimeSchemaWithDescription("Timestamp of the last evaluation."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Rule group information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"name", "file", "rules", "interval", "limit", "evaluationTime", "lastEvaluation"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) ruleDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("groups", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/RuleGroup")},
+ }))
+ props.Set("groupNextToken", stringSchemaWithDescription("Pagination token for the next page of groups."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Rule discovery information containing all rule groups.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"groups"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) alertSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("labels", schemaRef("#/components/schemas/Labels"))
+ props.Set("annotations", schemaRef("#/components/schemas/Labels"))
+ props.Set("state", stringSchemaWithDescription("State of the alert (pending, firing, or inactive)."))
+ props.Set("value", stringSchemaWithDescription("Value of the alert expression."))
+ props.Set("activeAt", dateTimeSchemaWithDescription("Timestamp when the alert became active."))
+ props.Set("keepFiringSince", dateTimeSchemaWithDescription("Timestamp since the alert has been kept firing."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Alert information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"labels", "annotations", "state", "value"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) alertDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("alerts", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Alert")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Alert discovery information containing all active alerts.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"alerts"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) alertmanagerTargetSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("url", stringSchemaWithDescription("URL of the Alertmanager instance."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Alertmanager target information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"url"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) alertmanagerDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("activeAlertmanagers", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/AlertmanagerTarget")},
+ }))
+ props.Set("droppedAlertmanagers", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/AlertmanagerTarget")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Alertmanager discovery information including active and dropped instances.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"activeAlertmanagers", "droppedAlertmanagers"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusConfigDataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("yaml", stringSchemaWithDescription("Prometheus configuration in YAML format."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Prometheus configuration.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"yaml"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) runtimeInfoSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("startTime", base.CreateSchemaProxy(&base.Schema{Type: []string{"string"}, Format: "date-time"}))
+ props.Set("CWD", stringSchema())
+ props.Set("hostname", stringSchema())
+ props.Set("serverTime", base.CreateSchemaProxy(&base.Schema{Type: []string{"string"}, Format: "date-time"}))
+ props.Set("reloadConfigSuccess", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+ props.Set("lastConfigTime", base.CreateSchemaProxy(&base.Schema{Type: []string{"string"}, Format: "date-time"}))
+ props.Set("corruptionCount", integerSchema())
+ props.Set("goroutineCount", integerSchema())
+ props.Set("GOMAXPROCS", integerSchema())
+ props.Set("GOMEMLIMIT", integerSchema())
+ props.Set("GOGC", stringSchema())
+ props.Set("GODEBUG", stringSchema())
+ props.Set("storageRetention", stringSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Prometheus runtime information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"startTime", "CWD", "hostname", "serverTime", "reloadConfigSuccess", "lastConfigTime", "corruptionCount", "goroutineCount", "GOMAXPROCS", "GOMEMLIMIT", "GOGC", "GODEBUG", "storageRetention"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) prometheusVersionSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("version", stringSchema())
+ props.Set("revision", stringSchema())
+ props.Set("branch", stringSchema())
+ props.Set("buildUser", stringSchema())
+ props.Set("buildDate", stringSchema())
+ props.Set("goVersion", stringSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Prometheus version information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"version", "revision", "branch", "buildUser", "buildDate", "goVersion"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusFlagsOutputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body for status flags endpoint.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) headStatsSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("numSeries", integerSchema())
+ props.Set("numLabelPairs", integerSchema())
+ props.Set("chunkCount", integerSchema())
+ props.Set("minTime", integerSchema())
+ props.Set("maxTime", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "TSDB head statistics.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"numSeries", "numLabelPairs", "chunkCount", "minTime", "maxTime"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) tsdbStatSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("name", stringSchema())
+ props.Set("value", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "TSDB statistic.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"name", "value"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) tsdbStatusSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("headStats", schemaRef("#/components/schemas/HeadStats"))
+ props.Set("seriesCountByMetricName", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/TSDBStat")},
+ }))
+ props.Set("labelValueCountByLabelName", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/TSDBStat")},
+ }))
+ props.Set("memoryInBytesByLabelName", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/TSDBStat")},
+ }))
+ props.Set("seriesCountByLabelValuePair", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/TSDBStat")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "TSDB status information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"headStats", "seriesCountByMetricName", "labelValueCountByLabelName", "memoryInBytesByLabelName", "seriesCountByLabelValuePair"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) blockDescSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("ulid", stringSchema())
+ props.Set("minTime", integerSchema())
+ props.Set("maxTime", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Block descriptor.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"ulid", "minTime", "maxTime"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) blockStatsSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("numSamples", integerSchema())
+ props.Set("numSeries", integerSchema())
+ props.Set("numChunks", integerSchema())
+ props.Set("numTombstones", integerSchema())
+ props.Set("numFloatSamples", integerSchema())
+ props.Set("numHistogramSamples", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Block statistics.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) blockMetaCompactionSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("level", integerSchema())
+ props.Set("sources", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }))
+ props.Set("parents", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/BlockDesc")},
+ }))
+ props.Set("failed", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+ props.Set("deletable", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+ props.Set("hints", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Block compaction metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"level"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) blockMetaSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("ulid", stringSchema())
+ props.Set("minTime", integerSchema())
+ props.Set("maxTime", integerSchema())
+ props.Set("stats", schemaRef("#/components/schemas/BlockStats"))
+ props.Set("compaction", schemaRef("#/components/schemas/BlockMetaCompaction"))
+ props.Set("version", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Block metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"ulid", "minTime", "maxTime", "compaction", "version"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusTSDBBlocksDataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("blocks", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/BlockMeta")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "TSDB blocks information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"blocks"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusWALReplayDataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("min", integerSchema())
+ props.Set("max", integerSchema())
+ props.Set("current", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "WAL replay status.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"min", "max", "current"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) dataStructSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("name", stringSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Generic data structure with a name field.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"name"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) notificationSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("text", stringSchema())
+ props.Set("date", base.CreateSchemaProxy(&base.Schema{Type: []string{"string"}, Format: "date-time"}))
+ props.Set("active", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Server notification.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"text", "date", "active"},
+ Properties: props,
+ })
+}
diff --git a/web/api/v1/openapi_test.go b/web/api/v1/openapi_test.go
new file mode 100644
index 0000000000..0d2f5cc83e
--- /dev/null
+++ b/web/api/v1/openapi_test.go
@@ -0,0 +1,289 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+// TestOpenAPIHTTPHandler verifies that the OpenAPI endpoint serves a valid
+// specification: correct headers, a structure conforming to OpenAPI 3.1, and
+// identical responses across repeated requests.
+func TestOpenAPIHTTPHandler(t *testing.T) {
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+
+ // First request.
+ req1 := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec1 := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec1, req1)
+
+ // Verify status code and headers.
+ require.Equal(t, http.StatusOK, rec1.Code)
+ require.True(t, strings.HasPrefix(rec1.Header().Get("Content-Type"), "application/yaml"), "Content-Type should start with application/yaml")
+ require.Equal(t, "no-cache, no-store, must-revalidate", rec1.Header().Get("Cache-Control"))
+
+ // Verify it is valid YAML.
+ var spec map[string]any
+ err := yaml.Unmarshal(rec1.Body.Bytes(), &spec)
+ require.NoError(t, err)
+
+ // Verify structure.
+ require.Contains(t, spec, "openapi")
+ require.Contains(t, spec, "info")
+ require.Contains(t, spec, "paths")
+ require.Contains(t, spec, "components")
+
+ // Verify OpenAPI version (default is 3.1.0).
+ require.Equal(t, "3.1.0", spec["openapi"])
+
+ // Verify info section.
+ info, ok := spec["info"].(map[any]any)
+ require.True(t, ok, "info should be a map")
+ require.Equal(t, "Prometheus API", info["title"])
+
+ // Verify paths exist.
+ paths, ok := spec["paths"].(map[any]any)
+ require.True(t, ok, "paths should be a map")
+ require.NotEmpty(t, paths, "paths should not be empty")
+
+ // Second request to verify response consistency.
+ req2 := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec2 := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec2, req2)
+
+ // Both responses should be identical.
+ require.Equal(t, rec1.Body.String(), rec2.Body.String())
+}
+
+// TestOpenAPIPathFiltering verifies that the IncludePaths option correctly filters
+// which API paths are included in the generated specification.
+func TestOpenAPIPathFiltering(t *testing.T) {
+ tests := []struct {
+ name string
+ includePaths []string
+ wantPaths []string
+ excludePaths []string
+ }{
+ {
+ name: "no filter includes all",
+ includePaths: nil,
+ wantPaths: []string{"/query", "/labels", "/alerts", "/targets"},
+ },
+ {
+ name: "filter query paths",
+ includePaths: []string{"/query"},
+ wantPaths: []string{"/query", "/query_range", "/query_exemplars"},
+ excludePaths: []string{"/labels", "/alerts", "/targets"},
+ },
+ {
+ name: "filter status paths",
+ includePaths: []string{"/status"},
+ wantPaths: []string{"/status/config", "/status/flags", "/status/runtimeinfo"},
+ excludePaths: []string{"/query", "/alerts", "/targets"},
+ },
+ {
+ name: "filter multiple prefixes",
+ includePaths: []string{"/label", "/series"},
+ wantPaths: []string{"/labels", "/label/{name}/values", "/series"},
+ excludePaths: []string{"/query", "/alerts", "/targets"},
+ },
+ {
+ name: "exact path match",
+ includePaths: []string{"/alerts"},
+ wantPaths: []string{"/alerts"},
+ excludePaths: []string{"/alertmanagers", "/query"},
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ builder := NewOpenAPIBuilder(OpenAPIOptions{
+ IncludePaths: tc.includePaths,
+ }, promslog.NewNopLogger())
+
+ req := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec, req)
+
+ require.Equal(t, http.StatusOK, rec.Code)
+
+ var spec map[string]any
+ err := yaml.Unmarshal(rec.Body.Bytes(), &spec)
+ require.NoError(t, err)
+
+ paths, ok := spec["paths"].(map[any]any)
+ require.True(t, ok, "paths should be a map")
+
+ for _, want := range tc.wantPaths {
+ require.Contains(t, paths, want)
+ }
+
+ for _, exclude := range tc.excludePaths {
+ require.NotContains(t, paths, exclude)
+ }
+ })
+ }
+}
+
+// TestOpenAPISchemaCompleteness verifies that a set of essential schemas used
+// by the API paths is defined in the components/schemas section; it spot-checks
+// the most important names rather than walking every $ref.
+func TestOpenAPISchemaCompleteness(t *testing.T) {
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+
+ req := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec, req)
+
+ var spec map[string]any
+ err := yaml.Unmarshal(rec.Body.Bytes(), &spec)
+ require.NoError(t, err)
+
+ components, ok := spec["components"].(map[any]any)
+ require.True(t, ok, "components should be a map")
+
+ schemas, ok := components["schemas"].(map[any]any)
+ require.True(t, ok, "schemas should be a map")
+
+ // Verify essential schemas are present.
+ essentialSchemas := []string{
+ "Error",
+ "Labels",
+ "QueryOutputBody",
+ "LabelsOutputBody",
+ "SeriesOutputBody",
+ "TargetsOutputBody",
+ "AlertsOutputBody",
+ "RulesOutputBody",
+ "StatusConfigOutputBody",
+ "StatusFlagsOutputBody",
+ "PrometheusVersion",
+ }
+
+ for _, schema := range essentialSchemas {
+ require.Contains(t, schemas, schema)
+ }
+}
+
+// TODO: Add a test verifying that every route registered in api.go's Register()
+// is covered by the OpenAPI spec. One option is wrapping Router so it records
+// registered paths and cross-checking those against the spec, as in the sketch below.
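+// A possible shape for that wrapper (hypothetical, untested sketch):
+//
+//    type trackingRouter struct {
+//        *route.Router
+//        paths []string
+//    }
+//
+//    func (r *trackingRouter) Get(path string, h http.HandlerFunc) {
+//        r.paths = append(r.paths, path)
+//        r.Router.Get(path, h)
+//    }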
+
+// TestOpenAPIShouldIncludePath verifies the shouldIncludePath method correctly
+// matches paths against the IncludePaths filter configuration.
+func TestOpenAPIShouldIncludePath(t *testing.T) {
+ tests := []struct {
+ name string
+ includePaths []string
+ path string
+ expected bool
+ }{
+ {
+ name: "empty filter includes all",
+ includePaths: nil,
+ path: "/query",
+ expected: true,
+ },
+ {
+ name: "exact match",
+ includePaths: []string{"/query"},
+ path: "/query",
+ expected: true,
+ },
+ {
+ name: "prefix match",
+ includePaths: []string{"/query"},
+ path: "/query_range",
+ expected: true,
+ },
+ {
+ name: "no match",
+ includePaths: []string{"/query"},
+ path: "/labels",
+ expected: false,
+ },
+ {
+ name: "multiple filters with match",
+ includePaths: []string{"/labels", "/series"},
+ path: "/series",
+ expected: true,
+ },
+ {
+ name: "multiple filters without match",
+ includePaths: []string{"/labels", "/series"},
+ path: "/query",
+ expected: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ builder := &OpenAPIBuilder{
+ options: OpenAPIOptions{
+ IncludePaths: tc.includePaths,
+ },
+ }
+
+ result := builder.shouldIncludePath(tc.path)
+ require.Equal(t, tc.expected, result)
+ })
+ }
+}
+
+// TestOpenAPIVersionConsistency verifies that both OpenAPI versions are properly generated
+// and that 3.2 has exactly one more path than 3.1 (/notifications/live).
+func TestOpenAPIVersionConsistency(t *testing.T) {
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+
+ // Fetch OpenAPI 3.1 spec (default).
+ req31 := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec31 := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec31, req31)
+
+ require.Equal(t, http.StatusOK, rec31.Code)
+
+ // Fetch OpenAPI 3.2 spec.
+ req32 := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml?openapi_version=3.2", nil)
+ rec32 := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec32, req32)
+
+ require.Equal(t, http.StatusOK, rec32.Code)
+
+ // Parse both specs.
+ var spec31, spec32 map[string]any
+ err := yaml.Unmarshal(rec31.Body.Bytes(), &spec31)
+ require.NoError(t, err)
+ err = yaml.Unmarshal(rec32.Body.Bytes(), &spec32)
+ require.NoError(t, err)
+
+ // Verify versions are different.
+ require.Equal(t, "3.1.0", spec31["openapi"])
+ require.Equal(t, "3.2.0", spec32["openapi"])
+
+ // Verify /notifications/live is only in 3.2.
+ paths31 := spec31["paths"].(map[any]any)
+ paths32 := spec32["paths"].(map[any]any)
+
+ require.NotContains(t, paths31, "/notifications/live")
+
+ require.Contains(t, paths32, "/notifications/live")
+
+ // Verify 3.2 has exactly one more path than 3.1.
+ require.Len(t, paths32, len(paths31)+1,
+ "OpenAPI 3.2 should have exactly one more path than 3.1")
+}
diff --git a/web/api/v1/test_helpers.go b/web/api/v1/test_helpers.go
new file mode 100644
index 0000000000..2662b0c84b
--- /dev/null
+++ b/web/api/v1/test_helpers.go
@@ -0,0 +1,157 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/route"
+
+ "github.com/prometheus/prometheus/web/api/testhelpers"
+)
+
+// newTestAPI creates a new API instance for testing using testhelpers.
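+// A typical call looks like (APIConfig fields elided for brevity):
+//
+//    w := newTestAPI(t, testhelpers.APIConfig{})
+//    rec := httptest.NewRecorder()
+//    w.Handler.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/api/v1/status/flags", nil))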
+func newTestAPI(t *testing.T, cfg testhelpers.APIConfig) *testhelpers.APIWrapper {
+ t.Helper()
+
+ params := testhelpers.PrepareAPI(t, cfg)
+
+ // Adapt the testhelpers interfaces to v1 interfaces.
+ api := NewAPI(
+ params.QueryEngine,
+ params.Queryable,
+ nil, // appendable
+ params.ExemplarQueryable,
+ func(ctx context.Context) ScrapePoolsRetriever {
+ return adaptScrapePoolsRetriever(params.ScrapePoolsRetriever(ctx))
+ },
+ func(ctx context.Context) TargetRetriever {
+ return adaptTargetRetriever(params.TargetRetriever(ctx))
+ },
+ func(ctx context.Context) AlertmanagerRetriever {
+ return adaptAlertmanagerRetriever(params.AlertmanagerRetriever(ctx))
+ },
+ params.ConfigFunc,
+ params.FlagsMap,
+ GlobalURLOptions{},
+ params.ReadyFunc,
+ adaptTSDBAdminStats(params.TSDBAdmin),
+ params.DBDir,
+ false, // enableAdmin
+ params.Logger,
+ func(ctx context.Context) RulesRetriever {
+ return adaptRulesRetriever(params.RulesRetriever(ctx))
+ },
+ 0, // remoteReadSampleLimit
+ 0, // remoteReadConcurrencyLimit
+ 0, // remoteReadMaxBytesInFrame
+ false, // isAgent
+ nil, // corsOrigin
+ func() (RuntimeInfo, error) {
+ info, err := params.RuntimeInfoFunc()
+ return RuntimeInfo{
+ StartTime: info.StartTime,
+ CWD: info.CWD,
+ Hostname: info.Hostname,
+ ServerTime: info.ServerTime,
+ ReloadConfigSuccess: info.ReloadConfigSuccess,
+ LastConfigTime: info.LastConfigTime,
+ CorruptionCount: info.CorruptionCount,
+ GoroutineCount: info.GoroutineCount,
+ GOMAXPROCS: info.GOMAXPROCS,
+ GOMEMLIMIT: info.GOMEMLIMIT,
+ GOGC: info.GOGC,
+ GODEBUG: info.GODEBUG,
+ StorageRetention: info.StorageRetention,
+ }, err
+ },
+ &PrometheusVersion{
+ Version: params.BuildInfo.Version,
+ Revision: params.BuildInfo.Revision,
+ Branch: params.BuildInfo.Branch,
+ BuildUser: params.BuildInfo.BuildUser,
+ BuildDate: params.BuildInfo.BuildDate,
+ GoVersion: params.BuildInfo.GoVersion,
+ },
+ params.NotificationsGetter,
+ params.NotificationsSub,
+ params.Gatherer,
+ params.Registerer,
+ nil, // statsRenderer
+ false, // rwEnabled
+ nil, // acceptRemoteWriteProtoMsgs
+ false, // otlpEnabled
+ false, // otlpDeltaToCumulative
+ false, // otlpNativeDeltaIngestion
+ false, // stZeroIngestionEnabled
+ 5*time.Minute, // lookbackDelta
+ false, // enableTypeAndUnitLabels
+ false, // appendMetadata
+ nil, // overrideErrorCode
+ nil, // featureRegistry
+ OpenAPIOptions{}, // openAPIOptions
+ )
+
+ // Register routes.
+ router := route.New()
+ api.Register(router.WithPrefix("/api/v1"))
+
+ return &testhelpers.APIWrapper{
+ Handler: router,
+ }
+}
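+
+// A hypothetical usage sketch (not part of this change): the returned
+// wrapper's Handler can be driven directly with httptest, e.g.
+//
+//	api := newTestAPI(t, testhelpers.APIConfig{})
+//	req := httptest.NewRequest(http.MethodGet, "/api/v1/status/buildinfo", nil)
+//	rec := httptest.NewRecorder()
+//	api.Handler.ServeHTTP(rec, req)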
+
+// Adapter functions to convert testhelpers interfaces to v1 interfaces.
+
+type rulesRetrieverAdapter struct {
+ testhelpers.RulesRetriever
+}
+
+func adaptRulesRetriever(r testhelpers.RulesRetriever) RulesRetriever {
+ return &rulesRetrieverAdapter{r}
+}
+
+type targetRetrieverAdapter struct {
+ testhelpers.TargetRetriever
+}
+
+func adaptTargetRetriever(t testhelpers.TargetRetriever) TargetRetriever {
+ return &targetRetrieverAdapter{t}
+}
+
+type scrapePoolsRetrieverAdapter struct {
+ testhelpers.ScrapePoolsRetriever
+}
+
+func adaptScrapePoolsRetriever(s testhelpers.ScrapePoolsRetriever) ScrapePoolsRetriever {
+ return &scrapePoolsRetrieverAdapter{s}
+}
+
+type alertmanagerRetrieverAdapter struct {
+ testhelpers.AlertmanagerRetriever
+}
+
+func adaptAlertmanagerRetriever(a testhelpers.AlertmanagerRetriever) AlertmanagerRetriever {
+ return &alertmanagerRetrieverAdapter{a}
+}
+
+type tsdbAdminStatsAdapter struct {
+ testhelpers.TSDBAdminStats
+}
+
+func adaptTSDBAdminStats(t testhelpers.TSDBAdminStats) TSDBAdminStats {
+ return &tsdbAdminStatsAdapter{t}
+}
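
The adapters above work because the testhelpers interfaces and the v1 interfaces are structurally identical: embedding the testhelpers interface promotes its method set, so the wrapper satisfies the v1 interface with no hand-written forwarding methods. A standalone sketch of the pattern (interface and type names here are illustrative, not the real ones):

    package main

    import "fmt"

    // Stand-ins for a testhelpers interface and its v1 counterpart,
    // which share an identical method set.
    type helperRetriever interface{ Targets() []string }
    type v1Retriever interface{ Targets() []string }

    // Embedding promotes Targets(), so the adapter satisfies v1Retriever.
    type retrieverAdapter struct{ helperRetriever }

    type fakeRetriever struct{}

    func (fakeRetriever) Targets() []string {
    	return []string{"demo.prometheus.io:9090"}
    }

    func main() {
    	var r v1Retriever = retrieverAdapter{fakeRetriever{}}
    	fmt.Println(r.Targets())
    }
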
diff --git a/web/api/v1/testdata/openapi_3.1_golden.yaml b/web/api/v1/testdata/openapi_3.1_golden.yaml
new file mode 100644
index 0000000000..c69694b530
--- /dev/null
+++ b/web/api/v1/testdata/openapi_3.1_golden.yaml
@@ -0,0 +1,4401 @@
+openapi: 3.1.0
+info:
+ title: Prometheus API
+ description: Prometheus is an Open-Source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach.
+ contact:
+ name: Prometheus Community
+ url: https://prometheus.io/community/
+ version: 0.0.1-undefined
+servers:
+ - url: /api/v1
+paths:
+ /query:
+ get:
+ tags:
+ - query
+ summary: Evaluate an instant query
+ operationId: query
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: time
+ in: query
+ description: The evaluation timestamp (optional, defaults to current time).
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: query
+ in: query
+ description: The PromQL query to execute.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: up
+ - name: timeout
+ in: query
+ description: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 30s
+ - name: lookback_delta
+ in: query
+ description: Override the lookback period for this query. Optional.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 5m
+ - name: stats
+ in: query
+ description: When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: all
+ responses:
+ "200":
+ description: Query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryOutputBody'
+ examples:
+ vectorResult:
+ summary: 'Instant vector query: up'
+ value: {"status": "success", "data": {"resultType": "vector", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "value": [1767436620, "1"]}, {"metric": {"__name__": "up", "env": "demo", "instance": "demo.prometheus.io:9093", "job": "alertmanager"}, "value": [1767436620, "1"]}]}}
+ scalarResult:
+ summary: 'Scalar query: scalar(42)'
+ value:
+ data:
+ result:
+ - 1767436620
+ - "42"
+ resultType: scalar
+ status: success
+ matrixResult:
+ summary: 'Range vector query: up[5m]'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767436320, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Evaluate an instant query
+ operationId: query-post
+ requestBody:
+ description: Submit an instant query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryPostInputBody'
+ examples:
+ simpleQuery:
+ summary: Simple instant query
+ value:
+ query: up
+ queryWithTime:
+ summary: Query with specific timestamp
+ value:
+ query: up{job="prometheus"}
+ time: "2026-01-02T13:37:00.000Z"
+ queryWithLimit:
+ summary: Query with limit and statistics
+ value:
+ limit: 100
+ query: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ stats: all
+ required: true
+ responses:
+ "200":
+ description: Instant query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryOutputBody'
+ examples:
+ vectorResult:
+ summary: 'Instant vector query: up'
+ value: {"status": "success", "data": {"resultType": "vector", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "value": [1767436620, "1"]}, {"metric": {"__name__": "up", "env": "demo", "instance": "demo.prometheus.io:9093", "job": "alertmanager"}, "value": [1767436620, "1"]}]}}
+ scalarResult:
+ summary: 'Scalar query: scalar(42)'
+ value:
+ data:
+ result:
+ - 1767436620
+ - "42"
+ resultType: scalar
+ status: success
+ matrixResult:
+ summary: 'Range vector query: up[5m]'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767436320, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing instant query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /query_range:
+ get:
+ tags:
+ - query
+ summary: Evaluate a range query
+ operationId: query-range
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: start
+ in: query
+ description: The start time of the query.
+ required: true
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: The end time of the query.
+ required: true
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: step
+ in: query
+ description: The step size of the query.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 15s
+ - name: query
+ in: query
+ description: The query to execute.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ - name: timeout
+ in: query
+ description: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 30s
+ - name: lookback_delta
+ in: query
+ description: Override the lookback period for this query. Optional.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 5m
+ - name: stats
+ in: query
+ description: When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: all
+ responses:
+ "200":
+ description: Range query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryRangeOutputBody'
+ examples:
+ matrixResult:
+ summary: 'Range query: rate(prometheus_http_requests_total[5m])'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767433020, "1"], [1767434820, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing range query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Evaluate a range query
+ operationId: query-range-post
+ requestBody:
+ description: Submit a range query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryRangePostInputBody'
+ examples:
+ basicRange:
+ summary: Basic range query
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: up
+ start: "2026-01-02T12:37:00.000Z"
+ step: 15s
+ rateQuery:
+ summary: Rate calculation over time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ start: "2026-01-02T12:37:00.000Z"
+ step: 30s
+ timeout: 30s
+ required: true
+ responses:
+ "200":
+ description: Range query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryRangeOutputBody'
+ examples:
+ matrixResult:
+ summary: 'Range query: rate(prometheus_http_requests_total[5m])'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767433020, "1"], [1767434820, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing range query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /query_exemplars:
+ get:
+ tags:
+ - query
+ summary: Query exemplars
+ operationId: query-exemplars
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for exemplars query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for exemplars query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: query
+ in: query
+ description: PromQL query to extract exemplars for.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus_http_requests_total
+ responses:
+ "200":
+ description: Exemplars retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsOutputBody'
+ examples:
+ exemplarsResult:
+ summary: Exemplars for a metric with trace IDs
+ value:
+ data:
+ - exemplars:
+ - labels:
+ traceID: abc123def456
+ timestamp: 1.689956451781e+09
+ value: "1.5"
+ seriesLabels:
+ __name__: http_requests_total
+ job: api-server
+ method: GET
+ status: success
+ default:
+ description: Error retrieving exemplars.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Query exemplars
+ operationId: query-exemplars-post
+ requestBody:
+ description: Submit an exemplars query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsPostInputBody'
+ examples:
+ basicExemplar:
+ summary: Query exemplars for a metric
+ value:
+ query: prometheus_http_requests_total
+ exemplarWithTimeRange:
+ summary: Exemplars within specific time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: prometheus_http_requests_total{job="prometheus"}
+ start: "2026-01-02T12:37:00.000Z"
+ required: true
+ responses:
+ "200":
+ description: Exemplars query completed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsOutputBody'
+ examples:
+ exemplarsResult:
+ summary: Exemplars for a metric with trace IDs
+ value:
+ data:
+ - exemplars:
+ - labels:
+ traceID: abc123def456
+ timestamp: 1.689956451781e+09
+ value: "1.5"
+ seriesLabels:
+ __name__: http_requests_total
+ job: api-server
+ method: GET
+ status: success
+ default:
+ description: Error processing exemplars query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /format_query:
+ get:
+ tags:
+ - query
+ summary: Format a PromQL query
+ operationId: format-query
+ parameters:
+ - name: query
+ in: query
+ description: PromQL expression to format.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: sum(rate(http_requests_total[5m])) by (job)
+ responses:
+ "200":
+ description: Query formatted successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FormatQueryOutputBody'
+ examples:
+ formattedQuery:
+ summary: Formatted PromQL query
+ value:
+ data: sum by(job, status) (rate(http_requests_total[5m]))
+ status: success
+ default:
+ description: Error formatting query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Format a PromQL query
+ operationId: format-query-post
+ requestBody:
+ description: Submit a PromQL query to format. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/FormatQueryPostInputBody'
+ examples:
+ simpleFormat:
+ summary: Format a simple query
+ value:
+ query: up{job="prometheus"}
+ complexFormat:
+ summary: Format a complex query
+ value:
+ query: sum(rate(http_requests_total[5m])) by (job, status)
+ required: true
+ responses:
+ "200":
+ description: Query formatting completed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FormatQueryOutputBody'
+ examples:
+ formattedQuery:
+ summary: Formatted PromQL query
+ value:
+ data: sum by(job, status) (rate(http_requests_total[5m]))
+ status: success
+ default:
+ description: Error formatting query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /parse_query:
+ get:
+ tags:
+ - query
+ summary: Parse a PromQL query
+ operationId: parse-query
+ parameters:
+ - name: query
+ in: query
+ description: PromQL expression to parse.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: up{job="prometheus"}
+ responses:
+ "200":
+ description: Query parsed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ParseQueryOutputBody'
+ examples:
+ parsedQuery:
+ summary: Parsed PromQL expression tree
+ value:
+ data:
+ resultType: vector
+ status: success
+ default:
+ description: Error parsing query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Parse a PromQL query
+ operationId: parse-query-post
+ requestBody:
+ description: Submit a PromQL query to parse. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/ParseQueryPostInputBody'
+ examples:
+ simpleParse:
+ summary: Parse a simple query
+ value:
+ query: up
+ complexParse:
+ summary: Parse a complex query
+ value:
+ query: rate(http_requests_total{job="api"}[5m])
+ required: true
+ responses:
+ "200":
+ description: Query parsed successfully via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ParseQueryOutputBody'
+ examples:
+ parsedQuery:
+ summary: Parsed PromQL expression tree
+ value:
+ data:
+ resultType: vector
+ status: success
+ default:
+ description: Error parsing query via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /labels:
+ get:
+ tags:
+ - labels
+ summary: Get label names
+ operationId: labels
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for label names query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for label names query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of label names to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ responses:
+ "200":
+ description: Label names retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelsOutputBody'
+ examples:
+ labelNames:
+ summary: List of label names
+ value:
+ data:
+ - __name__
+ - active
+ - address
+ - alertmanager
+ - alertname
+ - alertstate
+ - backend
+ - branch
+ - code
+ - collector
+ - component
+ - device
+ - env
+ - endpoint
+ - fstype
+ - handler
+ - instance
+ - job
+ - le
+ - method
+ - mode
+ - name
+ status: success
+ default:
+ description: Error retrieving label names.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - labels
+ summary: Get label names
+ operationId: labels-post
+ requestBody:
+ description: Submit a label names query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/LabelsPostInputBody'
+ examples:
+ allLabels:
+ summary: Get all label names
+ value: {}
+ labelsWithTimeRange:
+ summary: Get label names within time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ start: "2026-01-02T12:37:00.000Z"
+ labelsWithMatch:
+ summary: Get label names matching series selector
+ value:
+ match[]:
+ - up
+ - process_start_time_seconds{job="prometheus"}
+ required: true
+ responses:
+ "200":
+ description: Label names retrieved successfully via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelsOutputBody'
+ examples:
+ labelNames:
+ summary: List of label names
+ value:
+ data:
+ - __name__
+ - active
+ - address
+ - alertmanager
+ - alertname
+ - alertstate
+ - backend
+ - branch
+ - code
+ - collector
+ - component
+ - device
+ - env
+ - endpoint
+ - fstype
+ - handler
+ - instance
+ - job
+ - le
+ - method
+ - mode
+ - name
+ status: success
+ default:
+ description: Error retrieving label names via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /label/{name}/values:
+ get:
+ tags:
+ - labels
+ summary: Get label values
+ operationId: label-values
+ parameters:
+ - name: name
+ in: path
+ description: Label name.
+ required: true
+ schema:
+ type: string
+ - name: start
+ in: query
+ description: Start timestamp for label values query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for label values query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of label values to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 1000
+ responses:
+ "200":
+ description: Label values retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelValuesOutputBody'
+ examples:
+ labelValues:
+ summary: List of values for a label
+ value:
+ data:
+ - alertmanager
+ - blackbox
+ - caddy
+ - cadvisor
+ - grafana
+ - node
+ - prometheus
+ - random
+ status: success
+ default:
+ description: Error retrieving label values.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /series:
+ get:
+ tags:
+ - series
+ summary: Find series by label matchers
+ operationId: series
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for series query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for series query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of series to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ responses:
+ "200":
+ description: Series returned matching the provided label matchers.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesOutputBody'
+ examples:
+ seriesList:
+ summary: List of series matching the selector
+ value:
+ data:
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:8080
+ job: cadvisor
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9100
+ job: node
+ - __name__: up
+ instance: demo.prometheus.io:3000
+ job: grafana
+ - __name__: up
+ instance: demo.prometheus.io:8996
+ job: random
+ status: success
+ default:
+ description: Error retrieving series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - series
+ summary: Find series by label matchers
+ operationId: series-post
+ requestBody:
+ description: Submit a series query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/SeriesPostInputBody'
+ examples:
+ seriesMatch:
+ summary: Find series by label matchers
+ value:
+ match[]:
+ - up
+ seriesWithTimeRange:
+ summary: Find series with time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ match[]:
+ - up
+ - process_cpu_seconds_total{job="prometheus"}
+ start: "2026-01-02T12:37:00.000Z"
+ required: true
+ responses:
+ "200":
+ description: Series returned matching the provided label matchers via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesOutputBody'
+ examples:
+ seriesList:
+ summary: List of series matching the selector
+ value:
+ data:
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:8080
+ job: cadvisor
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9100
+ job: node
+ - __name__: up
+ instance: demo.prometheus.io:3000
+ job: grafana
+ - __name__: up
+ instance: demo.prometheus.io:8996
+ job: random
+ status: success
+ default:
+ description: Error retrieving series via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ delete:
+ tags:
+ - series
+ summary: Delete series
+      description: 'Delete series matching selectors. Note: This is deprecated; use POST /admin/tsdb/delete_series instead.'
+ operationId: delete-series
+ responses:
+ "200":
+ description: Series marked for deletion.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesDeleteOutputBody'
+ examples:
+ seriesDeleted:
+ summary: Series marked for deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /metadata:
+ get:
+ tags:
+ - metadata
+ summary: Get metadata
+ operationId: get-metadata
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: limit_per_metric
+ in: query
+ description: The maximum number of metadata entries per metric.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ - name: metric
+ in: query
+ description: A metric name to filter metadata for.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: http_requests_total
+ responses:
+ "200":
+ description: Metric metadata retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/MetadataOutputBody'
+ examples:
+ metricMetadata:
+ summary: Metadata for metrics
+ value:
+ data:
+ go_gc_stack_starting_size_bytes:
+ - help: The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes.
+ type: gauge
+ unit: ""
+ prometheus_rule_group_iterations_missed_total:
+ - help: The total number of rule group evaluations missed due to slow rule group evaluation.
+ type: counter
+ unit: ""
+ prometheus_sd_updates_total:
+ - help: Total number of update events sent to the SD consumers.
+ type: counter
+ unit: ""
+ status: success
+ default:
+ description: Error retrieving metadata.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /scrape_pools:
+ get:
+ tags:
+ - targets
+ summary: Get scrape pools
+ operationId: get-scrape-pools
+ responses:
+ "200":
+ description: Scrape pools retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ScrapePoolsOutputBody'
+ examples:
+ scrapePoolsList:
+ summary: List of scrape pool names
+ value:
+ data:
+ scrapePools:
+ - alertmanager
+ - blackbox
+ - caddy
+ - cadvisor
+ - grafana
+ - node
+ - prometheus
+ - random
+ status: success
+ default:
+ description: Error retrieving scrape pools.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets:
+ get:
+ tags:
+ - targets
+ summary: Get targets
+ operationId: get-targets
+ parameters:
+ - name: scrapePool
+ in: query
+ description: Filter targets by scrape pool name.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus
+ - name: state
+ in: query
+ description: 'Filter by state: active, dropped, or any.'
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: active
+ responses:
+ "200":
+ description: Target discovery information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetsOutputBody'
+ examples:
+ targetsList:
+ summary: Active and dropped targets
+ value:
+ data:
+ activeTargets:
+ - discoveredLabels:
+ __address__: demo.prometheus.io:9093
+ __meta_filepath: /etc/prometheus/file_sd/alertmanager.yml
+ __metrics_path__: /metrics
+ __scheme__: http
+ env: demo
+ job: alertmanager
+ globalUrl: http://demo.prometheus.io:9093/metrics
+ health: up
+ labels:
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ lastError: ""
+ lastScrape: "2026-01-02T13:36:40.200Z"
+ lastScrapeDuration: 0.006576866
+ scrapeInterval: 15s
+ scrapePool: alertmanager
+ scrapeTimeout: 10s
+ scrapeUrl: http://demo.prometheus.io:9093/metrics
+ droppedTargetCounts:
+ alertmanager: 0
+ blackbox: 0
+ caddy: 0
+ cadvisor: 0
+ grafana: 0
+ node: 0
+ prometheus: 0
+ random: 0
+ droppedTargets: []
+ status: success
+ default:
+ description: Error retrieving targets.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets/metadata:
+ get:
+ tags:
+ - targets
+ summary: Get targets metadata
+ operationId: get-targets-metadata
+ parameters:
+ - name: match_target
+ in: query
+ description: Label selector to filter targets.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: '{job="prometheus"}'
+ - name: metric
+ in: query
+ description: Metric name to retrieve metadata for.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: http_requests_total
+ - name: limit
+ in: query
+ description: Maximum number of targets to match.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ responses:
+ "200":
+ description: Target metadata retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetMetadataOutputBody'
+ examples:
+ targetMetadata:
+ summary: Metadata for targets
+ value:
+ data:
+ - help: The current health status of the target
+ metric: up
+ target:
+ instance: localhost:9090
+ job: prometheus
+ type: gauge
+ unit: ""
+ status: success
+ default:
+ description: Error retrieving target metadata.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets/relabel_steps:
+ get:
+ tags:
+ - targets
+ summary: Get targets relabel steps
+ operationId: get-targets-relabel-steps
+ parameters:
+ - name: scrapePool
+ in: query
+ description: Name of the scrape pool.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus
+ - name: labels
+ in: query
+ description: JSON-encoded labels to apply relabel rules to.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: '{"__address__":"localhost:9090","job":"prometheus"}'
+ responses:
+ "200":
+ description: Relabel steps retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetRelabelStepsOutputBody'
+ examples:
+ relabelSteps:
+ summary: Relabel steps for a target
+ value:
+ data:
+ steps:
+ - keep: true
+ output:
+ __address__: localhost:9090
+ instance: localhost:9090
+ job: prometheus
+ rule:
+ action: replace
+ regex: (.*)
+ replacement: $1
+ source_labels:
+ - __address__
+ target_label: instance
+ status: success
+ default:
+ description: Error retrieving relabel steps.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /rules:
+ get:
+ tags:
+ - rules
+ summary: Get alerting and recording rules
+ operationId: rules
+ parameters:
+ - name: type
+ in: query
+ description: 'Filter by rule type: alert or record.'
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: alert
+ - name: rule_name[]
+ in: query
+ description: Filter by rule name.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - HighErrorRate
+ - name: rule_group[]
+ in: query
+ description: Filter by rule group name.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - example_alerts
+ - name: file[]
+ in: query
+ description: Filter by file path.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - /etc/prometheus/rules.yml
+ - name: match[]
+ in: query
+ description: Label matchers to filter rules.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{severity="critical"}'
+ - name: exclude_alerts
+ in: query
+ description: Exclude active alerts from response.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ - name: group_limit
+ in: query
+ description: Maximum number of rule groups to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: group_next_token
+ in: query
+ description: Pagination token for next page.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: abc123
+ responses:
+ "200":
+ description: Rules retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RulesOutputBody'
+ examples:
+ ruleGroups:
+ summary: Alerting and recording rules
+ value:
+ data:
+ groups:
+ - evaluationTime: 0.000561635
+ file: /etc/prometheus/rules/ansible_managed.yml
+ interval: 15
+ lastEvaluation: "2026-01-02T13:36:56.874Z"
+ limit: 0
+ name: ansible managed alert rules
+ rules:
+ - annotations:
+ description: This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty.
+ summary: Ensure entire alerting pipeline is functional
+ duration: 600
+ evaluationTime: 0.000356688
+ health: ok
+ keepFiringFor: 0
+ labels:
+ severity: warning
+ lastEvaluation: "2026-01-02T13:36:56.874Z"
+ name: Watchdog
+ query: vector(1)
+ state: firing
+ type: alerting
+ status: success
+ default:
+ description: Error retrieving rules.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /alerts:
+ get:
+ tags:
+ - alerts
+ summary: Get active alerts
+ operationId: alerts
+ responses:
+ "200":
+ description: Active alerts retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AlertsOutputBody'
+ examples:
+ activeAlerts:
+ summary: Currently active alerts
+ value:
+ data:
+ alerts:
+ - activeAt: "2026-01-02T13:30:00.000Z"
+ annotations:
+ description: This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty.
+ summary: Ensure entire alerting pipeline is functional
+ labels:
+ alertname: Watchdog
+ severity: warning
+ state: firing
+ value: "1e+00"
+ status: success
+ default:
+ description: Error retrieving alerts.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /alertmanagers:
+ get:
+ tags:
+ - alerts
+ summary: Get Alertmanager discovery
+ operationId: alertmanagers
+ responses:
+ "200":
+ description: Alertmanager targets retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AlertmanagersOutputBody'
+ examples:
+ alertmanagerDiscovery:
+ summary: Alertmanager discovery results
+ value:
+ data:
+ activeAlertmanagers:
+ - url: http://demo.prometheus.io:9093/api/v2/alerts
+ droppedAlertmanagers: []
+ status: success
+ default:
+ description: Error retrieving Alertmanager targets.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/config:
+ get:
+ tags:
+ - status
+ summary: Get status config
+ operationId: get-status-config
+ responses:
+ "200":
+ description: Configuration retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusConfigOutputBody'
+ examples:
+ configYAML:
+ summary: Prometheus configuration
+ value:
+ data:
+ yaml: |
+ global:
+ scrape_interval: 15s
+ scrape_timeout: 10s
+ evaluation_interval: 15s
+ external_labels:
+ environment: demo-prometheus-io
+ alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets:
+ - demo.prometheus.io:9093
+ rule_files:
+ - /etc/prometheus/rules/*.yml
+ status: success
+ default:
+ description: Error retrieving configuration.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/runtimeinfo:
+ get:
+ tags:
+ - status
+ summary: Get status runtimeinfo
+ operationId: get-status-runtimeinfo
+ responses:
+ "200":
+ description: Runtime information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusRuntimeInfoOutputBody'
+ examples:
+ runtimeInfo:
+ summary: Runtime information
+ value:
+ data:
+ CWD: /
+ GODEBUG: ""
+ GOGC: "75"
+ GOMAXPROCS: 2
+ GOMEMLIMIT: 3703818240
+ corruptionCount: 0
+ goroutineCount: 88
+ hostname: demo-prometheus-io
+ lastConfigTime: "2026-01-01T13:37:00.000Z"
+ reloadConfigSuccess: true
+ serverTime: "2026-01-02T13:37:00.000Z"
+ startTime: "2026-01-01T13:37:00.000Z"
+ storageRetention: 31d
+ status: success
+ default:
+ description: Error retrieving runtime information.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/buildinfo:
+ get:
+ tags:
+ - status
+ summary: Get status buildinfo
+ operationId: get-status-buildinfo
+ responses:
+ "200":
+ description: Build information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusBuildInfoOutputBody'
+ examples:
+ buildInfo:
+ summary: Build information
+ value:
+ data:
+ branch: HEAD
+ buildDate: 20251030-07:26:10
+ buildUser: root@08c890a84441
+ goVersion: go1.25.3
+ revision: 0a41f0000705c69ab8e0f9a723fc73e39ed62b07
+ version: 3.7.3
+ status: success
+ default:
+ description: Error retrieving build information.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/flags:
+ get:
+ tags:
+ - status
+ summary: Get status flags
+ operationId: get-status-flags
+ responses:
+ "200":
+ description: Command-line flags retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusFlagsOutputBody'
+ examples:
+ flags:
+ summary: Command-line flags
+ value:
+ data:
+ agent: "false"
+ alertmanager.notification-queue-capacity: "10000"
+ config.file: /etc/prometheus/prometheus.yml
+ enable-feature: exemplar-storage,native-histograms
+ query.max-concurrency: "20"
+ query.timeout: 2m
+ storage.tsdb.path: /prometheus
+ storage.tsdb.retention.time: 15d
+ web.console.libraries: /usr/share/prometheus/console_libraries
+ web.console.templates: /usr/share/prometheus/consoles
+ web.enable-admin-api: "true"
+ web.enable-lifecycle: "true"
+ web.listen-address: 0.0.0.0:9090
+ web.page-title: Prometheus Time Series Collection and Processing Server
+ status: success
+ default:
+ description: Error retrieving flags.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/tsdb:
+ get:
+ tags:
+ - status
+ summary: Get TSDB status
+ operationId: status-tsdb
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of items to return per category.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ responses:
+ "200":
+ description: TSDB status retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusTSDBOutputBody'
+ examples:
+ tsdbStats:
+ summary: TSDB statistics
+ value:
+ data:
+ headStats:
+ chunkCount: 37525
+ maxTime: 1767436620000
+ minTime: 1767362400712
+ numLabelPairs: 2512
+ numSeries: 9925
+ labelValueCountByLabelName:
+ - name: __name__
+ value: 5
+ - name: job
+ value: 3
+ memoryInBytesByLabelName:
+ - name: __name__
+ value: 1024
+ - name: job
+ value: 512
+ seriesCountByLabelValuePair:
+ - name: job=prometheus
+ value: 100
+ - name: instance=localhost:9090
+ value: 100
+ seriesCountByMetricName:
+ - name: up
+ value: 100
+ - name: http_requests_total
+ value: 500
+ status: success
+ default:
+ description: Error retrieving TSDB status.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/tsdb/blocks:
+ get:
+ tags:
+ - status
+ summary: Get TSDB blocks information
+ operationId: status-tsdb-blocks
+ responses:
+ "200":
+ description: TSDB blocks information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusTSDBBlocksOutputBody'
+ examples:
+ tsdbBlocks:
+ summary: TSDB block information
+ value:
+ data:
+ blocks:
+ - compaction:
+ level: 4
+ sources:
+ - 01KBCJ7TR8A4QAJ3AA1J651P5S
+ - 01KBCS3J0E34567YPB8Y5W0E24
+ - 01KBCZZ9KRTYGG3E7HVQFGC3S3
+ maxTime: 1764763200000
+ minTime: 1764568801099
+ stats:
+ numChunks: 1073962
+ numSamples: 129505582
+ numSeries: 10661
+ ulid: 01KC4D6GXQA4CRHYKV78NEBVAE
+ version: 1
+ status: success
+ default:
+ description: Error retrieving TSDB blocks.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/walreplay:
+ get:
+ tags:
+ - status
+ summary: Get status walreplay
+ operationId: get-status-walreplay
+ responses:
+ "200":
+ description: WAL replay status retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusWALReplayOutputBody'
+ examples:
+ walReplay:
+ summary: WAL replay status
+ value:
+ data:
+ current: 3214
+ max: 3214
+ min: 3209
+ status: success
+ default:
+ description: Error retrieving WAL replay status.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/delete_series:
+ put:
+ tags:
+ - admin
+ summary: Delete series matching selectors via PUT
+      description: Deletes data for a selection of series in a time range using the PUT method.
+ operationId: deleteSeriesPut
+ parameters:
+ - name: match[]
+ in: query
+ description: Series selectors to identify series to delete.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{__name__=~"test.*"}'
+ - name: start
+ in: query
+ description: Start timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ responses:
+ "200":
+ description: Series deleted successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteSeriesOutputBody'
+ examples:
+ deletionSuccess:
+ summary: Successful series deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Delete series matching selectors
+ description: Deletes data for a selection of series in a time range.
+ operationId: deleteSeriesPost
+ parameters:
+ - name: match[]
+ in: query
+ description: Series selectors to identify series to delete.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{__name__=~"test.*"}'
+ - name: start
+ in: query
+ description: Start timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ responses:
+ "200":
+ description: Series deleted successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteSeriesOutputBody'
+ examples:
+ deletionSuccess:
+ summary: Successful series deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/clean_tombstones:
+ put:
+ tags:
+ - admin
+ summary: Clean tombstones in the TSDB via PUT
+      description: Removes deleted data from disk and cleans up existing tombstones using the PUT method.
+ operationId: cleanTombstonesPut
+ responses:
+ "200":
+ description: Tombstones cleaned successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CleanTombstonesOutputBody'
+ examples:
+ tombstonesCleaned:
+ summary: Tombstones cleaned successfully
+ value:
+ status: success
+ default:
+ description: Error cleaning tombstones via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Clean tombstones in the TSDB
+ description: Removes deleted data from disk and cleans up existing tombstones.
+ operationId: cleanTombstonesPost
+ responses:
+ "200":
+ description: Tombstones cleaned successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CleanTombstonesOutputBody'
+ examples:
+ tombstonesCleaned:
+ summary: Tombstones cleaned successfully
+ value:
+ status: success
+ default:
+ description: Error cleaning tombstones.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/snapshot:
+ put:
+ tags:
+ - admin
+ summary: Create a snapshot of the TSDB via PUT
+      description: Creates a snapshot of all current data using the PUT method.
+ operationId: snapshotPut
+ parameters:
+ - name: skip_head
+ in: query
+ description: If true, do not snapshot data in the head block.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ responses:
+ "200":
+ description: Snapshot created successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnapshotOutputBody'
+ examples:
+ snapshotCreated:
+ summary: Snapshot created successfully
+ value:
+ data:
+ name: 20260102T133700Z-a1b2c3d4e5f67890
+ status: success
+ default:
+ description: Error creating snapshot via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Create a snapshot of the TSDB
+ description: Creates a snapshot of all current data.
+ operationId: snapshotPost
+ parameters:
+ - name: skip_head
+ in: query
+ description: If true, do not snapshot data in the head block.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ responses:
+ "200":
+ description: Snapshot created successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnapshotOutputBody'
+ examples:
+ snapshotCreated:
+ summary: Snapshot created successfully
+ value:
+ data:
+ name: 20260102T133700Z-a1b2c3d4e5f67890
+ status: success
+ default:
+ description: Error creating snapshot.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /read:
+ post:
+ tags:
+ - remote
+ summary: Remote read endpoint
+ description: Prometheus remote read endpoint for federated queries. Accepts and returns Protocol Buffer encoded data.
+ operationId: remoteRead
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /write:
+ post:
+ tags:
+ - remote
+ summary: Remote write endpoint
+ description: Prometheus remote write endpoint for sending metrics. Accepts Protocol Buffer encoded write requests.
+ operationId: remoteWrite
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /otlp/v1/metrics:
+ post:
+ tags:
+ - otlp
+ summary: OTLP metrics write endpoint
+ description: OpenTelemetry Protocol metrics ingestion endpoint. Accepts OTLP/HTTP metrics in Protocol Buffer format.
+ operationId: otlpWrite
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
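+  # Illustration (assuming the OTLP receiver is enabled, e.g. via
+  # --web.enable-otlp-receiver): an OpenTelemetry Collector can export to this
+  # path with its otlphttp exporter, roughly:
+  #   exporters:
+  #     otlphttp:
+  #       metrics_endpoint: http://localhost:9090/api/v1/otlp/v1/metrics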
+ /notifications:
+ get:
+ tags:
+ - notifications
+ summary: Get notifications
+ operationId: get-notifications
+ responses:
+ "200":
+ description: Notifications retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/NotificationsOutputBody'
+ examples:
+ notifications:
+ summary: Server notifications
+ value:
+ data:
+ - active: true
+ date: "2026-01-02T16:14:50.046Z"
+ text: Configuration reload has failed.
+ status: success
+ default:
+ description: Error retrieving notifications.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /features:
+ get:
+ tags:
+ - features
+ summary: Get features
+ operationId: get-features
+ responses:
+ "200":
+ description: Feature flags retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FeaturesOutputBody'
+ examples:
+ enabledFeatures:
+ summary: Enabled feature flags
+ value:
+ data:
+ - exemplar-storage
+ - remote-write-receiver
+ status: success
+ default:
+ description: Error retrieving features.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+components:
+ schemas:
+ Error:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ errorType:
+ type: string
+ description: Type of error that occurred.
+ example: bad_data
+ error:
+ type: string
+ description: Human-readable error message.
+ example: invalid parameter
+ required:
+ - status
+ - errorType
+ - error
+ additionalProperties: false
+ description: Error response.
+ Labels:
+ type: object
+ additionalProperties: true
+ description: Label set represented as a key-value map.
+ QueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/QueryData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for instant query.
+ QueryRangeOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/QueryData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for range query.
+ QueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The PromQL query to execute.'
+ example: up
+ time:
+ type: string
+ description: 'Form field: The evaluation timestamp (optional, defaults to current time).'
+ example: "2023-07-21T20:10:51.781Z"
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of metrics to return.'
+ example: 100
+ timeout:
+ type: string
+ description: 'Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).'
+ example: 30s
+ lookback_delta:
+ type: string
+ description: 'Form field: Override the lookback period for this query (optional).'
+ example: 5m
+ stats:
+ type: string
+ description: 'Form field: When provided, include query statistics in the response (the special value ''all'' enables more comprehensive statistics).'
+ example: all
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for instant query.
+ QueryRangePostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to execute.'
+ example: rate(http_requests_total[5m])
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:10:30.781Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T20:20:30.781Z"
+ step:
+ type: string
+ description: 'Form field: The step size of the query.'
+ example: 15s
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of metrics to return.'
+ example: 100
+ timeout:
+ type: string
+ description: 'Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).'
+ example: 30s
+ lookback_delta:
+ type: string
+ description: 'Form field: Override the lookback period for this query (optional).'
+ example: 5m
+ stats:
+ type: string
+ description: 'Form field: When provided, include query statistics in the response (the special value ''all'' enables more comprehensive statistics).'
+ example: all
+ required:
+ - query
+ - start
+ - end
+ - step
+ additionalProperties: false
+ description: POST request body for range query.
+ QueryExemplarsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ QueryExemplarsPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to execute.'
+ example: http_requests_total
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for exemplars query.
+ FormatQueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: string
+ description: Formatted query string.
+ example: sum by(status) (rate(http_requests_total[5m]))
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for format query endpoint.
+ FormatQueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to format.'
+ example: sum(rate(http_requests_total[5m])) by (status)
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for format query.
+ ParseQueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ ParseQueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to parse.'
+ example: sum(rate(http_requests_total[5m]))
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for parse query.
+ QueryData:
+ anyOf:
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - vector
+ result:
+ type: array
+ items:
+ anyOf:
+ - $ref: '#/components/schemas/FloatSample'
+ - $ref: '#/components/schemas/HistogramSample'
+ description: Array of samples (either float or histogram).
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - matrix
+ result:
+ type: array
+ items:
+ anyOf:
+ - $ref: '#/components/schemas/FloatSeries'
+ - $ref: '#/components/schemas/HistogramSeries'
+ description: Array of time series (either float or histogram).
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - scalar
+ result:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Scalar value as [timestamp, stringValue].
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - string
+ result:
+ type: array
+ items:
+ type: string
+ maxItems: 2
+ minItems: 2
+ description: String value as [timestamp, stringValue].
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ description: Query result data. The structure of 'result' depends on 'resultType'.
+ example:
+ result:
+ - metric:
+ __name__: up
+ job: prometheus
+ value:
+ - 1627845600
+ - "1"
+ resultType: vector
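+    # For the other result types only the shape of 'result' changes; e.g. the
+    # scalar form, mirroring the scalarResult example used by the /query
+    # operations:
+    #   {"resultType": "scalar", "result": [1767436620, "42"]}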
+ FloatSample:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ value:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and float value as [unixTimestamp, stringValue].
+ example:
+ - 1767436620
+ - "1"
+ required:
+ - metric
+ - value
+ additionalProperties: false
+ description: A sample with a float value.
+ HistogramSample:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ histogram:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - $ref: '#/components/schemas/HistogramValue'
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and histogram value as [unixTimestamp, histogramObject].
+ example:
+ - 1767436620
+ - buckets: []
+ count: "60"
+ sum: "120"
+ required:
+ - metric
+ - histogram
+ additionalProperties: false
+ description: A sample with a native histogram value.
+ FloatSeries:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ values:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Array of [timestamp, stringValue] pairs for float values.
+ required:
+ - metric
+ - values
+ additionalProperties: false
+ description: A time series with float values.
+ HistogramSeries:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ histograms:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - $ref: '#/components/schemas/HistogramValue'
+ maxItems: 2
+ minItems: 2
+ description: Array of [timestamp, histogramObject] pairs for histogram values.
+ required:
+ - metric
+ - histograms
+ additionalProperties: false
+ description: A time series with native histogram values.
+ HistogramValue:
+ type: object
+ properties:
+ count:
+ type: string
+ description: Total count of observations.
+ sum:
+ type: string
+ description: Sum of all observed values.
+ buckets:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ description: Histogram buckets as [boundary_rule, lower, upper, count].
+ required:
+ - count
+ - sum
+ additionalProperties: false
+ description: Native histogram value representation.
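+    # Bucket sketch, following the native histogram JSON encoding: each bucket
+    # is [boundary_rule, lower, upper, count], where boundary_rule is 0-3
+    # (0 = open left, 1 = open right, 2 = open both, 3 = closed both), e.g.
+    #   buckets: [[0, "0.5", "1", "12"]]
+    # reads as 12 observations in the interval (0.5, 1].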
+ LabelsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ type: string
+ example:
+ - __name__
+ - job
+ - instance
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of strings.
+ LabelsPostInputBody:
+ type: object
+ properties:
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ match[]:
+ type: array
+ items:
+ type: string
+ description: 'Form field: Series selector argument that selects the series from which to read the label names.'
+ example:
+ - '{job="prometheus"}'
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of label names to return.'
+ example: 100
+ additionalProperties: false
+ description: POST request body for labels query.
+ LabelValuesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ type: string
+ example:
+ - __name__
+ - job
+ - instance
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of strings.
+ SeriesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Labels'
+ example:
+ - __name__: up
+ instance: localhost:9090
+ job: prometheus
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of label sets.
+ SeriesPostInputBody:
+ type: object
+ properties:
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ match[]:
+ type: array
+ items:
+ type: string
+ description: 'Form field: Series selector argument that selects the series to return.'
+ example:
+ - '{job="prometheus"}'
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of series to return.'
+ example: 100
+ required:
+ - match[]
+ additionalProperties: false
+ description: POST request body for series query.
+ SeriesDeleteOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ Metadata:
+ type: object
+ properties:
+ type:
+ type: string
+ description: Metric type (counter, gauge, histogram, summary, or untyped).
+ unit:
+ type: string
+ description: Unit of the metric.
+ help:
+ type: string
+ description: Help text describing the metric.
+ required:
+ - type
+ - unit
+ - help
+ additionalProperties: false
+ description: Metric metadata.
+ MetadataOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: object
+ additionalProperties:
+ type: array
+ items:
+ $ref: '#/components/schemas/Metadata'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for metadata endpoint.
+ MetricMetadata:
+ type: object
+ properties:
+ target:
+ $ref: '#/components/schemas/Labels'
+ metric:
+ type: string
+ description: Metric name.
+ type:
+ type: string
+ description: Metric type (counter, gauge, histogram, summary, or untyped).
+ help:
+ type: string
+ description: Help text describing the metric.
+ unit:
+ type: string
+ description: Unit of the metric.
+ required:
+ - target
+ - type
+ - help
+ - unit
+ additionalProperties: false
+ description: Target metric metadata.
+ Target:
+ type: object
+ properties:
+ discoveredLabels:
+ $ref: '#/components/schemas/Labels'
+ labels:
+ $ref: '#/components/schemas/Labels'
+ scrapePool:
+ type: string
+ description: Name of the scrape pool.
+ scrapeUrl:
+ type: string
+ description: URL of the target.
+ globalUrl:
+ type: string
+ description: Global URL of the target.
+ lastError:
+ type: string
+ description: Last error message from scraping.
+ lastScrape:
+ type: string
+ format: date-time
+ description: Timestamp of the last scrape.
+ lastScrapeDuration:
+ type: number
+ format: double
+ description: Duration of the last scrape in seconds.
+ health:
+ type: string
+ description: Health status of the target (up, down, or unknown).
+ scrapeInterval:
+ type: string
+ description: Scrape interval for this target.
+ scrapeTimeout:
+ type: string
+ description: Scrape timeout for this target.
+ required:
+ - discoveredLabels
+ - labels
+ - scrapePool
+ - scrapeUrl
+ - globalUrl
+ - lastError
+ - lastScrape
+ - lastScrapeDuration
+ - health
+ - scrapeInterval
+ - scrapeTimeout
+ additionalProperties: false
+ description: Scrape target information.
+ DroppedTarget:
+ type: object
+ properties:
+ discoveredLabels:
+ $ref: '#/components/schemas/Labels'
+ scrapePool:
+ type: string
+ description: Name of the scrape pool.
+ required:
+ - discoveredLabels
+ - scrapePool
+ additionalProperties: false
+ description: Dropped target information.
+ TargetDiscovery:
+ type: object
+ properties:
+ activeTargets:
+ type: array
+ items:
+ $ref: '#/components/schemas/Target'
+ droppedTargets:
+ type: array
+ items:
+ $ref: '#/components/schemas/DroppedTarget'
+ droppedTargetCounts:
+ type: object
+ additionalProperties:
+ type: integer
+ format: int64
+ required:
+ - activeTargets
+ - droppedTargets
+ - droppedTargetCounts
+ additionalProperties: false
+ description: Target discovery information including active and dropped targets.
+ TargetsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/TargetDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for targets endpoint.
+ TargetMetadataOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/MetricMetadata'
+ example:
+ - help: The current health status of the target
+ metric: up
+ target:
+ instance: localhost:9090
+ job: prometheus
+ type: gauge
+ unit: ""
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of metric metadata.
+ ScrapePoolsDiscovery:
+ type: object
+ properties:
+ scrapePools:
+ type: array
+ items:
+ type: string
+ required:
+ - scrapePools
+ additionalProperties: false
+ description: List of all configured scrape pools.
+ ScrapePoolsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/ScrapePoolsDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for scrape pools endpoint.
+ Config:
+ type: object
+ properties:
+ source_labels:
+ type: array
+ items:
+ type: string
+ description: Source labels for relabeling.
+ separator:
+ type: string
+ description: Separator for source label values.
+ regex:
+ type: string
+ description: Regular expression for matching.
+ modulus:
+ type: integer
+ format: int64
+ description: Modulus for hash-based relabeling.
+ target_label:
+ type: string
+ description: Target label name.
+ replacement:
+ type: string
+ description: Replacement value.
+ action:
+ type: string
+ description: Relabel action.
+ additionalProperties: false
+ description: Relabel configuration.
+ RelabelStep:
+ type: object
+ properties:
+ rule:
+ $ref: '#/components/schemas/Config'
+ output:
+ $ref: '#/components/schemas/Labels'
+ keep:
+ type: boolean
+ required:
+ - rule
+ - output
+ - keep
+ additionalProperties: false
+ description: Relabel step showing the rule, output, and whether the target was kept.
+ RelabelStepsResponse:
+ type: object
+ properties:
+ steps:
+ type: array
+ items:
+ $ref: '#/components/schemas/RelabelStep'
+ required:
+ - steps
+ additionalProperties: false
+ description: Relabeling steps response.
+ TargetRelabelStepsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RelabelStepsResponse'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for target relabel steps endpoint.
+ RuleGroup:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of the rule group.
+ file:
+ type: string
+ description: File containing the rule group.
+ rules:
+ type: array
+ items:
+ type: object
+ description: Rule definition.
+ description: Rules in this group.
+ interval:
+ type: number
+ format: double
+ description: Evaluation interval in seconds.
+ limit:
+ type: integer
+ format: int64
+ description: Maximum number of alerts for this group.
+ evaluationTime:
+ type: number
+ format: double
+ description: Time taken to evaluate the group in seconds.
+ lastEvaluation:
+ type: string
+ format: date-time
+ description: Timestamp of the last evaluation.
+ required:
+ - name
+ - file
+ - rules
+ - interval
+ - limit
+ - evaluationTime
+ - lastEvaluation
+ additionalProperties: false
+ description: Rule group information.
+ RuleDiscovery:
+ type: object
+ properties:
+ groups:
+ type: array
+ items:
+ $ref: '#/components/schemas/RuleGroup'
+ groupNextToken:
+ type: string
+ description: Pagination token for the next page of groups.
+ required:
+ - groups
+ additionalProperties: false
+ description: Rule discovery information containing all rule groups.
+ RulesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RuleDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for rules endpoint.
+ Alert:
+ type: object
+ properties:
+ labels:
+ $ref: '#/components/schemas/Labels'
+ annotations:
+ $ref: '#/components/schemas/Labels'
+ state:
+ type: string
+ description: State of the alert (pending, firing, or inactive).
+ value:
+ type: string
+ description: Value of the alert expression.
+ activeAt:
+ type: string
+ format: date-time
+ description: Timestamp when the alert became active.
+ keepFiringSince:
+ type: string
+ format: date-time
+          description: Timestamp since which the alert has been kept firing.
+ required:
+ - labels
+ - annotations
+ - state
+ - value
+ additionalProperties: false
+ description: Alert information.
+ AlertDiscovery:
+ type: object
+ properties:
+ alerts:
+ type: array
+ items:
+ $ref: '#/components/schemas/Alert'
+ required:
+ - alerts
+ additionalProperties: false
+ description: Alert discovery information containing all active alerts.
+ AlertsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/AlertDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for alerts endpoint.
+ AlertmanagerTarget:
+ type: object
+ properties:
+ url:
+ type: string
+ description: URL of the Alertmanager instance.
+ required:
+ - url
+ additionalProperties: false
+ description: Alertmanager target information.
+ AlertmanagerDiscovery:
+ type: object
+ properties:
+ activeAlertmanagers:
+ type: array
+ items:
+ $ref: '#/components/schemas/AlertmanagerTarget'
+ droppedAlertmanagers:
+ type: array
+ items:
+ $ref: '#/components/schemas/AlertmanagerTarget'
+ required:
+ - activeAlertmanagers
+ - droppedAlertmanagers
+ additionalProperties: false
+ description: Alertmanager discovery information including active and dropped instances.
+ AlertmanagersOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/AlertmanagerDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for alertmanagers endpoint.
+ StatusConfigData:
+ type: object
+ properties:
+ yaml:
+ type: string
+ description: Prometheus configuration in YAML format.
+ required:
+ - yaml
+ additionalProperties: false
+ description: Prometheus configuration.
+ StatusConfigOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusConfigData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status config endpoint.
+ RuntimeInfo:
+ type: object
+ properties:
+ startTime:
+ type: string
+ format: date-time
+ CWD:
+ type: string
+ hostname:
+ type: string
+ serverTime:
+ type: string
+ format: date-time
+ reloadConfigSuccess:
+ type: boolean
+ lastConfigTime:
+ type: string
+ format: date-time
+ corruptionCount:
+ type: integer
+ format: int64
+ goroutineCount:
+ type: integer
+ format: int64
+ GOMAXPROCS:
+ type: integer
+ format: int64
+ GOMEMLIMIT:
+ type: integer
+ format: int64
+ GOGC:
+ type: string
+ GODEBUG:
+ type: string
+ storageRetention:
+ type: string
+ required:
+ - startTime
+ - CWD
+ - hostname
+ - serverTime
+ - reloadConfigSuccess
+ - lastConfigTime
+ - corruptionCount
+ - goroutineCount
+ - GOMAXPROCS
+ - GOMEMLIMIT
+ - GOGC
+ - GODEBUG
+ - storageRetention
+ additionalProperties: false
+ description: Prometheus runtime information.
+ StatusRuntimeInfoOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RuntimeInfo'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status runtime info endpoint.
+ PrometheusVersion:
+ type: object
+ properties:
+ version:
+ type: string
+ revision:
+ type: string
+ branch:
+ type: string
+ buildUser:
+ type: string
+ buildDate:
+ type: string
+ goVersion:
+ type: string
+ required:
+ - version
+ - revision
+ - branch
+ - buildUser
+ - buildDate
+ - goVersion
+ additionalProperties: false
+ description: Prometheus version information.
+ StatusBuildInfoOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/PrometheusVersion'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status build info endpoint.
+ StatusFlagsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: object
+ additionalProperties:
+ type: string
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status flags endpoint.
+ HeadStats:
+ type: object
+ properties:
+ numSeries:
+ type: integer
+ format: int64
+ numLabelPairs:
+ type: integer
+ format: int64
+ chunkCount:
+ type: integer
+ format: int64
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ required:
+ - numSeries
+ - numLabelPairs
+ - chunkCount
+ - minTime
+ - maxTime
+ additionalProperties: false
+ description: TSDB head statistics.
+ TSDBStat:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: integer
+ format: int64
+ required:
+ - name
+ - value
+ additionalProperties: false
+ description: TSDB statistic.
+ TSDBStatus:
+ type: object
+ properties:
+ headStats:
+ $ref: '#/components/schemas/HeadStats'
+ seriesCountByMetricName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ labelValueCountByLabelName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ memoryInBytesByLabelName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ seriesCountByLabelValuePair:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ required:
+ - headStats
+ - seriesCountByMetricName
+ - labelValueCountByLabelName
+ - memoryInBytesByLabelName
+ - seriesCountByLabelValuePair
+ additionalProperties: false
+ description: TSDB status information.
+ StatusTSDBOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/TSDBStatus'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status TSDB endpoint.
+ BlockDesc:
+ type: object
+ properties:
+ ulid:
+ type: string
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ required:
+ - ulid
+ - minTime
+ - maxTime
+ additionalProperties: false
+ description: Block descriptor.
+ BlockStats:
+ type: object
+ properties:
+ numSamples:
+ type: integer
+ format: int64
+ numSeries:
+ type: integer
+ format: int64
+ numChunks:
+ type: integer
+ format: int64
+ numTombstones:
+ type: integer
+ format: int64
+ numFloatSamples:
+ type: integer
+ format: int64
+ numHistogramSamples:
+ type: integer
+ format: int64
+ additionalProperties: false
+ description: Block statistics.
+ BlockMetaCompaction:
+ type: object
+ properties:
+ level:
+ type: integer
+ format: int64
+ sources:
+ type: array
+ items:
+ type: string
+ parents:
+ type: array
+ items:
+ $ref: '#/components/schemas/BlockDesc'
+ failed:
+ type: boolean
+ deletable:
+ type: boolean
+ hints:
+ type: array
+ items:
+ type: string
+ required:
+ - level
+ additionalProperties: false
+ description: Block compaction metadata.
+ BlockMeta:
+ type: object
+ properties:
+ ulid:
+ type: string
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ stats:
+ $ref: '#/components/schemas/BlockStats'
+ compaction:
+ $ref: '#/components/schemas/BlockMetaCompaction'
+ version:
+ type: integer
+ format: int64
+ required:
+ - ulid
+ - minTime
+ - maxTime
+ - compaction
+ - version
+ additionalProperties: false
+ description: Block metadata.
+ StatusTSDBBlocksData:
+ type: object
+ properties:
+ blocks:
+ type: array
+ items:
+ $ref: '#/components/schemas/BlockMeta'
+ required:
+ - blocks
+ additionalProperties: false
+ description: TSDB blocks information.
+ StatusTSDBBlocksOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusTSDBBlocksData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status TSDB blocks endpoint.
+ StatusWALReplayData:
+ type: object
+ properties:
+ min:
+ type: integer
+ format: int64
+ max:
+ type: integer
+ format: int64
+ current:
+ type: integer
+ format: int64
+ required:
+ - min
+ - max
+ - current
+ additionalProperties: false
+ description: WAL replay status.
+ StatusWALReplayOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusWALReplayData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status WAL replay endpoint.
+ DeleteSeriesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ additionalProperties: false
+ description: Response body containing only status.
+ CleanTombstonesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ additionalProperties: false
+ description: Response body containing only status.
+ DataStruct:
+ type: object
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ additionalProperties: false
+ description: Generic data structure with a name field.
+ SnapshotOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/DataStruct'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for snapshot endpoint.
+ Notification:
+ type: object
+ properties:
+ text:
+ type: string
+ date:
+ type: string
+ format: date-time
+ active:
+ type: boolean
+ required:
+ - text
+ - date
+ - active
+ additionalProperties: false
+ description: Server notification.
+ NotificationsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Notification'
+ example:
+ - active: true
+ date: "2023-07-21T20:00:00.000Z"
+ text: Server is running
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of notifications.
+ FeaturesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+tags:
+ - name: query
+ description: Query and evaluate PromQL expressions.
+ - name: metadata
+ description: Retrieve metric metadata such as type and unit.
+ - name: labels
+ description: Query label names and values.
+ - name: series
+ description: Query and manage time series.
+ - name: targets
+ description: Retrieve target and scrape pool information.
+ - name: rules
+ description: Query recording and alerting rules.
+ - name: alerts
+ description: Query active alerts and alertmanager discovery.
+ - name: status
+ description: Retrieve server status and configuration.
+ - name: admin
+ description: Administrative operations for TSDB management.
+ - name: features
+ description: Query enabled features.
+ - name: remote
+ description: Remote read and write endpoints.
+ - name: otlp
+ description: OpenTelemetry Protocol metrics ingestion.
+ - name: notifications
+ description: Server notifications and events.
diff --git a/web/api/v1/testdata/openapi_3.2_golden.yaml b/web/api/v1/testdata/openapi_3.2_golden.yaml
new file mode 100644
index 0000000000..f122408013
--- /dev/null
+++ b/web/api/v1/testdata/openapi_3.2_golden.yaml
@@ -0,0 +1,4452 @@
+openapi: 3.2.0
+info:
+ title: Prometheus API
+  description: Prometheus is an open-source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach.
+ contact:
+ name: Prometheus Community
+ url: https://prometheus.io/community/
+ version: 0.0.1-undefined
+servers:
+ - url: /api/v1
+paths:
+ /query:
+ get:
+ tags:
+ - query
+ summary: Evaluate an instant query
+ operationId: query
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: time
+ in: query
+ description: The evaluation timestamp (optional, defaults to current time).
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: query
+ in: query
+ description: The PromQL query to execute.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: up
+ - name: timeout
+ in: query
+ description: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 30s
+ - name: lookback_delta
+ in: query
+ description: Override the lookback period for this query. Optional.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 5m
+ - name: stats
+ in: query
+ description: When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: all
+ responses:
+ "200":
+ description: Query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryOutputBody'
+ examples:
+ vectorResult:
+ summary: 'Instant vector query: up'
+ value: {"status": "success", "data": {"resultType": "vector", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "value": [1767436620, "1"]}, {"metric": {"__name__": "up", "env": "demo", "instance": "demo.prometheus.io:9093", "job": "alertmanager"}, "value": [1767436620, "1"]}]}}
+ scalarResult:
+ summary: 'Scalar query: scalar(42)'
+ value:
+ data:
+ result:
+ - 1767436620
+ - "42"
+ resultType: scalar
+ status: success
+ matrixResult:
+ summary: 'Range vector query: up[5m]'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767436320, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Evaluate an instant query
+ operationId: query-post
+ requestBody:
+ description: Submit an instant query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryPostInputBody'
+ examples:
+ simpleQuery:
+ summary: Simple instant query
+ value:
+ query: up
+ queryWithTime:
+ summary: Query with specific timestamp
+ value:
+ query: up{job="prometheus"}
+ time: "2026-01-02T13:37:00.000Z"
+ queryWithLimit:
+ summary: Query with limit and statistics
+ value:
+ limit: 100
+ query: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ stats: all
+ required: true
+ responses:
+ "200":
+ description: Instant query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryOutputBody'
+ examples:
+ vectorResult:
+ summary: 'Instant vector query: up'
+ value: {"status": "success", "data": {"resultType": "vector", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "value": [1767436620, "1"]}, {"metric": {"__name__": "up", "env": "demo", "instance": "demo.prometheus.io:9093", "job": "alertmanager"}, "value": [1767436620, "1"]}]}}
+ scalarResult:
+ summary: 'Scalar query: scalar(42)'
+ value:
+ data:
+ result:
+ - 1767436620
+ - "42"
+ resultType: scalar
+ status: success
+ matrixResult:
+ summary: 'Range vector query: up[5m]'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767436320, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing instant query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
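+  # Example invocation against a local server:
+  #   curl 'http://localhost:9090/api/v1/query?query=up'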
+ /query_range:
+ get:
+ tags:
+ - query
+ summary: Evaluate a range query
+ operationId: query-range
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: start
+ in: query
+ description: The start time of the query.
+ required: true
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: The end time of the query.
+ required: true
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: step
+ in: query
+ description: The step size of the query.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 15s
+ - name: query
+ in: query
+ description: The query to execute.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ - name: timeout
+ in: query
+ description: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 30s
+ - name: lookback_delta
+ in: query
+ description: Override the lookback period for this query. Optional.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 5m
+ - name: stats
+ in: query
+ description: When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: all
+ responses:
+ "200":
+ description: Range query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryRangeOutputBody'
+ examples:
+ matrixResult:
+ summary: 'Range query: rate(prometheus_http_requests_total[5m])'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767433020, "1"], [1767434820, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing range query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Evaluate a range query
+ operationId: query-range-post
+ requestBody:
+ description: Submit a range query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryRangePostInputBody'
+ examples:
+ basicRange:
+ summary: Basic range query
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: up
+ start: "2026-01-02T12:37:00.000Z"
+ step: 15s
+ rateQuery:
+ summary: Rate calculation over time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ start: "2026-01-02T12:37:00.000Z"
+ step: 30s
+ timeout: 30s
+ required: true
+ responses:
+ "200":
+ description: Range query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryRangeOutputBody'
+ examples:
+ matrixResult:
+ summary: 'Range query: rate(prometheus_http_requests_total[5m])'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767433020, "1"], [1767434820, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing range query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
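+  # Example invocation, reusing the RFC3339 timestamps from the examples above:
+  #   curl 'http://localhost:9090/api/v1/query_range?query=up&start=2026-01-02T12:37:00Z&end=2026-01-02T13:37:00Z&step=15s'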
+ /query_exemplars:
+ get:
+ tags:
+ - query
+ summary: Query exemplars
+ operationId: query-exemplars
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for exemplars query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for exemplars query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: query
+ in: query
+ description: PromQL query to extract exemplars for.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus_http_requests_total
+ responses:
+ "200":
+ description: Exemplars retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsOutputBody'
+ examples:
+ exemplarsResult:
+ summary: Exemplars for a metric with trace IDs
+ value:
+ data:
+ - exemplars:
+ - labels:
+ traceID: abc123def456
+ timestamp: 1.689956451781e+09
+ value: "1.5"
+ seriesLabels:
+ __name__: http_requests_total
+ job: api-server
+ method: GET
+ status: success
+ default:
+ description: Error retrieving exemplars.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Query exemplars
+ operationId: query-exemplars-post
+ requestBody:
+ description: Submit an exemplars query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsPostInputBody'
+ examples:
+ basicExemplar:
+ summary: Query exemplars for a metric
+ value:
+ query: prometheus_http_requests_total
+ exemplarWithTimeRange:
+ summary: Exemplars within specific time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: prometheus_http_requests_total{job="prometheus"}
+ start: "2026-01-02T12:37:00.000Z"
+ required: true
+ responses:
+ "200":
+ description: Exemplars query completed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsOutputBody'
+ examples:
+ exemplarsResult:
+ summary: Exemplars for a metric with trace IDs
+ value:
+ data:
+ - exemplars:
+ - labels:
+ traceID: abc123def456
+ timestamp: 1.689956451781e+09
+ value: "1.5"
+ seriesLabels:
+ __name__: http_requests_total
+ job: api-server
+ method: GET
+ status: success
+ default:
+ description: Error processing exemplars query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
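+  # Example invocation (non-empty results require exemplar storage, see the
+  # exemplar-storage feature flag):
+  #   curl 'http://localhost:9090/api/v1/query_exemplars?query=prometheus_http_requests_total&start=2026-01-02T12:37:00Z&end=2026-01-02T13:37:00Z'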
+ /format_query:
+ get:
+ tags:
+ - query
+ summary: Format a PromQL query
+ operationId: format-query
+ parameters:
+ - name: query
+ in: query
+ description: PromQL expression to format.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: sum(rate(http_requests_total[5m])) by (job)
+ responses:
+ "200":
+ description: Query formatted successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FormatQueryOutputBody'
+ examples:
+ formattedQuery:
+ summary: Formatted PromQL query
+ value:
+ data: sum by(job, status) (rate(http_requests_total[5m]))
+ status: success
+ default:
+ description: Error formatting query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Format a PromQL query
+ operationId: format-query-post
+ requestBody:
+ description: Submit a PromQL query to format. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/FormatQueryPostInputBody'
+ examples:
+ simpleFormat:
+ summary: Format a simple query
+ value:
+ query: up{job="prometheus"}
+ complexFormat:
+ summary: Format a complex query
+ value:
+ query: sum(rate(http_requests_total[5m])) by (job, status)
+ required: true
+ responses:
+ "200":
+ description: Query formatting completed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FormatQueryOutputBody'
+ examples:
+ formattedQuery:
+ summary: Formatted PromQL query
+ value:
+ data: sum by(job, status) (rate(http_requests_total[5m]))
+ status: success
+ default:
+ description: Error formatting query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
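+  # Illustrative only: formatting a PromQL expression with curl; -G moves the
+  # URL-encoded data into the query string (base URL assumed as above):
+  #   curl -G 'http://localhost:9090/api/v1/format_query' \
+  #     --data-urlencode 'query=sum(rate(http_requests_total[5m])) by (job)'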
+ /parse_query:
+ get:
+ tags:
+ - query
+ summary: Parse a PromQL query
+ operationId: parse-query
+ parameters:
+ - name: query
+ in: query
+ description: PromQL expression to parse.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: up{job="prometheus"}
+ responses:
+ "200":
+ description: Query parsed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ParseQueryOutputBody'
+ examples:
+ parsedQuery:
+ summary: Parsed PromQL expression tree
+ value:
+ data:
+ resultType: vector
+ status: success
+ default:
+ description: Error parsing query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Parse a PromQL query
+ operationId: parse-query-post
+ requestBody:
+ description: Submit a PromQL query to parse. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/ParseQueryPostInputBody'
+ examples:
+ simpleParse:
+ summary: Parse a simple query
+ value:
+ query: up
+ complexParse:
+ summary: Parse a complex query
+ value:
+ query: rate(http_requests_total{job="api"}[5m])
+ required: true
+ responses:
+ "200":
+ description: Query parsed successfully via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ParseQueryOutputBody'
+ examples:
+ parsedQuery:
+ summary: Parsed PromQL expression tree
+ value:
+ data:
+ resultType: vector
+ status: success
+ default:
+ description: Error parsing query via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
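+  # Illustrative only: the POST form variant; curl -d sends
+  # application/x-www-form-urlencoded by default (base URL assumed):
+  #   curl 'http://localhost:9090/api/v1/parse_query' -d 'query=up'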
+ /labels:
+ get:
+ tags:
+ - labels
+ summary: Get label names
+ operationId: labels
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for label names query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for label names query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+        description: Series selector argument that selects the series from which to read the label names.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of label names to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ responses:
+ "200":
+ description: Label names retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelsOutputBody'
+ examples:
+ labelNames:
+ summary: List of label names
+ value:
+ data:
+ - __name__
+ - active
+ - address
+ - alertmanager
+ - alertname
+ - alertstate
+ - backend
+ - branch
+ - code
+ - collector
+ - component
+ - device
+ - env
+ - endpoint
+ - fstype
+ - handler
+ - instance
+ - job
+ - le
+ - method
+ - mode
+ - name
+ status: success
+ default:
+ description: Error retrieving label names.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - labels
+ summary: Get label names
+ operationId: labels-post
+ requestBody:
+ description: Submit a label names query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/LabelsPostInputBody'
+ examples:
+ allLabels:
+ summary: Get all label names
+ value: {}
+ labelsWithTimeRange:
+ summary: Get label names within time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ start: "2026-01-02T12:37:00.000Z"
+ labelsWithMatch:
+ summary: Get label names matching series selector
+ value:
+ match[]:
+ - up
+ - process_start_time_seconds{job="prometheus"}
+ required: true
+ responses:
+ "200":
+ description: Label names retrieved successfully via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelsOutputBody'
+ examples:
+ labelNames:
+ summary: List of label names
+ value:
+ data:
+ - __name__
+ - active
+ - address
+ - alertmanager
+ - alertname
+ - alertstate
+ - backend
+ - branch
+ - code
+ - collector
+ - component
+ - device
+ - env
+ - endpoint
+ - fstype
+ - handler
+ - instance
+ - job
+ - le
+ - method
+ - mode
+ - name
+ status: success
+ default:
+ description: Error retrieving label names via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
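+  # Illustrative only (assumed base URL http://localhost:9090/api/v1): label
+  # names for all series, then restricted to one selector via the POST form:
+  #   curl 'http://localhost:9090/api/v1/labels'
+  #   curl 'http://localhost:9090/api/v1/labels' --data-urlencode 'match[]=up'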
+ /label/{name}/values:
+ get:
+ tags:
+ - labels
+ summary: Get label values
+ operationId: label-values
+ parameters:
+ - name: name
+ in: path
+ description: Label name.
+ required: true
+ schema:
+ type: string
+ - name: start
+ in: query
+ description: Start timestamp for label values query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for label values query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+        description: Series selector argument that selects the series from which to read the label values.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of label values to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 1000
+ responses:
+ "200":
+ description: Label values retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelValuesOutputBody'
+ examples:
+ labelValues:
+ summary: List of values for a label
+ value:
+ data:
+ - alertmanager
+ - blackbox
+ - caddy
+ - cadvisor
+ - grafana
+ - node
+ - prometheus
+ - random
+ status: success
+ default:
+ description: Error retrieving label values.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
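+  # Illustrative only: values of the job label, with the {name} path
+  # parameter filled in (assumed base URL http://localhost:9090/api/v1):
+  #   curl 'http://localhost:9090/api/v1/label/job/values?limit=1000'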
+ /series:
+ get:
+ tags:
+ - series
+ summary: Find series by label matchers
+ operationId: series
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for series query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for series query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+        description: Series selector argument that selects the series to return.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of series to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ responses:
+ "200":
+ description: Series returned matching the provided label matchers.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesOutputBody'
+ examples:
+ seriesList:
+ summary: List of series matching the selector
+ value:
+ data:
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:8080
+ job: cadvisor
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9100
+ job: node
+ - __name__: up
+ instance: demo.prometheus.io:3000
+ job: grafana
+ - __name__: up
+ instance: demo.prometheus.io:8996
+ job: random
+ status: success
+ default:
+ description: Error retrieving series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - series
+ summary: Find series by label matchers
+ operationId: series-post
+ requestBody:
+ description: Submit a series query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/SeriesPostInputBody'
+ examples:
+ seriesMatch:
+ summary: Find series by label matchers
+ value:
+ match[]:
+ - up
+ seriesWithTimeRange:
+ summary: Find series with time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ match[]:
+ - up
+ - process_cpu_seconds_total{job="prometheus"}
+ start: "2026-01-02T12:37:00.000Z"
+ required: true
+ responses:
+ "200":
+ description: Series returned matching the provided label matchers via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesOutputBody'
+ examples:
+ seriesList:
+ summary: List of series matching the selector
+ value:
+ data:
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:8080
+ job: cadvisor
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9100
+ job: node
+ - __name__: up
+ instance: demo.prometheus.io:3000
+ job: grafana
+ - __name__: up
+ instance: demo.prometheus.io:8996
+ job: random
+ status: success
+ default:
+ description: Error retrieving series via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ delete:
+ tags:
+ - series
+ summary: Delete series
+ description: 'Delete series matching selectors. Note: This is deprecated, use POST /admin/tsdb/delete_series instead.'
+ operationId: delete-series
+ responses:
+ "200":
+ description: Series marked for deletion.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesDeleteOutputBody'
+ examples:
+ seriesDeleted:
+ summary: Series marked for deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
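+  # Illustrative only: at least one match[] selector is required here;
+  # --data-urlencode keeps the braces and quotes intact (base URL assumed):
+  #   curl -G 'http://localhost:9090/api/v1/series' \
+  #     --data-urlencode 'match[]={job="prometheus"}' \
+  #     --data-urlencode 'start=2026-01-02T12:37:00Z' \
+  #     --data-urlencode 'end=2026-01-02T13:37:00Z'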
+ /metadata:
+ get:
+ tags:
+ - metadata
+ summary: Get metadata
+ operationId: get-metadata
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: limit_per_metric
+ in: query
+ description: The maximum number of metadata entries per metric.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ - name: metric
+ in: query
+ description: A metric name to filter metadata for.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: http_requests_total
+ responses:
+ "200":
+ description: Metric metadata retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/MetadataOutputBody'
+ examples:
+ metricMetadata:
+ summary: Metadata for metrics
+ value:
+ data:
+ go_gc_stack_starting_size_bytes:
+ - help: The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes.
+ type: gauge
+ unit: ""
+ prometheus_rule_group_iterations_missed_total:
+ - help: The total number of rule group evaluations missed due to slow rule group evaluation.
+ type: counter
+ unit: ""
+ prometheus_sd_updates_total:
+ - help: Total number of update events sent to the SD consumers.
+ type: counter
+ unit: ""
+ status: success
+ default:
+ description: Error retrieving metadata.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
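+  # Illustrative only: metadata for a single metric, capped per metric
+  # (assumed base URL http://localhost:9090/api/v1):
+  #   curl 'http://localhost:9090/api/v1/metadata?metric=http_requests_total&limit_per_metric=10'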
+ /scrape_pools:
+ get:
+ tags:
+ - targets
+ summary: Get scrape pools
+ operationId: get-scrape-pools
+ responses:
+ "200":
+ description: Scrape pools retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ScrapePoolsOutputBody'
+ examples:
+ scrapePoolsList:
+ summary: List of scrape pool names
+ value:
+ data:
+ scrapePools:
+ - alertmanager
+ - blackbox
+ - caddy
+ - cadvisor
+ - grafana
+ - node
+ - prometheus
+ - random
+ status: success
+ default:
+ description: Error retrieving scrape pools.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets:
+ get:
+ tags:
+ - targets
+ summary: Get targets
+ operationId: get-targets
+ parameters:
+ - name: scrapePool
+ in: query
+ description: Filter targets by scrape pool name.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus
+ - name: state
+ in: query
+ description: 'Filter by state: active, dropped, or any.'
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: active
+ responses:
+ "200":
+ description: Target discovery information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetsOutputBody'
+ examples:
+ targetsList:
+ summary: Active and dropped targets
+ value:
+ data:
+ activeTargets:
+ - discoveredLabels:
+ __address__: demo.prometheus.io:9093
+ __meta_filepath: /etc/prometheus/file_sd/alertmanager.yml
+ __metrics_path__: /metrics
+ __scheme__: http
+ env: demo
+ job: alertmanager
+ globalUrl: http://demo.prometheus.io:9093/metrics
+ health: up
+ labels:
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ lastError: ""
+ lastScrape: "2026-01-02T13:36:40.200Z"
+ lastScrapeDuration: 0.006576866
+ scrapeInterval: 15s
+ scrapePool: alertmanager
+ scrapeTimeout: 10s
+ scrapeUrl: http://demo.prometheus.io:9093/metrics
+ droppedTargetCounts:
+ alertmanager: 0
+ blackbox: 0
+ caddy: 0
+ cadvisor: 0
+ grafana: 0
+ node: 0
+ prometheus: 0
+ random: 0
+ droppedTargets: []
+ status: success
+ default:
+ description: Error retrieving targets.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
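+  # Illustrative only: active targets in one scrape pool (assumed base URL
+  # http://localhost:9090/api/v1):
+  #   curl 'http://localhost:9090/api/v1/targets?state=active&scrapePool=prometheus'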
+ /targets/metadata:
+ get:
+ tags:
+ - targets
+ summary: Get targets metadata
+ operationId: get-targets-metadata
+ parameters:
+ - name: match_target
+ in: query
+ description: Label selector to filter targets.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: '{job="prometheus"}'
+ - name: metric
+ in: query
+ description: Metric name to retrieve metadata for.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: http_requests_total
+ - name: limit
+ in: query
+ description: Maximum number of targets to match.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ responses:
+ "200":
+ description: Target metadata retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetMetadataOutputBody'
+ examples:
+ targetMetadata:
+ summary: Metadata for targets
+ value:
+ data:
+ - help: The current health status of the target
+ metric: up
+ target:
+ instance: localhost:9090
+ job: prometheus
+ type: gauge
+ unit: ""
+ status: success
+ default:
+ description: Error retrieving target metadata.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
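+  # Illustrative only: metadata for the up metric across targets matching a
+  # label selector (assumed base URL as above):
+  #   curl -G 'http://localhost:9090/api/v1/targets/metadata' \
+  #     --data-urlencode 'match_target={job="prometheus"}' \
+  #     --data-urlencode 'metric=up'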
+ /targets/relabel_steps:
+ get:
+ tags:
+ - targets
+ summary: Get targets relabel steps
+ operationId: get-targets-relabel-steps
+ parameters:
+ - name: scrapePool
+ in: query
+ description: Name of the scrape pool.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus
+ - name: labels
+ in: query
+ description: JSON-encoded labels to apply relabel rules to.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: '{"__address__":"localhost:9090","job":"prometheus"}'
+ responses:
+ "200":
+ description: Relabel steps retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetRelabelStepsOutputBody'
+ examples:
+ relabelSteps:
+ summary: Relabel steps for a target
+ value:
+ data:
+ steps:
+ - keep: true
+ output:
+ __address__: localhost:9090
+ instance: localhost:9090
+ job: prometheus
+ rule:
+ action: replace
+ regex: (.*)
+ replacement: $1
+ source_labels:
+ - __address__
+ target_label: instance
+ status: success
+ default:
+ description: Error retrieving relabel steps.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
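+  # Illustrative only: both parameters are required; labels is itself a JSON
+  # document, so --data-urlencode keeps it intact (base URL assumed):
+  #   curl -G 'http://localhost:9090/api/v1/targets/relabel_steps' \
+  #     --data-urlencode 'scrapePool=prometheus' \
+  #     --data-urlencode 'labels={"__address__":"localhost:9090","job":"prometheus"}'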
+ /rules:
+ get:
+ tags:
+ - rules
+ summary: Get alerting and recording rules
+ operationId: rules
+ parameters:
+ - name: type
+ in: query
+ description: 'Filter by rule type: alert or record.'
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: alert
+ - name: rule_name[]
+ in: query
+ description: Filter by rule name.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - HighErrorRate
+ - name: rule_group[]
+ in: query
+ description: Filter by rule group name.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - example_alerts
+ - name: file[]
+ in: query
+ description: Filter by file path.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - /etc/prometheus/rules.yml
+ - name: match[]
+ in: query
+ description: Label matchers to filter rules.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{severity="critical"}'
+ - name: exclude_alerts
+ in: query
+ description: Exclude active alerts from response.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ - name: group_limit
+ in: query
+ description: Maximum number of rule groups to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: group_next_token
+ in: query
+        description: Pagination token for the next page of rule groups.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: abc123
+ responses:
+ "200":
+ description: Rules retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RulesOutputBody'
+ examples:
+ ruleGroups:
+ summary: Alerting and recording rules
+ value:
+ data:
+ groups:
+ - evaluationTime: 0.000561635
+ file: /etc/prometheus/rules/ansible_managed.yml
+ interval: 15
+ lastEvaluation: "2026-01-02T13:36:56.874Z"
+ limit: 0
+ name: ansible managed alert rules
+ rules:
+ - annotations:
+ description: This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty.
+ summary: Ensure entire alerting pipeline is functional
+ duration: 600
+ evaluationTime: 0.000356688
+ health: ok
+ keepFiringFor: 0
+ labels:
+ severity: warning
+ lastEvaluation: "2026-01-02T13:36:56.874Z"
+ name: Watchdog
+ query: vector(1)
+ state: firing
+ type: alerting
+ status: success
+ default:
+ description: Error retrieving rules.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
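+  # Illustrative only: alerting rules only, with active alerts excluded from
+  # the response (assumed base URL http://localhost:9090/api/v1):
+  #   curl 'http://localhost:9090/api/v1/rules?type=alert&exclude_alerts=true'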
+ /alerts:
+ get:
+ tags:
+ - alerts
+ summary: Get active alerts
+ operationId: alerts
+ responses:
+ "200":
+ description: Active alerts retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AlertsOutputBody'
+ examples:
+ activeAlerts:
+ summary: Currently active alerts
+ value:
+ data:
+ alerts:
+ - activeAt: "2026-01-02T13:30:00.000Z"
+ annotations:
+ description: This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty.
+ summary: Ensure entire alerting pipeline is functional
+ labels:
+ alertname: Watchdog
+ severity: warning
+ state: firing
+ value: "1e+00"
+ status: success
+ default:
+ description: Error retrieving alerts.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /alertmanagers:
+ get:
+ tags:
+ - alerts
+ summary: Get Alertmanager discovery
+ operationId: alertmanagers
+ responses:
+ "200":
+ description: Alertmanager targets retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AlertmanagersOutputBody'
+ examples:
+ alertmanagerDiscovery:
+ summary: Alertmanager discovery results
+ value:
+ data:
+ activeAlertmanagers:
+ - url: http://demo.prometheus.io:9093/api/v2/alerts
+ droppedAlertmanagers: []
+ status: success
+ default:
+ description: Error retrieving Alertmanager targets.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/config:
+ get:
+ tags:
+ - status
+ summary: Get status config
+ operationId: get-status-config
+ responses:
+ "200":
+ description: Configuration retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusConfigOutputBody'
+ examples:
+ configYAML:
+ summary: Prometheus configuration
+ value:
+ data:
+ yaml: |
+ global:
+ scrape_interval: 15s
+ scrape_timeout: 10s
+ evaluation_interval: 15s
+ external_labels:
+ environment: demo-prometheus-io
+ alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets:
+ - demo.prometheus.io:9093
+ rule_files:
+ - /etc/prometheus/rules/*.yml
+ status: success
+ default:
+ description: Error retrieving configuration.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/runtimeinfo:
+ get:
+ tags:
+ - status
+ summary: Get status runtimeinfo
+ operationId: get-status-runtimeinfo
+ responses:
+ "200":
+ description: Runtime information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusRuntimeInfoOutputBody'
+ examples:
+ runtimeInfo:
+ summary: Runtime information
+ value:
+ data:
+ CWD: /
+ GODEBUG: ""
+ GOGC: "75"
+ GOMAXPROCS: 2
+ GOMEMLIMIT: 3703818240
+ corruptionCount: 0
+ goroutineCount: 88
+ hostname: demo-prometheus-io
+ lastConfigTime: "2026-01-01T13:37:00.000Z"
+ reloadConfigSuccess: true
+ serverTime: "2026-01-02T13:37:00.000Z"
+ startTime: "2026-01-01T13:37:00.000Z"
+ storageRetention: 31d
+ status: success
+ default:
+ description: Error retrieving runtime information.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/buildinfo:
+ get:
+ tags:
+ - status
+ summary: Get status buildinfo
+ operationId: get-status-buildinfo
+ responses:
+ "200":
+ description: Build information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusBuildInfoOutputBody'
+ examples:
+ buildInfo:
+ summary: Build information
+ value:
+ data:
+ branch: HEAD
+ buildDate: 20251030-07:26:10
+ buildUser: root@08c890a84441
+ goVersion: go1.25.3
+ revision: 0a41f0000705c69ab8e0f9a723fc73e39ed62b07
+ version: 3.7.3
+ status: success
+ default:
+ description: Error retrieving build information.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/flags:
+ get:
+ tags:
+ - status
+ summary: Get status flags
+ operationId: get-status-flags
+ responses:
+ "200":
+ description: Command-line flags retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusFlagsOutputBody'
+ examples:
+ flags:
+ summary: Command-line flags
+ value:
+ data:
+ agent: "false"
+ alertmanager.notification-queue-capacity: "10000"
+ config.file: /etc/prometheus/prometheus.yml
+ enable-feature: exemplar-storage,native-histograms
+ query.max-concurrency: "20"
+ query.timeout: 2m
+ storage.tsdb.path: /prometheus
+ storage.tsdb.retention.time: 15d
+ web.console.libraries: /usr/share/prometheus/console_libraries
+ web.console.templates: /usr/share/prometheus/consoles
+ web.enable-admin-api: "true"
+ web.enable-lifecycle: "true"
+ web.listen-address: 0.0.0.0:9090
+ web.page-title: Prometheus Time Series Collection and Processing Server
+ status: success
+ default:
+ description: Error retrieving flags.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/tsdb:
+ get:
+ tags:
+ - status
+ summary: Get TSDB status
+ operationId: status-tsdb
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of items to return per category.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ responses:
+ "200":
+ description: TSDB status retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusTSDBOutputBody'
+ examples:
+ tsdbStats:
+ summary: TSDB statistics
+ value:
+ data:
+ headStats:
+ chunkCount: 37525
+ maxTime: 1767436620000
+ minTime: 1767362400712
+ numLabelPairs: 2512
+ numSeries: 9925
+ labelValueCountByLabelName:
+ - name: __name__
+ value: 5
+ - name: job
+ value: 3
+ memoryInBytesByLabelName:
+ - name: __name__
+ value: 1024
+ - name: job
+ value: 512
+ seriesCountByLabelValuePair:
+ - name: job=prometheus
+ value: 100
+ - name: instance=localhost:9090
+ value: 100
+ seriesCountByMetricName:
+ - name: up
+ value: 100
+ - name: http_requests_total
+ value: 500
+ status: success
+ default:
+ description: Error retrieving TSDB status.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
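+  # Illustrative only: top-10 cardinality statistics per category (assumed
+  # base URL http://localhost:9090/api/v1):
+  #   curl 'http://localhost:9090/api/v1/status/tsdb?limit=10'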
+ /status/tsdb/blocks:
+ get:
+ tags:
+ - status
+ summary: Get TSDB blocks information
+ operationId: status-tsdb-blocks
+ responses:
+ "200":
+ description: TSDB blocks information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusTSDBBlocksOutputBody'
+ examples:
+ tsdbBlocks:
+ summary: TSDB block information
+ value:
+ data:
+ blocks:
+ - compaction:
+ level: 4
+ sources:
+ - 01KBCJ7TR8A4QAJ3AA1J651P5S
+ - 01KBCS3J0E34567YPB8Y5W0E24
+ - 01KBCZZ9KRTYGG3E7HVQFGC3S3
+ maxTime: 1764763200000
+ minTime: 1764568801099
+ stats:
+ numChunks: 1073962
+ numSamples: 129505582
+ numSeries: 10661
+ ulid: 01KC4D6GXQA4CRHYKV78NEBVAE
+ version: 1
+ status: success
+ default:
+ description: Error retrieving TSDB blocks.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/walreplay:
+ get:
+ tags:
+ - status
+ summary: Get status walreplay
+ operationId: get-status-walreplay
+ responses:
+ "200":
+ description: WAL replay status retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusWALReplayOutputBody'
+ examples:
+ walReplay:
+ summary: WAL replay status
+ value:
+ data:
+ current: 3214
+ max: 3214
+ min: 3209
+ status: success
+ default:
+ description: Error retrieving WAL replay status.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/delete_series:
+ put:
+ tags:
+ - admin
+ summary: Delete series matching selectors via PUT
+      description: Deletes data for a selection of series in a time range using the PUT method.
+ operationId: deleteSeriesPut
+ parameters:
+ - name: match[]
+ in: query
+ description: Series selectors to identify series to delete.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{__name__=~"test.*"}'
+ - name: start
+ in: query
+ description: Start timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ responses:
+ "200":
+ description: Series deleted successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteSeriesOutputBody'
+ examples:
+ deletionSuccess:
+ summary: Successful series deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Delete series matching selectors
+ description: Deletes data for a selection of series in a time range.
+ operationId: deleteSeriesPost
+ parameters:
+ - name: match[]
+ in: query
+ description: Series selectors to identify series to delete.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{__name__=~"test.*"}'
+ - name: start
+ in: query
+ description: Start timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ responses:
+ "200":
+ description: Series deleted successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteSeriesOutputBody'
+ examples:
+ deletionSuccess:
+ summary: Successful series deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
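+  # Illustrative only: the admin endpoints require the server to run with
+  # --web.enable-admin-api; selectors can be sent as form data:
+  #   curl -X POST 'http://localhost:9090/api/v1/admin/tsdb/delete_series' \
+  #     --data-urlencode 'match[]={__name__=~"test.*"}'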
+ /admin/tsdb/clean_tombstones:
+ put:
+ tags:
+ - admin
+ summary: Clean tombstones in the TSDB via PUT
+      description: Removes deleted data from disk and cleans up existing tombstones using the PUT method.
+ operationId: cleanTombstonesPut
+ responses:
+ "200":
+ description: Tombstones cleaned successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CleanTombstonesOutputBody'
+ examples:
+ tombstonesCleaned:
+ summary: Tombstones cleaned successfully
+ value:
+ status: success
+ default:
+ description: Error cleaning tombstones via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Clean tombstones in the TSDB
+ description: Removes deleted data from disk and cleans up existing tombstones.
+ operationId: cleanTombstonesPost
+ responses:
+ "200":
+ description: Tombstones cleaned successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CleanTombstonesOutputBody'
+ examples:
+ tombstonesCleaned:
+ summary: Tombstones cleaned successfully
+ value:
+ status: success
+ default:
+ description: Error cleaning tombstones.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/snapshot:
+ put:
+ tags:
+ - admin
+ summary: Create a snapshot of the TSDB via PUT
+      description: Creates a snapshot of all current data using the PUT method.
+ operationId: snapshotPut
+ parameters:
+ - name: skip_head
+ in: query
+ description: If true, do not snapshot data in the head block.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ responses:
+ "200":
+ description: Snapshot created successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnapshotOutputBody'
+ examples:
+ snapshotCreated:
+ summary: Snapshot created successfully
+ value:
+ data:
+ name: 20260102T133700Z-a1b2c3d4e5f67890
+ status: success
+ default:
+ description: Error creating snapshot via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Create a snapshot of the TSDB
+ description: Creates a snapshot of all current data.
+ operationId: snapshotPost
+ parameters:
+ - name: skip_head
+ in: query
+ description: If true, do not snapshot data in the head block.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ responses:
+ "200":
+ description: Snapshot created successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnapshotOutputBody'
+ examples:
+ snapshotCreated:
+ summary: Snapshot created successfully
+ value:
+ data:
+ name: 20260102T133700Z-a1b2c3d4e5f67890
+ status: success
+ default:
+ description: Error creating snapshot.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
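+  # Illustrative only: also gated behind --web.enable-admin-api; the returned
+  # name identifies a directory under the snapshots folder of the data path:
+  #   curl -X POST 'http://localhost:9090/api/v1/admin/tsdb/snapshot?skip_head=false'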
+ /read:
+ post:
+ tags:
+ - remote
+ summary: Remote read endpoint
+      description: Prometheus remote read endpoint for federated queries. Accepts and returns Protocol Buffer-encoded data.
+ operationId: remoteRead
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /write:
+ post:
+ tags:
+ - remote
+ summary: Remote write endpoint
+      description: Prometheus remote write endpoint for sending metrics. Accepts Protocol Buffer-encoded write requests.
+ operationId: remoteWrite
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /otlp/v1/metrics:
+ post:
+ tags:
+ - otlp
+ summary: OTLP metrics write endpoint
+ description: OpenTelemetry Protocol metrics ingestion endpoint. Accepts OTLP/HTTP metrics in Protocol Buffer format.
+ operationId: otlpWrite
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /notifications:
+ get:
+ tags:
+ - notifications
+ summary: Get notifications
+ operationId: get-notifications
+ responses:
+ "200":
+ description: Notifications retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/NotificationsOutputBody'
+ examples:
+ notifications:
+ summary: Server notifications
+ value:
+ data:
+ - active: true
+ date: "2026-01-02T16:14:50.046Z"
+ text: Configuration reload has failed.
+ status: success
+ default:
+ description: Error retrieving notifications.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /notifications/live:
+ get:
+ tags:
+ - notifications
+ summary: Stream live notifications via Server-Sent Events
+ description: Subscribe to real-time server notifications using SSE. Each event contains a JSON-encoded Notification object in the data field.
+ operationId: notifications-live
+ responses:
+ "200":
+ description: Server-sent events stream established.
+ content:
+ text/event-stream:
+ itemSchema:
+ type: object
+ properties:
+ data:
+ type: string
+ contentSchema:
+ $ref: '#/components/schemas/Notification'
+ description: SSE data field containing JSON-encoded notification.
+ contentMediaType: application/json
+ title: Server Sent Event Message
+ required:
+ - data
+ additionalProperties: false
+ description: A single SSE message. The data field contains a JSON-encoded Notification object.
+ examples:
+ activeNotification:
+ summary: Active notification SSE message
+ description: An SSE message containing an active server notification.
+ value:
+ data: '{"text":"Configuration reload has failed.","date":"2026-01-02T16:14:50.046Z","active":true}'
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
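+  # Illustrative only: -N disables curl's output buffering so SSE messages
+  # appear as they arrive (assumed base URL http://localhost:9090/api/v1):
+  #   curl -N 'http://localhost:9090/api/v1/notifications/live'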
+ /features:
+ get:
+ tags:
+ - features
+ summary: Get features
+ operationId: get-features
+ responses:
+ "200":
+ description: Feature flags retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FeaturesOutputBody'
+ examples:
+ enabledFeatures:
+ summary: Enabled feature flags
+ value:
+ data:
+ - exemplar-storage
+ - remote-write-receiver
+ status: success
+ default:
+ description: Error retrieving features.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+components:
+ schemas:
+ Error:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ errorType:
+ type: string
+ description: Type of error that occurred.
+ example: bad_data
+ error:
+ type: string
+ description: Human-readable error message.
+ example: invalid parameter
+ required:
+ - status
+ - errorType
+ - error
+ additionalProperties: false
+ description: Error response.
+ Labels:
+ type: object
+ additionalProperties: true
+ description: Label set represented as a key-value map.
+ QueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/QueryData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for instant query.
+ QueryRangeOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/QueryData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for range query.
+ QueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The PromQL query to execute.'
+ example: up
+ time:
+ type: string
+ description: 'Form field: The evaluation timestamp (optional, defaults to current time).'
+ example: "2023-07-21T20:10:51.781Z"
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of metrics to return.'
+ example: 100
+ timeout:
+ type: string
+ description: 'Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).'
+ example: 30s
+ lookback_delta:
+ type: string
+ description: 'Form field: Override the lookback period for this query (optional).'
+ example: 5m
+ stats:
+ type: string
+ description: 'Form field: When provided, include query statistics in the response (the special value ''all'' enables more comprehensive statistics).'
+ example: all
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for instant query.
+ QueryRangePostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to execute.'
+ example: rate(http_requests_total[5m])
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:10:30.781Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T20:20:30.781Z"
+ step:
+ type: string
+ description: 'Form field: The step size of the query.'
+ example: 15s
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of metrics to return.'
+ example: 100
+ timeout:
+ type: string
+ description: 'Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).'
+ example: 30s
+ lookback_delta:
+ type: string
+ description: 'Form field: Override the lookback period for this query (optional).'
+ example: 5m
+ stats:
+ type: string
+ description: 'Form field: When provided, include query statistics in the response (the special value ''all'' enables more comprehensive statistics).'
+ example: all
+ required:
+ - query
+ - start
+ - end
+ - step
+ additionalProperties: false
+ description: POST request body for range query.
+ QueryExemplarsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ QueryExemplarsPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to execute.'
+ example: http_requests_total
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for exemplars query.
+ FormatQueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: string
+ description: Formatted query string.
+ example: sum by(status) (rate(http_requests_total[5m]))
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for format query endpoint.
+ FormatQueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to format.'
+ example: sum(rate(http_requests_total[5m])) by (status)
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for format query.
+ ParseQueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ ParseQueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to parse.'
+ example: sum(rate(http_requests_total[5m]))
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for parse query.
+ QueryData:
+ anyOf:
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - vector
+ result:
+ type: array
+ items:
+ anyOf:
+ - $ref: '#/components/schemas/FloatSample'
+ - $ref: '#/components/schemas/HistogramSample'
+ description: Array of samples (either float or histogram).
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - matrix
+ result:
+ type: array
+ items:
+ anyOf:
+ - $ref: '#/components/schemas/FloatSeries'
+ - $ref: '#/components/schemas/HistogramSeries'
+ description: Array of time series (either float or histogram).
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - scalar
+ result:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Scalar value as [timestamp, stringValue].
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - string
+ result:
+ type: array
+ items:
+ type: string
+ maxItems: 2
+ minItems: 2
+ description: String value as [timestamp, stringValue].
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ description: Query result data. The structure of 'result' depends on 'resultType'.
+ example:
+ result:
+ - metric:
+ __name__: up
+ job: prometheus
+ value:
+ - 1627845600
+ - "1"
+ resultType: vector
+ FloatSample:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ value:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and float value as [unixTimestamp, stringValue].
+ example:
+ - 1767436620
+ - "1"
+ required:
+ - metric
+ - value
+ additionalProperties: false
+ description: A sample with a float value.
+ HistogramSample:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ histogram:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - $ref: '#/components/schemas/HistogramValue'
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and histogram value as [unixTimestamp, histogramObject].
+ example:
+ - 1767436620
+ - buckets: []
+ count: "60"
+ sum: "120"
+ required:
+ - metric
+ - histogram
+ additionalProperties: false
+ description: A sample with a native histogram value.
+ FloatSeries:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ values:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Array of [timestamp, stringValue] pairs for float values.
+ required:
+ - metric
+ - values
+ additionalProperties: false
+ description: A time series with float values.
+ HistogramSeries:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ histograms:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - $ref: '#/components/schemas/HistogramValue'
+ maxItems: 2
+ minItems: 2
+ description: Array of [timestamp, histogramObject] pairs for histogram values.
+ required:
+ - metric
+ - histograms
+ additionalProperties: false
+ description: A time series with native histogram values.
+ HistogramValue:
+ type: object
+ properties:
+ count:
+ type: string
+ description: Total count of observations.
+ sum:
+ type: string
+ description: Sum of all observed values.
+ buckets:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ description: Histogram buckets as [boundary_rule, lower, upper, count].
+ required:
+ - count
+ - sum
+ additionalProperties: false
+ description: Native histogram value representation.
+ LabelsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ type: string
+ example:
+ - __name__
+ - job
+ - instance
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of strings.
+ LabelsPostInputBody:
+ type: object
+ properties:
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ match[]:
+ type: array
+ items:
+ type: string
+ description: 'Form field: Series selector argument that selects the series from which to read the label names.'
+ example:
+ - '{job="prometheus"}'
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of label names to return.'
+ example: 100
+ additionalProperties: false
+ description: POST request body for labels query.
+ LabelValuesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ type: string
+ example:
+ - __name__
+ - job
+ - instance
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of strings.
+ SeriesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Labels'
+ example:
+ - __name__: up
+ instance: localhost:9090
+ job: prometheus
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of label sets.
+ SeriesPostInputBody:
+ type: object
+ properties:
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ match[]:
+ type: array
+ items:
+ type: string
+ description: 'Form field: Series selector argument that selects the series to return.'
+ example:
+ - '{job="prometheus"}'
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of series to return.'
+ example: 100
+ required:
+ - match[]
+ additionalProperties: false
+ description: POST request body for series query.
+ SeriesDeleteOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ Metadata:
+ type: object
+ properties:
+ type:
+ type: string
+ description: Metric type (counter, gauge, histogram, summary, or untyped).
+ unit:
+ type: string
+ description: Unit of the metric.
+ help:
+ type: string
+ description: Help text describing the metric.
+ required:
+ - type
+ - unit
+ - help
+ additionalProperties: false
+ description: Metric metadata.
+ MetadataOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: object
+ additionalProperties:
+ type: array
+ items:
+ $ref: '#/components/schemas/Metadata'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for metadata endpoint.
+ MetricMetadata:
+ type: object
+ properties:
+ target:
+ $ref: '#/components/schemas/Labels'
+ metric:
+ type: string
+ description: Metric name.
+ type:
+ type: string
+ description: Metric type (counter, gauge, histogram, summary, or untyped).
+ help:
+ type: string
+ description: Help text describing the metric.
+ unit:
+ type: string
+ description: Unit of the metric.
+ required:
+ - target
+ - type
+ - help
+ - unit
+ additionalProperties: false
+ description: Target metric metadata.
+ Target:
+ type: object
+ properties:
+ discoveredLabels:
+ $ref: '#/components/schemas/Labels'
+ labels:
+ $ref: '#/components/schemas/Labels'
+ scrapePool:
+ type: string
+ description: Name of the scrape pool.
+ scrapeUrl:
+ type: string
+ description: URL of the target.
+ globalUrl:
+ type: string
+ description: Global URL of the target.
+ lastError:
+ type: string
+ description: Last error message from scraping.
+ lastScrape:
+ type: string
+ format: date-time
+ description: Timestamp of the last scrape.
+ lastScrapeDuration:
+ type: number
+ format: double
+ description: Duration of the last scrape in seconds.
+ health:
+ type: string
+ description: Health status of the target (up, down, or unknown).
+ scrapeInterval:
+ type: string
+ description: Scrape interval for this target.
+ scrapeTimeout:
+ type: string
+ description: Scrape timeout for this target.
+ required:
+ - discoveredLabels
+ - labels
+ - scrapePool
+ - scrapeUrl
+ - globalUrl
+ - lastError
+ - lastScrape
+ - lastScrapeDuration
+ - health
+ - scrapeInterval
+ - scrapeTimeout
+ additionalProperties: false
+ description: Scrape target information.
+ DroppedTarget:
+ type: object
+ properties:
+ discoveredLabels:
+ $ref: '#/components/schemas/Labels'
+ scrapePool:
+ type: string
+ description: Name of the scrape pool.
+ required:
+ - discoveredLabels
+ - scrapePool
+ additionalProperties: false
+ description: Dropped target information.
+ TargetDiscovery:
+ type: object
+ properties:
+ activeTargets:
+ type: array
+ items:
+ $ref: '#/components/schemas/Target'
+ droppedTargets:
+ type: array
+ items:
+ $ref: '#/components/schemas/DroppedTarget'
+ droppedTargetCounts:
+ type: object
+ additionalProperties:
+ type: integer
+ format: int64
+ required:
+ - activeTargets
+ - droppedTargets
+ - droppedTargetCounts
+ additionalProperties: false
+ description: Target discovery information including active and dropped targets.
+ TargetsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/TargetDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for targets endpoint.
+ TargetMetadataOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/MetricMetadata'
+ example:
+ - help: The current health status of the target
+ metric: up
+ target:
+ instance: localhost:9090
+ job: prometheus
+ type: gauge
+ unit: ""
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of metric metadata.
+ ScrapePoolsDiscovery:
+ type: object
+ properties:
+ scrapePools:
+ type: array
+ items:
+ type: string
+ required:
+ - scrapePools
+ additionalProperties: false
+ description: List of all configured scrape pools.
+ ScrapePoolsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/ScrapePoolsDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for scrape pools endpoint.
+ Config:
+ type: object
+ properties:
+ source_labels:
+ type: array
+ items:
+ type: string
+ description: Source labels for relabeling.
+ separator:
+ type: string
+ description: Separator for source label values.
+ regex:
+ type: string
+ description: Regular expression for matching.
+ modulus:
+ type: integer
+ format: int64
+ description: Modulus for hash-based relabeling.
+ target_label:
+ type: string
+ description: Target label name.
+ replacement:
+ type: string
+ description: Replacement value.
+ action:
+ type: string
+ description: Relabel action.
+ additionalProperties: false
+ description: Relabel configuration.
+ RelabelStep:
+ type: object
+ properties:
+ rule:
+ $ref: '#/components/schemas/Config'
+ output:
+ $ref: '#/components/schemas/Labels'
+ keep:
+ type: boolean
+ required:
+ - rule
+ - output
+ - keep
+ additionalProperties: false
+ description: Relabel step showing the rule, output, and whether the target was kept.
+ RelabelStepsResponse:
+ type: object
+ properties:
+ steps:
+ type: array
+ items:
+ $ref: '#/components/schemas/RelabelStep'
+ required:
+ - steps
+ additionalProperties: false
+ description: Relabeling steps response.
+ TargetRelabelStepsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RelabelStepsResponse'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for target relabel steps endpoint.
+ RuleGroup:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of the rule group.
+ file:
+ type: string
+ description: File containing the rule group.
+ rules:
+ type: array
+ items:
+ type: object
+ description: Rule definition.
+ description: Rules in this group.
+ interval:
+ type: number
+ format: double
+ description: Evaluation interval in seconds.
+ limit:
+ type: integer
+ format: int64
+ description: Maximum number of alerts for this group.
+ evaluationTime:
+ type: number
+ format: double
+ description: Time taken to evaluate the group in seconds.
+ lastEvaluation:
+ type: string
+ format: date-time
+ description: Timestamp of the last evaluation.
+ required:
+ - name
+ - file
+ - rules
+ - interval
+ - limit
+ - evaluationTime
+ - lastEvaluation
+ additionalProperties: false
+ description: Rule group information.
+ RuleDiscovery:
+ type: object
+ properties:
+ groups:
+ type: array
+ items:
+ $ref: '#/components/schemas/RuleGroup'
+ groupNextToken:
+ type: string
+ description: Pagination token for the next page of groups.
+ required:
+ - groups
+ additionalProperties: false
+ description: Rule discovery information containing all rule groups.
+ RulesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RuleDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for rules endpoint.
+ Alert:
+ type: object
+ properties:
+ labels:
+ $ref: '#/components/schemas/Labels'
+ annotations:
+ $ref: '#/components/schemas/Labels'
+ state:
+ type: string
+ description: State of the alert (pending, firing, or inactive).
+ value:
+ type: string
+ description: Value of the alert expression.
+ activeAt:
+ type: string
+ format: date-time
+ description: Timestamp when the alert became active.
+ keepFiringSince:
+ type: string
+ format: date-time
+          description: Timestamp since which the alert has been kept firing.
+ required:
+ - labels
+ - annotations
+ - state
+ - value
+ additionalProperties: false
+ description: Alert information.
+ AlertDiscovery:
+ type: object
+ properties:
+ alerts:
+ type: array
+ items:
+ $ref: '#/components/schemas/Alert'
+ required:
+ - alerts
+ additionalProperties: false
+ description: Alert discovery information containing all active alerts.
+ AlertsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/AlertDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for alerts endpoint.
+ AlertmanagerTarget:
+ type: object
+ properties:
+ url:
+ type: string
+ description: URL of the Alertmanager instance.
+ required:
+ - url
+ additionalProperties: false
+ description: Alertmanager target information.
+ AlertmanagerDiscovery:
+ type: object
+ properties:
+ activeAlertmanagers:
+ type: array
+ items:
+ $ref: '#/components/schemas/AlertmanagerTarget'
+ droppedAlertmanagers:
+ type: array
+ items:
+ $ref: '#/components/schemas/AlertmanagerTarget'
+ required:
+ - activeAlertmanagers
+ - droppedAlertmanagers
+ additionalProperties: false
+ description: Alertmanager discovery information including active and dropped instances.
+ AlertmanagersOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/AlertmanagerDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for alertmanagers endpoint.
+ StatusConfigData:
+ type: object
+ properties:
+ yaml:
+ type: string
+ description: Prometheus configuration in YAML format.
+ required:
+ - yaml
+ additionalProperties: false
+ description: Prometheus configuration.
+ StatusConfigOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusConfigData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status config endpoint.
+ RuntimeInfo:
+ type: object
+ properties:
+ startTime:
+ type: string
+ format: date-time
+ CWD:
+ type: string
+ hostname:
+ type: string
+ serverTime:
+ type: string
+ format: date-time
+ reloadConfigSuccess:
+ type: boolean
+ lastConfigTime:
+ type: string
+ format: date-time
+ corruptionCount:
+ type: integer
+ format: int64
+ goroutineCount:
+ type: integer
+ format: int64
+ GOMAXPROCS:
+ type: integer
+ format: int64
+ GOMEMLIMIT:
+ type: integer
+ format: int64
+ GOGC:
+ type: string
+ GODEBUG:
+ type: string
+ storageRetention:
+ type: string
+ required:
+ - startTime
+ - CWD
+ - hostname
+ - serverTime
+ - reloadConfigSuccess
+ - lastConfigTime
+ - corruptionCount
+ - goroutineCount
+ - GOMAXPROCS
+ - GOMEMLIMIT
+ - GOGC
+ - GODEBUG
+ - storageRetention
+ additionalProperties: false
+ description: Prometheus runtime information.
+ StatusRuntimeInfoOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RuntimeInfo'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status runtime info endpoint.
+ PrometheusVersion:
+ type: object
+ properties:
+ version:
+ type: string
+ revision:
+ type: string
+ branch:
+ type: string
+ buildUser:
+ type: string
+ buildDate:
+ type: string
+ goVersion:
+ type: string
+ required:
+ - version
+ - revision
+ - branch
+ - buildUser
+ - buildDate
+ - goVersion
+ additionalProperties: false
+ description: Prometheus version information.
+ StatusBuildInfoOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/PrometheusVersion'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status build info endpoint.
+ StatusFlagsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: object
+ additionalProperties:
+ type: string
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status flags endpoint.
+ HeadStats:
+ type: object
+ properties:
+ numSeries:
+ type: integer
+ format: int64
+ numLabelPairs:
+ type: integer
+ format: int64
+ chunkCount:
+ type: integer
+ format: int64
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ required:
+ - numSeries
+ - numLabelPairs
+ - chunkCount
+ - minTime
+ - maxTime
+ additionalProperties: false
+ description: TSDB head statistics.
+ TSDBStat:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: integer
+ format: int64
+ required:
+ - name
+ - value
+ additionalProperties: false
+ description: TSDB statistic.
+ TSDBStatus:
+ type: object
+ properties:
+ headStats:
+ $ref: '#/components/schemas/HeadStats'
+ seriesCountByMetricName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ labelValueCountByLabelName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ memoryInBytesByLabelName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ seriesCountByLabelValuePair:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ required:
+ - headStats
+ - seriesCountByMetricName
+ - labelValueCountByLabelName
+ - memoryInBytesByLabelName
+ - seriesCountByLabelValuePair
+ additionalProperties: false
+ description: TSDB status information.
+ StatusTSDBOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/TSDBStatus'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status TSDB endpoint.
+ BlockDesc:
+ type: object
+ properties:
+ ulid:
+ type: string
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ required:
+ - ulid
+ - minTime
+ - maxTime
+ additionalProperties: false
+ description: Block descriptor.
+ BlockStats:
+ type: object
+ properties:
+ numSamples:
+ type: integer
+ format: int64
+ numSeries:
+ type: integer
+ format: int64
+ numChunks:
+ type: integer
+ format: int64
+ numTombstones:
+ type: integer
+ format: int64
+ numFloatSamples:
+ type: integer
+ format: int64
+ numHistogramSamples:
+ type: integer
+ format: int64
+ additionalProperties: false
+ description: Block statistics.
+ BlockMetaCompaction:
+ type: object
+ properties:
+ level:
+ type: integer
+ format: int64
+ sources:
+ type: array
+ items:
+ type: string
+ parents:
+ type: array
+ items:
+ $ref: '#/components/schemas/BlockDesc'
+ failed:
+ type: boolean
+ deletable:
+ type: boolean
+ hints:
+ type: array
+ items:
+ type: string
+ required:
+ - level
+ additionalProperties: false
+ description: Block compaction metadata.
+ BlockMeta:
+ type: object
+ properties:
+ ulid:
+ type: string
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ stats:
+ $ref: '#/components/schemas/BlockStats'
+ compaction:
+ $ref: '#/components/schemas/BlockMetaCompaction'
+ version:
+ type: integer
+ format: int64
+ required:
+ - ulid
+ - minTime
+ - maxTime
+ - compaction
+ - version
+ additionalProperties: false
+ description: Block metadata.
+ StatusTSDBBlocksData:
+ type: object
+ properties:
+ blocks:
+ type: array
+ items:
+ $ref: '#/components/schemas/BlockMeta'
+ required:
+ - blocks
+ additionalProperties: false
+ description: TSDB blocks information.
+ StatusTSDBBlocksOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusTSDBBlocksData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status TSDB blocks endpoint.
+ StatusWALReplayData:
+ type: object
+ properties:
+ min:
+ type: integer
+ format: int64
+ max:
+ type: integer
+ format: int64
+ current:
+ type: integer
+ format: int64
+ required:
+ - min
+ - max
+ - current
+ additionalProperties: false
+ description: WAL replay status.
+ StatusWALReplayOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusWALReplayData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status WAL replay endpoint.
+ DeleteSeriesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ additionalProperties: false
+ description: Response body containing only status.
+ CleanTombstonesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ additionalProperties: false
+ description: Response body containing only status.
+ DataStruct:
+ type: object
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ additionalProperties: false
+ description: Generic data structure with a name field.
+ SnapshotOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/DataStruct'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for snapshot endpoint.
+ Notification:
+ type: object
+ properties:
+ text:
+ type: string
+ date:
+ type: string
+ format: date-time
+ active:
+ type: boolean
+ required:
+ - text
+ - date
+ - active
+ additionalProperties: false
+ description: Server notification.
+ NotificationsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Notification'
+ example:
+ - active: true
+ date: "2023-07-21T20:00:00.000Z"
+ text: Server is running
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of notifications.
+ FeaturesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+tags:
+ - name: query
+ summary: Query
+ description: Query and evaluate PromQL expressions.
+ - name: metadata
+ summary: Metadata
+ description: Retrieve metric metadata such as type and unit.
+ - name: labels
+ summary: Labels
+ description: Query label names and values.
+ - name: series
+ summary: Series
+ description: Query and manage time series.
+ - name: targets
+ summary: Targets
+ description: Retrieve target and scrape pool information.
+ - name: rules
+ summary: Rules
+ description: Query recording and alerting rules.
+ - name: alerts
+ summary: Alerts
+ description: Query active alerts and alertmanager discovery.
+ - name: status
+ summary: Status
+ description: Retrieve server status and configuration.
+ - name: admin
+ summary: Admin
+ description: Administrative operations for TSDB management.
+ - name: features
+ summary: Features
+ description: Query enabled features.
+ - name: remote
+ summary: Remote Storage
+ description: Remote read and write endpoints.
+ - name: otlp
+ summary: OTLP
+ description: OpenTelemetry Protocol metrics ingestion.
+ - name: notifications
+ summary: Notifications
+ description: Server notifications and events.
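All of the *OutputBody schemas above share the same envelope: a status string, a payload under data, and optional warnings and infos arrays. A minimal Go sketch of consuming that envelope for the labels endpoint (the server address is an assumption, and error handling is reduced to panics for brevity):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// apiEnvelope mirrors the shared {status, data, warnings, infos} envelope
// defined by the *OutputBody schemas above.
type apiEnvelope struct {
	Status   string          `json:"status"`
	Data     json.RawMessage `json:"data"`
	Warnings []string        `json:"warnings,omitempty"`
	Infos    []string        `json:"infos,omitempty"`
}

func main() {
	// Assumes a Prometheus server listening on localhost:9090.
	resp, err := http.Get("http://localhost:9090/api/v1/labels")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var env apiEnvelope
	if err := json.NewDecoder(resp.Body).Decode(&env); err != nil {
		panic(err)
	}
	if env.Status != "success" {
		panic("request failed with status " + env.Status)
	}

	// For LabelsOutputBody, data is an array of strings.
	var labelNames []string
	if err := json.Unmarshal(env.Data, &labelNames); err != nil {
		panic(err)
	}
	fmt.Println(labelNames, env.Warnings, env.Infos)
}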
diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod
index a3abc881e2..d3f69a698b 100644
--- a/web/ui/mantine-ui/src/promql/tools/go.mod
+++ b/web/ui/mantine-ui/src/promql/tools/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools
-go 1.24.0
+go 1.25.0
require (
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
diff --git a/web/web.go b/web/web.go
index 4df447be64..aec2f2d4ee 100644
--- a/web/web.go
+++ b/web/web.go
@@ -361,6 +361,11 @@ func New(logger *slog.Logger, o *Options) *Handler {
app = h.storage
}
+ version := ""
+ if o.Version != nil {
+ version = o.Version.Version
+ }
+
h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, app, h.exemplarStorage, factorySPr, factoryTr, factoryAr,
func() config.Config {
h.mtx.RLock()
@@ -402,6 +407,10 @@ func New(logger *slog.Logger, o *Options) *Handler {
o.AppendMetadata,
nil,
o.FeatureRegistry,
+ api_v1.OpenAPIOptions{
+ ExternalURL: o.ExternalURL.String(),
+ Version: version,
+ },
)
if r := o.FeatureRegistry; r != nil {
diff --git a/web/web_test.go b/web/web_test.go
index ce682912a9..cbcf15ffdc 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -328,6 +328,7 @@ func TestDebugHandler(t *testing.T) {
Host: "localhost.localdomain:9090",
Scheme: "http",
},
+ Version: &PrometheusVersion{},
}
handler := New(nil, opts)
handler.SetReady(Ready)
@@ -353,6 +354,7 @@ func TestHTTPMetrics(t *testing.T) {
Host: "localhost.localdomain:9090",
Scheme: "http",
},
+ Version: &PrometheusVersion{},
})
getReady := func() int {
t.Helper()
From 3bc688e5cbbc7c7aba0c6df5bb9293ee1a012d03 Mon Sep 17 00:00:00 2001
From: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
Date: Thu, 29 Jan 2026 14:26:03 +0100
Subject: [PATCH 094/165] Features API: Add OpenAPI 3.1 and 3.2
Advertise OpenAPI 3.1 and 3.2 support through the Features API, now that #17825 is merged.
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
---
cmd/prometheus/testdata/features.json | 2 ++
web/web.go | 2 ++
2 files changed, 4 insertions(+)
diff --git a/cmd/prometheus/testdata/features.json b/cmd/prometheus/testdata/features.json
index 4c893daae2..4f74b7e810 100644
--- a/cmd/prometheus/testdata/features.json
+++ b/cmd/prometheus/testdata/features.json
@@ -4,6 +4,8 @@
"exclude_alerts": true,
"label_values_match": true,
"lifecycle": false,
+ "openapi_3.1": true,
+ "openapi_3.2": true,
"otlp_write_receiver": false,
"query_stats": true,
"query_warnings": true,
diff --git a/web/web.go b/web/web.go
index aec2f2d4ee..cb9258d87f 100644
--- a/web/web.go
+++ b/web/web.go
@@ -427,6 +427,8 @@ func New(logger *slog.Logger, o *Options) *Handler {
r.Enable(features.API, "time_range_series") // start/end parameters for /series endpoint.
r.Enable(features.API, "time_range_labels") // start/end parameters for /labels endpoints.
r.Enable(features.API, "exclude_alerts") // exclude_alerts parameter for /rules endpoint.
+ r.Enable(features.API, "openapi_3.1") // OpenAPI 3.1 specification support.
+ r.Enable(features.API, "openapi_3.2") // OpenAPI 3.2 specification support.
r.Set(features.UI, "ui_v3", !o.UseOldUI)
r.Set(features.UI, "ui_v2", o.UseOldUI)
}
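Clients can gate OpenAPI consumption on these flags. A minimal sketch, assuming the flags are served from a features endpoint at /api/v1/features and arrive as the flat boolean map shown in features.json above; both the route and the nesting of the data field are assumptions, since neither is shown in this patch:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical endpoint; adjust to the actual Features API route.
	resp, err := http.Get("http://localhost:9090/api/v1/features")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var env struct {
		Status string          `json:"status"`
		Data   json.RawMessage `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&env); err != nil {
		panic(err)
	}

	// Tolerate a different nesting: if data is not a flat map, flags stays empty.
	flags := map[string]bool{}
	_ = json.Unmarshal(env.Data, &flags)
	if flags["openapi_3.2"] {
		fmt.Println("server advertises OpenAPI 3.2 support")
	}
}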
From 0fc70f3a6251776480e78c93859439b735fa06c3 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Thu, 29 Jan 2026 14:26:40 +0000
Subject: [PATCH 095/165] tsdb: kill unused mint,maxt tracking (#17967)
Signed-off-by: bwplotka
---
tsdb/head_append.go | 24 ------------------------
tsdb/head_append_v2.go | 14 --------------
2 files changed, 38 deletions(-)
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index 539884e74b..c171079509 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -168,8 +168,6 @@ func (h *Head) appender() *headAppender {
headAppenderBase: headAppenderBase{
head: h,
minValidTime: minValidTime,
- mint: math.MaxInt64,
- maxt: math.MinInt64,
headMaxt: h.MaxTime(),
oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
seriesRefs: h.getRefSeriesBuffer(),
@@ -393,7 +391,6 @@ func (b *appendBatch) close(h *Head) {
type headAppenderBase struct {
head *Head
minValidTime int64 // No samples below this timestamp are allowed.
- mint, maxt int64
headMaxt int64 // We track it here to not take the lock for every sample appended.
oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.
@@ -477,13 +474,6 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
return 0, err
}
- if t < a.mint {
- a.mint = t
- }
- if t > a.maxt {
- a.maxt = t
- }
-
b := a.getCurrentBatch(stFloat, s.ref)
b.floats = append(b.floats, record.RefSample{
Ref: s.ref,
@@ -527,9 +517,6 @@ func (a *headAppender) AppendSTZeroSample(ref storage.SeriesRef, lset labels.Lab
return storage.SeriesRef(s.ref), storage.ErrOutOfOrderST
}
- if st > a.maxt {
- a.maxt = st
- }
b := a.getCurrentBatch(stFloat, s.ref)
b.floats = append(b.floats, record.RefSample{Ref: s.ref, T: st, V: 0.0})
b.floatSeries = append(b.floatSeries, s)
@@ -903,13 +890,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
b.floatHistogramSeries = append(b.floatHistogramSeries, s)
}
- if t < a.mint {
- a.mint = t
- }
- if t > a.maxt {
- a.maxt = t
- }
-
return storage.SeriesRef(s.ref), nil
}
@@ -1013,10 +993,6 @@ func (a *headAppender) AppendHistogramSTZeroSample(ref storage.SeriesRef, lset l
b.floatHistogramSeries = append(b.floatHistogramSeries, s)
}
- if st > a.maxt {
- a.maxt = st
- }
-
return storage.SeriesRef(s.ref), nil
}
diff --git a/tsdb/head_append_v2.go b/tsdb/head_append_v2.go
index 4a62d56741..2c09c4cbd5 100644
--- a/tsdb/head_append_v2.go
+++ b/tsdb/head_append_v2.go
@@ -17,7 +17,6 @@ import (
"context"
"errors"
"fmt"
- "math"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
@@ -89,8 +88,6 @@ func (h *Head) appenderV2() *headAppenderV2 {
headAppenderBase: headAppenderBase{
head: h,
minValidTime: minValidTime,
- mint: math.MaxInt64,
- maxt: math.MinInt64,
headMaxt: h.MaxTime(),
oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
seriesRefs: h.getRefSeriesBuffer(),
@@ -193,13 +190,6 @@ func (a *headAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t i
return 0, appErr
}
- if t < a.mint {
- a.mint = t
- }
- if t > a.maxt {
- a.maxt = t
- }
-
if isStale {
// For stale values we never attempt to process metadata/exemplars, claim the success.
return storage.SeriesRef(s.ref), nil
@@ -390,10 +380,6 @@ func (a *headAppenderV2) bestEffortAppendSTZeroSample(s *memSeries, ls labels.La
a.head.logger.Debug("Error when appending ST", "series", s.lset.String(), "st", st, "t", t, "err", err)
return
}
-
- if st > a.maxt {
- a.maxt = st
- }
}
var _ storage.GetRef = &headAppenderV2{}
From 88f6ee4c8e324d783fb1b262f4df9e1ea8427a97 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Fri, 30 Jan 2026 11:44:07 +0000
Subject: [PATCH 096/165] tests(scrape): add TestScrapeLoopAppend_WithStorage
(#17937)
Signed-off-by: bwplotka
---
scrape/scrape_test.go | 188 +++++++++++++++++++++++++++++++++++++++--
tsdb/head_append_v2.go | 3 -
2 files changed, 183 insertions(+), 8 deletions(-)
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index f9a0834bd1..b29b445d01 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -1436,7 +1436,9 @@ func readTextParseTestMetrics(t testing.TB) []byte {
if err != nil {
t.Fatal(err)
}
- return b
+
+	// Strip carriage return characters that appear when testing on Windows.
+ return bytes.ReplaceAll(b, []byte{'\r'}, nil)
}
func makeTestGauges(n int) []byte {
@@ -1543,6 +1545,184 @@ func TestPromTextToProto(t *testing.T) {
require.Equal(t, "promhttp_metric_handler_requests_total", got[236])
}
+// TestScrapeLoopAppend_WithStorage tests appends and storage integration for the
+// large input files that are also used in benchmarks.
+func TestScrapeLoopAppend_WithStorage(t *testing.T) {
+ ts := time.Now()
+
+ for _, appV2 := range []bool{false, true} {
+ for _, tc := range []struct {
+ name string
+ parsableText []byte
+
+ expectedSamplesLen int
+ testAppendedSamples func(t *testing.T, committed []sample)
+ testExemplars func(t *testing.T, er []exemplar.QueryResult)
+ }{
+ {
+ name: "1Fam2000Gauges",
+ parsableText: makeTestGauges(2000),
+
+ expectedSamplesLen: 2000,
+ testAppendedSamples: func(t *testing.T, committed []sample) {
+ var expectedMF string
+ if appV2 {
+ expectedMF = "metric_a" // Only AppenderV2 supports metric family passing.
+ }
+ // Verify a few samples.
+ testutil.RequireEqual(t, sample{
+ MF: expectedMF,
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Help: "help text"},
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a", "foo", "0", "bar", "0"), V: 1, T: timestamp.FromTime(ts),
+ }, committed[0])
+ testutil.RequireEqual(t, sample{
+ MF: expectedMF,
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Help: "help text"},
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a", "foo", "1245", "bar", "124500"), V: 1, T: timestamp.FromTime(ts),
+ }, committed[1245])
+ testutil.RequireEqual(t, sample{
+ MF: expectedMF,
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Help: "help text"},
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a", "foo", "1999", "bar", "199900"), V: 1, T: timestamp.FromTime(ts),
+ }, committed[len(committed)-1])
+ },
+ },
+ {
+ name: "237FamsAllTypes",
+ parsableText: readTextParseTestMetrics(t),
+
+ expectedSamplesLen: 1857,
+ testAppendedSamples: func(t *testing.T, committed []sample) {
+ // Verify a few samples.
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return ""
+ }
+ return "go_gc_gomemlimit_bytes"
+ }(),
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Help: "Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes"},
+ L: labels.FromStrings(model.MetricNameLabel, "go_gc_gomemlimit_bytes"), V: 9.03676723e+08, T: timestamp.FromTime(ts),
+ }, committed[11])
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return "" // Only AppenderV2 supports metric family passing.
+ }
+ return "prometheus_http_request_duration_seconds"
+ }(),
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Help: "Histogram of latencies for HTTP requests."},
+ L: labels.FromStrings(model.MetricNameLabel, "prometheus_http_request_duration_seconds_bucket", "handler", "/api/v1/query_range", "le", "120.0"), V: 118157, T: timestamp.FromTime(ts),
+ }, committed[448])
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return "" // Only AppenderV2 supports metric family passing.
+ }
+ return "promhttp_metric_handler_requests_total"
+ }(),
+ M: metadata.Metadata{Type: model.MetricTypeCounter, Help: "Total number of scrapes by HTTP status code."},
+ L: labels.FromStrings(model.MetricNameLabel, "promhttp_metric_handler_requests_total", "code", "503"), V: 0, T: timestamp.FromTime(ts),
+ }, committed[len(committed)-1])
+ },
+ },
+ {
+ name: "100HistsWithExemplars",
+ parsableText: makeTestHistogramsWithExemplars(100),
+
+ expectedSamplesLen: 24 * 100,
+ testAppendedSamples: func(t *testing.T, committed []sample) {
+ // Verify a few samples.
+ m := metadata.Metadata{Type: model.MetricTypeHistogram, Help: "RPC latency distributions."}
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return "" // Only AppenderV2 supports metric family passing.
+ }
+ return "rpc_durations_histogram0_seconds"
+ }(),
+ M: m, L: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram0_seconds_bucket", "le", "0.0003100000000000002"), V: 15, T: timestamp.FromTime(ts),
+ ES: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("dummyID", "9818"), Value: 0.0002791130914009552, Ts: 1726839814982, HasTs: true},
+ },
+ }, committed[13])
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return "" // Only AppenderV2 supports metric family passing.
+ }
+ return "rpc_durations_histogram49_seconds"
+ }(),
+ M: m, L: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram49_seconds_sum"), V: -8.452185437166741e-05, T: timestamp.FromTime(ts),
+ }, committed[24*50-3])
+
+				// This series has neither metadata nor a metric family, due to the isSeriesPartOfFamily bug and OpenMetrics 1.0 limitations around _created series.
+ // TODO(bwplotka): Fix with https://github.com/prometheus/prometheus/issues/17900
+ testutil.RequireEqual(t, sample{
+ L: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram99_seconds_created"), V: 1.726839813016302e+09, T: timestamp.FromTime(ts),
+ }, committed[len(committed)-1])
+ },
+ testExemplars: func(t *testing.T, er []exemplar.QueryResult) {
+ // 12 out of 24 histogram series have exemplars.
+ require.Len(t, er, 12*100)
+ testutil.RequireEqual(t, exemplar.QueryResult{
+ SeriesLabels: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram0_seconds_bucket", "le", "0.0003100000000000002"),
+ Exemplars: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("dummyID", "9818"), Value: 0.0002791130914009552, Ts: 1726839814982, HasTs: true},
+ },
+ }, er[10])
+ testutil.RequireEqual(t, exemplar.QueryResult{
+ SeriesLabels: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram9_seconds_bucket", "le", "1.0000000000000216e-05"),
+ Exemplars: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("dummyID", "19206"), Value: -4.6156147425468016e-05, Ts: 1726839815133, HasTs: true},
+ },
+ }, er[len(er)-1])
+ },
+ },
+ } {
+ t.Run(fmt.Sprintf("appV2=%v/data=%v", appV2, tc.name), func(t *testing.T) {
+ s := teststorage.New(t, func(opt *tsdb.Options) {
+ opt.EnableMetadataWALRecords = true
+ })
+
+ appTest := teststorage.NewAppendable().Then(s)
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
+ app := sl.appender()
+
+ _, _, _, err := app.append(tc.parsableText, "application/openmetrics-text", ts)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Check the recorded samples on the Appender layer.
+ require.Nil(t, appTest.PendingSamples())
+ require.Nil(t, appTest.RolledbackSamples())
+
+ got := appTest.ResultSamples()
+ require.Len(t, got, tc.expectedSamplesLen)
+ tc.testAppendedSamples(t, got)
+
+ // Check basic storage stats.
+ stats := s.Head().Stats(model.MetricNameLabel, 2000)
+ require.Equal(t, tc.expectedSamplesLen, int(stats.NumSeries))
+
+ // Check exemplars.
+ eq, err := s.ExemplarQuerier(t.Context())
+ require.NoError(t, err)
+
+ er, err := eq.Select(math.MinInt64, math.MaxInt64, nil)
+ require.NoError(t, err)
+
+ if tc.testExemplars != nil {
+ tc.testExemplars(t, er)
+ } else {
+ // Expect no exemplars.
+ require.Empty(t, er, "%v is not empty", er)
+ }
+ })
+ }
+ }
+}
+
// BenchmarkScrapeLoopAppend benchmarks scrape appends for typical cases.
//
// Benchmark compares append function run across 4 dimensions:
@@ -1567,7 +1747,7 @@ func BenchmarkScrapeLoopAppend(b *testing.B) {
name string
parsableText []byte
}{
- {name: "1Fam1000Gauges", parsableText: makeTestGauges(2000)}, // ~68.1 KB, ~77.9 KB in proto.
+ {name: "1Fam2000Gauges", parsableText: makeTestGauges(2000)}, // ~68.1 KB, ~77.9 KB in proto.
{name: "237FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~185.7 KB, ~70.6 KB in proto.
} {
b.Run(fmt.Sprintf("appV2=%v/appendMetadataToWAL=%v/data=%v", appV2, appendMetadataToWAL, data.name), func(b *testing.B) {
@@ -3218,9 +3398,7 @@ metric: <
}
sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
// This test does not care about metadata.
- // Having this true would mean we need to add metadata to sample
- // expectations.
- // TODO(bwplotka): Add cases for append metadata to WAL and pass metadata
+ // TODO(bwplotka): Add metadata expectations and turn it on.
sl.appendMetadataToWAL = false
})
app := sl.appender()
diff --git a/tsdb/head_append_v2.go b/tsdb/head_append_v2.go
index 2c09c4cbd5..87b62df536 100644
--- a/tsdb/head_append_v2.go
+++ b/tsdb/head_append_v2.go
@@ -200,9 +200,6 @@ func (a *headAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t i
// Currently only exemplars can return partial errors.
partialErr = a.appendExemplars(s, opts.Exemplars)
}
-
- // TODO(bwplotka): Move/reuse metadata tests from scrape, once scrape adopts AppenderV2.
- // Currently tsdb package does not test metadata.
if a.head.opts.EnableMetadataWALRecords && !opts.Metadata.IsEmpty() {
s.Lock()
metaChanged := s.meta == nil || !s.meta.Equals(opts.Metadata)
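The test above drives appends through teststorage.NewAppendable().Then(s), which records every sample while forwarding it to the real TSDB. A minimal sketch of that tee pattern under simplified types (the real teststorage API and storage.Appender signatures differ; this only illustrates the recording idea):

package main

import "fmt"

// sample is a simplified stand-in for the scrape test's sample type.
type sample struct {
	name string
	t    int64
	v    float64
}

// appender is a simplified stand-in for storage.Appender.
type appender interface {
	Append(s sample) error
	Commit() error
}

// recordingAppender tees every append into memory and forwards it to the
// next appender, so a test can assert on exactly what reached storage.
type recordingAppender struct {
	next      appender
	pending   []sample
	committed []sample
}

func (r *recordingAppender) Append(s sample) error {
	r.pending = append(r.pending, s)
	return r.next.Append(s)
}

func (r *recordingAppender) Commit() error {
	if err := r.next.Commit(); err != nil {
		return err
	}
	r.committed = append(r.committed, r.pending...)
	r.pending = nil
	return nil
}

// nopAppender discards everything; a real test would chain the TSDB here.
type nopAppender struct{}

func (nopAppender) Append(sample) error { return nil }
func (nopAppender) Commit() error       { return nil }

func main() {
	app := &recordingAppender{next: nopAppender{}}
	_ = app.Append(sample{name: "up", t: 1, v: 1})
	_ = app.Commit()
	fmt.Println(len(app.committed)) // 1
}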
From e2d028a46e5c555c3f53240be275af9dafff8c72 Mon Sep 17 00:00:00 2001
From: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
Date: Fri, 30 Jan 2026 14:21:03 +0100
Subject: [PATCH 097/165] OpenAPI: Add support for stats
This fixes an oversight in the OpenAPI specification, which did not include stats.
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
---
web/api/testhelpers/assertions.go | 10 +++
web/api/v1/api_scenarios_test.go | 91 +++++++++++++++++++++
web/api/v1/openapi_schemas.go | 73 +++++++++++++++++
web/api/v1/testdata/openapi_3.1_golden.yaml | 52 ++++++++++++
web/api/v1/testdata/openapi_3.2_golden.yaml | 52 ++++++++++++
5 files changed, 278 insertions(+)
diff --git a/web/api/testhelpers/assertions.go b/web/api/testhelpers/assertions.go
index 53010b08b5..8a0a0d4a97 100644
--- a/web/api/testhelpers/assertions.go
+++ b/web/api/testhelpers/assertions.go
@@ -55,6 +55,16 @@ func (r *Response) RequireJSONPathExists(path string) *Response {
return r
}
+// RequireJSONPathNotExists asserts that a JSON path does not exist and returns the response for chaining.
+func (r *Response) RequireJSONPathNotExists(path string) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.Nil(r.t, value, "JSON path %q should not exist but was found", path)
+ return r
+}
+
// RequireEquals asserts that a JSON path equals the expected value and returns the response for chaining.
func (r *Response) RequireEquals(path string, expected any) *Response {
r.t.Helper()
diff --git a/web/api/v1/api_scenarios_test.go b/web/api/v1/api_scenarios_test.go
index a707680c57..5bdccf08d5 100644
--- a/web/api/v1/api_scenarios_test.go
+++ b/web/api/v1/api_scenarios_test.go
@@ -417,3 +417,94 @@ func TestAPIWithNativeHistograms(t *testing.T) {
RequireLenAtLeast("$.data", 1)
})
}
+
+// TestAPIWithStats tests the API with the stats query parameter.
+func TestAPIWithStats(t *testing.T) {
+ // Create an API with sample series data.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ Queryable: testhelpers.NewLazyLoader(func() storage.SampleAndChunkQueryable {
+ return testhelpers.NewQueryableWithSeries(testhelpers.FixtureMultipleSeries())
+ }),
+ })
+
+ now := time.Now().Unix()
+
+ // Test combinations of methods, endpoints, and stats values.
+ methods := []string{"GET", "POST"}
+ statsValues := []struct {
+ value string
+ expectStats bool
+ }{
+ {"true", true},
+ {"all", true},
+ {"1", true},
+ {"", false},
+ }
+
+ for _, method := range methods {
+ for _, stats := range statsValues {
+ t.Run(method+" /api/v1/query with stats="+stats.value, func(t *testing.T) {
+ var params []string
+ if stats.value != "" {
+ params = []string{"query", "up", "stats", stats.value}
+ } else {
+ params = []string{"query", "up"}
+ }
+
+ var resp *testhelpers.Response
+ if method == "GET" {
+ resp = testhelpers.GET(t, api, "/api/v1/query", params...)
+ } else {
+ resp = testhelpers.POST(t, api, "/api/v1/query", params...)
+ }
+
+ resp.RequireSuccess().ValidateOpenAPI()
+
+ if stats.expectStats {
+ resp.RequireJSONPathExists("$.data.stats").
+ RequireJSONPathExists("$.data.stats.timings").
+ RequireJSONPathExists("$.data.stats.samples")
+ } else {
+ resp.RequireJSONPathNotExists("$.data.stats")
+ }
+ })
+
+ t.Run(method+" /api/v1/query_range with stats="+stats.value, func(t *testing.T) {
+ var params []string
+ if stats.value != "" {
+ params = []string{
+ "query", "up",
+ "start", strconv.FormatInt(now-120, 10),
+ "end", strconv.FormatInt(now, 10),
+ "step", "60",
+ "stats", stats.value,
+ }
+ } else {
+ params = []string{
+ "query", "up",
+ "start", strconv.FormatInt(now-120, 10),
+ "end", strconv.FormatInt(now, 10),
+ "step", "60",
+ }
+ }
+
+ var resp *testhelpers.Response
+ if method == "GET" {
+ resp = testhelpers.GET(t, api, "/api/v1/query_range", params...)
+ } else {
+ resp = testhelpers.POST(t, api, "/api/v1/query_range", params...)
+ }
+
+ resp.RequireSuccess().ValidateOpenAPI()
+
+ if stats.expectStats {
+ resp.RequireJSONPathExists("$.data.stats").
+ RequireJSONPathExists("$.data.stats.timings").
+ RequireJSONPathExists("$.data.stats.samples")
+ } else {
+ resp.RequireJSONPathNotExists("$.data.stats")
+ }
+ })
+ }
+ }
+}
diff --git a/web/api/v1/openapi_schemas.go b/web/api/v1/openapi_schemas.go
index 3a567983f4..de39b43e37 100644
--- a/web/api/v1/openapi_schemas.go
+++ b/web/api/v1/openapi_schemas.go
@@ -43,6 +43,7 @@ func (b *OpenAPIBuilder) buildComponents() *v3.Components {
schemas.Set("ParseQueryOutputBody", b.simpleResponseBodySchema())
schemas.Set("ParseQueryPostInputBody", b.parseQueryPostInputBodySchema())
schemas.Set("QueryData", b.queryDataSchema())
+ schemas.Set("QueryStats", b.queryStatsSchema())
schemas.Set("FloatSample", b.floatSampleSchema())
schemas.Set("HistogramSample", b.histogramSampleSchema())
schemas.Set("FloatSeries", b.floatSeriesSchema())
@@ -450,6 +451,7 @@ func (*OpenAPIBuilder) queryDataSchema() *base.SchemaProxy {
},
})},
}))
+ vectorProps.Set("stats", schemaRef("#/components/schemas/QueryStats"))
// Matrix query result.
matrixProps := orderedmap.New[string, *base.SchemaProxy]()
@@ -464,6 +466,7 @@ func (*OpenAPIBuilder) queryDataSchema() *base.SchemaProxy {
},
})},
}))
+ matrixProps.Set("stats", schemaRef("#/components/schemas/QueryStats"))
// Scalar query result.
scalarProps := orderedmap.New[string, *base.SchemaProxy]()
@@ -480,6 +483,7 @@ func (*OpenAPIBuilder) queryDataSchema() *base.SchemaProxy {
MinItems: int64Ptr(2),
MaxItems: int64Ptr(2),
}))
+ scalarProps.Set("stats", schemaRef("#/components/schemas/QueryStats"))
// String query result.
stringResultProps := orderedmap.New[string, *base.SchemaProxy]()
@@ -491,6 +495,7 @@ func (*OpenAPIBuilder) queryDataSchema() *base.SchemaProxy {
MinItems: int64Ptr(2),
MaxItems: int64Ptr(2),
}))
+ stringResultProps.Set("stats", schemaRef("#/components/schemas/QueryStats"))
return base.CreateSchemaProxy(&base.Schema{
Description: "Query result data. The structure of 'result' depends on 'resultType'.",
@@ -536,6 +541,74 @@ func (*OpenAPIBuilder) queryDataSchema() *base.SchemaProxy {
})
}
+func (*OpenAPIBuilder) queryStatsSchema() *base.SchemaProxy {
+ // Timings object.
+ timingsProps := orderedmap.New[string, *base.SchemaProxy]()
+ timingsProps.Set("evalTotalTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Total evaluation time in seconds.",
+ }))
+ timingsProps.Set("resultSortTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Time spent sorting results in seconds.",
+ }))
+ timingsProps.Set("queryPreparationTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Query preparation time in seconds.",
+ }))
+ timingsProps.Set("innerEvalTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Inner evaluation time in seconds.",
+ }))
+ timingsProps.Set("execQueueTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Execution queue wait time in seconds.",
+ }))
+ timingsProps.Set("execTotalTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Total execution time in seconds.",
+ }))
+
+ // Samples object.
+ samplesProps := orderedmap.New[string, *base.SchemaProxy]()
+ samplesProps.Set("totalQueryableSamples", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Description: "Total number of samples that were queryable.",
+ }))
+ samplesProps.Set("peakSamples", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Description: "Peak number of samples in memory.",
+ }))
+ samplesProps.Set("totalQueryableSamplesPerStep", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Total queryable samples per step (only included with stats=all).",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Timestamp and sample count as [timestamp, count].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}})},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ })},
+ }))
+
+ // Main stats object.
+ statsProps := orderedmap.New[string, *base.SchemaProxy]()
+ statsProps.Set("timings", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Properties: timingsProps,
+ }))
+ statsProps.Set("samples", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Properties: samplesProps,
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Query execution statistics (included when the stats query parameter is provided).",
+ Properties: statsProps,
+ })
+}
+
func (*OpenAPIBuilder) queryPostInputBodySchema() *base.SchemaProxy {
props := orderedmap.New[string, *base.SchemaProxy]()
props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The PromQL query to execute.", "up"))
diff --git a/web/api/v1/testdata/openapi_3.1_golden.yaml b/web/api/v1/testdata/openapi_3.1_golden.yaml
index c69694b530..b1514f209d 100644
--- a/web/api/v1/testdata/openapi_3.1_golden.yaml
+++ b/web/api/v1/testdata/openapi_3.1_golden.yaml
@@ -2843,6 +2843,8 @@ components:
- $ref: '#/components/schemas/FloatSample'
- $ref: '#/components/schemas/HistogramSample'
description: Array of samples (either float or histogram).
+ stats:
+ $ref: '#/components/schemas/QueryStats'
required:
- resultType
- result
@@ -2860,6 +2862,8 @@ components:
- $ref: '#/components/schemas/FloatSeries'
- $ref: '#/components/schemas/HistogramSeries'
description: Array of time series (either float or histogram).
+ stats:
+ $ref: '#/components/schemas/QueryStats'
required:
- resultType
- result
@@ -2879,6 +2883,8 @@ components:
maxItems: 2
minItems: 2
description: Scalar value as [timestamp, stringValue].
+ stats:
+ $ref: '#/components/schemas/QueryStats'
required:
- resultType
- result
@@ -2896,6 +2902,8 @@ components:
maxItems: 2
minItems: 2
description: String value as [timestamp, stringValue].
+ stats:
+ $ref: '#/components/schemas/QueryStats'
required:
- resultType
- result
@@ -2910,6 +2918,50 @@ components:
- 1627845600
- "1"
resultType: vector
+ QueryStats:
+ type: object
+ properties:
+ timings:
+ type: object
+ properties:
+ evalTotalTime:
+ type: number
+ description: Total evaluation time in seconds.
+ resultSortTime:
+ type: number
+ description: Time spent sorting results in seconds.
+ queryPreparationTime:
+ type: number
+ description: Query preparation time in seconds.
+ innerEvalTime:
+ type: number
+ description: Inner evaluation time in seconds.
+ execQueueTime:
+ type: number
+ description: Execution queue wait time in seconds.
+ execTotalTime:
+ type: number
+ description: Total execution time in seconds.
+ samples:
+ type: object
+ properties:
+ totalQueryableSamples:
+ type: integer
+ description: Total number of samples that were queryable.
+ peakSamples:
+ type: integer
+ description: Peak number of samples in memory.
+ totalQueryableSamplesPerStep:
+ type: array
+ items:
+ type: array
+ items:
+ type: number
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and sample count as [timestamp, count].
+ description: Total queryable samples per step (only included with stats=all).
+ description: Query execution statistics (included when the stats query parameter is provided).
FloatSample:
type: object
properties:
diff --git a/web/api/v1/testdata/openapi_3.2_golden.yaml b/web/api/v1/testdata/openapi_3.2_golden.yaml
index f122408013..fa79fffecc 100644
--- a/web/api/v1/testdata/openapi_3.2_golden.yaml
+++ b/web/api/v1/testdata/openapi_3.2_golden.yaml
@@ -2881,6 +2881,8 @@ components:
- $ref: '#/components/schemas/FloatSample'
- $ref: '#/components/schemas/HistogramSample'
description: Array of samples (either float or histogram).
+ stats:
+ $ref: '#/components/schemas/QueryStats'
required:
- resultType
- result
@@ -2898,6 +2900,8 @@ components:
- $ref: '#/components/schemas/FloatSeries'
- $ref: '#/components/schemas/HistogramSeries'
description: Array of time series (either float or histogram).
+ stats:
+ $ref: '#/components/schemas/QueryStats'
required:
- resultType
- result
@@ -2917,6 +2921,8 @@ components:
maxItems: 2
minItems: 2
description: Scalar value as [timestamp, stringValue].
+ stats:
+ $ref: '#/components/schemas/QueryStats'
required:
- resultType
- result
@@ -2934,6 +2940,8 @@ components:
maxItems: 2
minItems: 2
description: String value as [timestamp, stringValue].
+ stats:
+ $ref: '#/components/schemas/QueryStats'
required:
- resultType
- result
@@ -2948,6 +2956,50 @@ components:
- 1627845600
- "1"
resultType: vector
+ QueryStats:
+ type: object
+ properties:
+ timings:
+ type: object
+ properties:
+ evalTotalTime:
+ type: number
+ description: Total evaluation time in seconds.
+ resultSortTime:
+ type: number
+ description: Time spent sorting results in seconds.
+ queryPreparationTime:
+ type: number
+ description: Query preparation time in seconds.
+ innerEvalTime:
+ type: number
+ description: Inner evaluation time in seconds.
+ execQueueTime:
+ type: number
+ description: Execution queue wait time in seconds.
+ execTotalTime:
+ type: number
+ description: Total execution time in seconds.
+ samples:
+ type: object
+ properties:
+ totalQueryableSamples:
+ type: integer
+ description: Total number of samples that were queryable.
+ peakSamples:
+ type: integer
+ description: Peak number of samples in memory.
+ totalQueryableSamplesPerStep:
+ type: array
+ items:
+ type: array
+ items:
+ type: number
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and sample count as [timestamp, count].
+ description: Total queryable samples per step (only included with stats=all).
+ description: Query execution statistics (included when the stats query parameter is provided).
FloatSample:
type: object
properties:
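For reference, a query-API response carrying the new field might look like the following (illustrative values only; the shape follows the QueryStats schema added above, with totalQueryableSamplesPerStep omitted since it is only returned with stats=all):

    {
      "status": "success",
      "data": {
        "resultType": "scalar",
        "result": [1627845600, "1"],
        "stats": {
          "timings": {
            "evalTotalTime": 0.000191,
            "resultSortTime": 0,
            "queryPreparationTime": 0.000021,
            "innerEvalTime": 0.000112,
            "execQueueTime": 0.000001,
            "execTotalTime": 0.000205
          },
          "samples": {
            "totalQueryableSamples": 1,
            "peakSamples": 1
          }
        }
      }
    }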
From 91b0f353b0698b7e5c663383809d250a11cdcfb1 Mon Sep 17 00:00:00 2001
From: bwplotka
Date: Fri, 30 Jan 2026 15:19:35 +0000
Subject: [PATCH 098/165] storage: add BenchmarkFanoutAppenderV2
Signed-off-by: bwplotka
---
storage/fanout_test.go | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/storage/fanout_test.go b/storage/fanout_test.go
index 948934d041..da6ec8690a 100644
--- a/storage/fanout_test.go
+++ b/storage/fanout_test.go
@@ -16,6 +16,7 @@ package storage_test
import (
"context"
"errors"
+ "strconv"
"testing"
"github.com/prometheus/common/model"
@@ -563,3 +564,38 @@ func TestFanoutAppenderV2(t *testing.T) {
})
}
}
+
+// Recommended CLI invocation:
+/*
+ export bench=fanoutAppender && go test ./storage/... \
+ -run '^$' -bench '^BenchmarkFanoutAppenderV2' \
+ -benchtime 2s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+*/
+func BenchmarkFanoutAppenderV2(b *testing.B) {
+ ex := exemplar.Exemplar{Value: 1}
+
+ var series []labels.Labels
+ for i := range 1000 {
+ series = append(series, labels.FromStrings(model.MetricNameLabel, "metric1", "i", strconv.Itoa(i)))
+ }
+ for _, tt := range fanoutAppenderTestCases(nil) {
+ b.Run(tt.name, func(b *testing.B) {
+ f := storage.NewFanout(nil, mockStorage{appV2: tt.primary}, mockStorage{appV2: tt.secondary})
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for b.Loop() {
+ app := f.AppenderV2(b.Context())
+ for _, s := range series {
+ // Purposefully skip errors as we want to benchmark error cases too (majority of the fanout logic).
+ _, _ = app.Append(0, s, 0, 0, 1, nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{ex},
+ })
+ }
+ require.NoError(b, app.Rollback())
+
+ }
+ })
+ }
+}
From 6c18bba5c697c2b7febbaf863927a849c3f19fdc Mon Sep 17 00:00:00 2001
From: bwplotka
Date: Fri, 30 Jan 2026 15:54:52 +0000
Subject: [PATCH 099/165] Revert "storage: add BenchmarkFanoutAppenderV2"
This reverts commit 91b0f353b0698b7e5c663383809d250a11cdcfb1.
---
storage/fanout_test.go | 36 ------------------------------------
1 file changed, 36 deletions(-)
diff --git a/storage/fanout_test.go b/storage/fanout_test.go
index da6ec8690a..948934d041 100644
--- a/storage/fanout_test.go
+++ b/storage/fanout_test.go
@@ -16,7 +16,6 @@ package storage_test
import (
"context"
"errors"
- "strconv"
"testing"
"github.com/prometheus/common/model"
@@ -564,38 +563,3 @@ func TestFanoutAppenderV2(t *testing.T) {
})
}
}
-
-// Recommended CLI invocation:
-/*
- export bench=fanoutAppender && go test ./storage/... \
- -run '^$' -bench '^BenchmarkFanoutAppenderV2' \
- -benchtime 2s -count 6 -cpu 2 -timeout 999m \
- | tee ${bench}.txt
-*/
-func BenchmarkFanoutAppenderV2(b *testing.B) {
- ex := exemplar.Exemplar{Value: 1}
-
- var series []labels.Labels
- for i := range 1000 {
- series = append(series, labels.FromStrings(model.MetricNameLabel, "metric1", "i", strconv.Itoa(i)))
- }
- for _, tt := range fanoutAppenderTestCases(nil) {
- b.Run(tt.name, func(b *testing.B) {
- f := storage.NewFanout(nil, mockStorage{appV2: tt.primary}, mockStorage{appV2: tt.secondary})
-
- b.ReportAllocs()
- b.ResetTimer()
- for b.Loop() {
- app := f.AppenderV2(b.Context())
- for _, s := range series {
- // Purposefully skip errors as we want to benchmark error cases too (majority of the fanout logic).
- _, _ = app.Append(0, s, 0, 0, 1, nil, nil, storage.AOptions{
- Exemplars: []exemplar.Exemplar{ex},
- })
- }
- require.NoError(b, app.Rollback())
-
- }
- })
- }
-}
From d9db76631d31d89a90f8142d37d4e36b5b9a0b8d Mon Sep 17 00:00:00 2001
From: Arve Knudsen
Date: Sun, 1 Feb 2026 16:52:26 +0100
Subject: [PATCH 100/165] tsdb: fix flaky TestWaitForPendingReadersInTimeRange
tests (#17985)
The tests were flaky because they used hard-coded time.After(550ms)
waits, which had only 50ms margin over WaitForPendingReadersInTimeRange's
500ms poll interval. On slow CI runners, this margin wasn't reliable.
Use synctest for deterministic time control:
- Wrap test logic in synctest.Test() to use fake time
- Use synctest.Wait() to let goroutines reach dormant state
- Use time.Sleep() to advance fake time past the poll interval
- No more timing-dependent assertions
This makes the tests both reliable and ~60x faster (0.05s vs 3s).
Fixes both TestWaitForPendingReadersInTimeRange and
TestWaitForPendingReadersInTimeRange_AppenderV2.
Signed-off-by: Arve Knudsen
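For context, a condensed sketch of the synctest pattern this patch adopts (a fragment, assuming the util/testutil/synctest wrapper follows Go's testing/synctest semantics: time inside the bubble is fake, and Wait blocks until every goroutine in the bubble is durably blocked):

    synctest.Test(t, func(t *testing.T) {
        var done atomic.Bool
        go func() {
            time.Sleep(500 * time.Millisecond) // Parks on the fake clock.
            done.Store(true)
        }()
        synctest.Wait() // The goroutine is now blocked on its sleep.
        require.False(t, done.Load())
        time.Sleep(time.Second) // Advance fake time past the 500ms sleep.
        synctest.Wait() // Let the woken goroutine store before asserting.
        require.True(t, done.Load())
    })

Because fake time only advances once every goroutine in the bubble is blocked, the assertions hold deterministically regardless of scheduler or CI speed.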
---
tsdb/head_append_v2_test.go | 39 +++++++++++++++++++++++++++----------
tsdb/head_test.go | 39 +++++++++++++++++++++++++++----------
2 files changed, 58 insertions(+), 20 deletions(-)
diff --git a/tsdb/head_append_v2_test.go b/tsdb/head_append_v2_test.go
index 20401c16fe..6bb88bf16e 100644
--- a/tsdb/head_append_v2_test.go
+++ b/tsdb/head_append_v2_test.go
@@ -53,6 +53,7 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
+ "github.com/prometheus/prometheus/util/testutil/synctest"
)
// TODO(bwplotka): Ensure non-ported tests are not deleted from db_test.go when removing AppenderV1 flow (#17632),
@@ -1625,17 +1626,35 @@ func TestWaitForPendingReadersInTimeRange_AppenderV2(t *testing.T) {
}
for _, c := range cases {
t.Run(fmt.Sprintf("mint=%d,maxt=%d,shouldWait=%t", c.mint, c.maxt, c.shouldWait), func(t *testing.T) {
+ // checkWaiting verifies WaitForPendingReadersInTimeRange behavior using synctest
+ // for deterministic time control. The function should block while an overlapping
+ // querier is open and return immediately when there's no overlap.
checkWaiting := func(cl io.Closer) {
- var waitOver atomic.Bool
- go func() {
- db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
- waitOver.Store(true)
- }()
- <-time.After(550 * time.Millisecond)
- require.Equal(t, !c.shouldWait, waitOver.Load())
- require.NoError(t, cl.Close())
- <-time.After(550 * time.Millisecond)
- require.True(t, waitOver.Load())
+ synctest.Test(t, func(t *testing.T) {
+ var waitOver atomic.Bool
+ go func() {
+ db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
+ waitOver.Store(true)
+ }()
+
+ // Wait for goroutine to either complete (no overlap) or block on Sleep (overlap).
+ synctest.Wait()
+
+ if c.shouldWait {
+ require.False(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should block while overlapping querier is open")
+ require.NoError(t, cl.Close())
+ // Advance fake time past the 500ms poll interval, then let goroutine process.
+ time.Sleep(time.Second)
+ synctest.Wait()
+ require.True(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should complete after querier is closed")
+ } else {
+ require.True(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should return immediately when no overlap")
+ require.NoError(t, cl.Close())
+ }
+ })
}
q, err := db.Querier(c.mint, c.maxt)
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index 493f938860..aee61602ff 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -56,6 +56,7 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
+ "github.com/prometheus/prometheus/util/testutil/synctest"
)
// newTestHeadDefaultOptions returns the HeadOptions that should be used by default in unit tests.
@@ -3861,17 +3862,35 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) {
}
for _, c := range cases {
t.Run(fmt.Sprintf("mint=%d,maxt=%d,shouldWait=%t", c.mint, c.maxt, c.shouldWait), func(t *testing.T) {
+ // checkWaiting verifies WaitForPendingReadersInTimeRange behavior using synctest
+ // for deterministic time control. The function should block while an overlapping
+ // querier is open and return immediately when there's no overlap.
checkWaiting := func(cl io.Closer) {
- var waitOver atomic.Bool
- go func() {
- db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
- waitOver.Store(true)
- }()
- <-time.After(550 * time.Millisecond)
- require.Equal(t, !c.shouldWait, waitOver.Load())
- require.NoError(t, cl.Close())
- <-time.After(550 * time.Millisecond)
- require.True(t, waitOver.Load())
+ synctest.Test(t, func(t *testing.T) {
+ var waitOver atomic.Bool
+ go func() {
+ db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
+ waitOver.Store(true)
+ }()
+
+ // Wait for goroutine to either complete (no overlap) or block on Sleep (overlap).
+ synctest.Wait()
+
+ if c.shouldWait {
+ require.False(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should block while overlapping querier is open")
+ require.NoError(t, cl.Close())
+ // Advance fake time past the 500ms poll interval, then let goroutine process.
+ time.Sleep(time.Second)
+ synctest.Wait()
+ require.True(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should complete after querier is closed")
+ } else {
+ require.True(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should return immediately when no overlap")
+ require.NoError(t, cl.Close())
+ }
+ })
}
q, err := db.Querier(c.mint, c.maxt)
From 9657c23c374db182dfefc1d4eb007fa44d4d711a Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Mon, 2 Feb 2026 07:04:30 +0000
Subject: [PATCH 101/165] storage: optimized fanoutAppenderV2 (#17976)
* storage: add BenchmarkFanoutAppenderV2
Signed-off-by: bwplotka
* fix: optimized fanoutAppenderV2
Signed-off-by: bwplotka
* optimized more
Signed-off-by: bwplotka
---------
Signed-off-by: bwplotka
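The core of the optimization is making partial-error accumulation allocation-free on the happy path: Handle now threads a possibly-nil *AppendPartialError through the call chain and only allocates once a partial error actually occurs, while ToError avoids the typed-nil-as-error pitfall. A self-contained, simplified sketch of the idiom (names shortened; the real type also adds an Is method so errors.Is can take a cheaper, alloc-free path):

    package main

    import (
        "errors"
        "fmt"
    )

    // PartialError mimics storage.AppendPartialError: Go permits calling
    // methods on a nil pointer receiver, so callers can start with a nil
    // *PartialError and defer allocation until an error really shows up.
    type PartialError struct{ Errs []error }

    func (e *PartialError) Error() string {
        if e == nil || len(e.Errs) == 0 {
            return ""
        }
        return errors.Join(e.Errs...).Error()
    }

    // ToError converts to a plain error, returning an untyped nil when there
    // is nothing to report (a typed-nil *PartialError stored in an error
    // would not compare equal to nil).
    func (e *PartialError) ToError() error {
        if e == nil {
            return nil
        }
        return e
    }

    // Handle aggregates partial errors and passes hard errors through.
    func (e *PartialError) Handle(err error) (*PartialError, error) {
        if err == nil {
            return e, nil
        }
        var pErr *PartialError
        if !errors.As(err, &pErr) {
            return e, err // Not partial: propagate as a hard error.
        }
        if e == nil {
            e = &PartialError{} // Lazy allocation, as in the patch.
        }
        e.Errs = append(e.Errs, pErr.Errs...)
        return e, nil
    }

    func main() {
        var p *PartialError // nil: no allocation yet.
        p, err := p.Handle(&PartialError{Errs: []error{errors.New("exemplar dropped")}})
        fmt.Println(err, p.ToError()) // <nil> exemplar dropped
    }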
---
storage/fanout.go | 16 ++++----
storage/fanout_test.go | 39 +++++++++++++++++++
storage/interface_append.go | 35 +++++++++++++----
util/teststorage/appender.go | 74 +++++++++++++++++++++---------------
4 files changed, 119 insertions(+), 45 deletions(-)
diff --git a/storage/fanout.go b/storage/fanout.go
index 9baa31d9af..21f5f715e4 100644
--- a/storage/fanout.go
+++ b/storage/fanout.go
@@ -300,20 +300,22 @@ type fanoutAppenderV2 struct {
}
func (f *fanoutAppenderV2) Append(ref SeriesRef, l labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts AOptions) (SeriesRef, error) {
+ var partialErr *AppendPartialError
+
ref, err := f.primary.Append(ref, l, st, t, v, h, fh, opts)
- var partialErr AppendPartialError
- if partialErr.Handle(err) != nil {
+ partialErr, err = partialErr.Handle(err)
+ if err != nil {
return ref, err
}
for _, appender := range f.secondaries {
- if _, err := appender.Append(ref, l, st, t, v, h, fh, opts); err != nil {
- if partialErr.Handle(err) != nil {
- return ref, err
- }
+ _, serr := appender.Append(ref, l, st, t, v, h, fh, opts)
+ partialErr, serr = partialErr.Handle(serr)
+ if serr != nil {
+ return ref, serr
}
}
- return ref, partialErr.ErrOrNil()
+ return ref, partialErr.ToError()
}
func (f *fanoutAppenderV2) Commit() (err error) {
diff --git a/storage/fanout_test.go b/storage/fanout_test.go
index 948934d041..027511aa3a 100644
--- a/storage/fanout_test.go
+++ b/storage/fanout_test.go
@@ -16,6 +16,7 @@ package storage_test
import (
"context"
"errors"
+ "strconv"
"testing"
"github.com/prometheus/common/model"
@@ -563,3 +564,41 @@ func TestFanoutAppenderV2(t *testing.T) {
})
}
}
+
+// Recommended CLI invocation:
+/*
+ export bench=fanoutAppender && go test ./storage/... \
+ -run '^$' -bench '^BenchmarkFanoutAppenderV2' \
+ -benchtime 2s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+*/
+func BenchmarkFanoutAppenderV2(b *testing.B) {
+ ex := []exemplar.Exemplar{{Value: 1}}
+
+ var series []labels.Labels
+ for i := range 1000 {
+ series = append(series, labels.FromStrings(model.MetricNameLabel, "metric1", "i", strconv.Itoa(i)))
+ }
+ for _, tt := range fanoutAppenderTestCases(nil) {
+ // Turn our mock appender into ~noop for no allocs.
+ tt.primary.SkipRecording(true)
+ tt.secondary.SkipRecording(true)
+
+ b.Run(tt.name, func(b *testing.B) {
+ f := storage.NewFanout(nil, mockStorage{appV2: tt.primary}, mockStorage{appV2: tt.secondary})
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for b.Loop() {
+ app := f.AppenderV2(b.Context())
+ for _, s := range series {
+ // Purposefully skip errors as we want to benchmark error cases too (majority of the fanout logic).
+ _, _ = app.Append(0, s, 0, 0, 1, nil, nil, storage.AOptions{
+ Exemplars: ex,
+ })
+ }
+ require.NoError(b, app.Rollback())
+ }
+ })
+ }
+}
diff --git a/storage/interface_append.go b/storage/interface_append.go
index aa4ae84152..b5ee4b49c8 100644
--- a/storage/interface_append.go
+++ b/storage/interface_append.go
@@ -90,6 +90,10 @@ type AppendPartialError struct {
// Error returns combined error string.
func (e *AppendPartialError) Error() string {
+ if e == nil {
+ return ""
+ }
+
errs := errors.Join(e.ExemplarErrors...)
if errs == nil {
return ""
@@ -97,29 +101,46 @@ func (e *AppendPartialError) Error() string {
return errs.Error()
}
-// ErrOrNil returns AppendPartialError as error, returning nil
+// ToError returns AppendPartialError as error, returning nil
// if there are no errors.
-func (e *AppendPartialError) ErrOrNil() error {
- if len(e.ExemplarErrors) == 0 {
+func (e *AppendPartialError) ToError() error {
+ if e == nil {
return nil
}
return e
}
+// Is implements the method expected by errors.Is.
+func (*AppendPartialError) Is(target error) bool {
+ // This does not need to handle wrapped errors as AppendPartialError.Is should be used
+ // via errors.Is.
+ _, ok := target.(*AppendPartialError)
+ return ok
+}
+
// Handle handles the given err that may be an AppendPartialError.
// If the err is nil or not an AppendPartialError it returns err.
// Otherwise, partial errors are aggregated.
-func (e *AppendPartialError) Handle(err error) error {
+func (e *AppendPartialError) Handle(err error) (*AppendPartialError, error) {
if err == nil {
- return nil
+ return e, nil
}
+ // Fast, alloc-free path first for non-partial error cases.
+ if !errors.Is(err, e) {
+ return e, err
+ }
var pErr *AppendPartialError
if !errors.As(err, &pErr) {
- return err
+ return e, err
+ }
+
+ if e == nil {
+ // Lazy allocation.
+ e = &AppendPartialError{}
}
e.ExemplarErrors = append(e.ExemplarErrors, pErr.ExemplarErrors...)
- return nil
+ return e, nil
}
var _ error = &AppendPartialError{}
diff --git a/util/teststorage/appender.go b/util/teststorage/appender.go
index dc0825f98f..d2d550be2e 100644
--- a/util/teststorage/appender.go
+++ b/util/teststorage/appender.go
@@ -185,6 +185,7 @@ type Appendable struct {
appendErrFn func(ls labels.Labels) error // If non-nil, inject appender error on every Append, AppendHistogram and ST zero calls.
appendExemplarsError error // If non-nil, inject exemplar error.
commitErr error // If non-nil, inject commit error.
+ skipRecording bool // If true, Appendable won't record samples, useful for benchmarks.
mtx sync.Mutex
openAppenders atomic.Int32 // Guard against multi-appender use.
@@ -222,6 +223,13 @@ func (a *Appendable) WithErrs(appendErrFn func(ls labels.Labels) error, appendEx
return a
}
+// SkipRecording enables or disables recording appended samples.
+// If skipped, Appendable allocates less, but the Result*() methods will always return empty results. This is useful for benchmarking.
+func (a *Appendable) SkipRecording(skipRecording bool) *Appendable {
+ a.skipRecording = skipRecording
+ return a
+}
+
// PendingSamples returns pending samples (samples appended without commit).
func (a *Appendable) PendingSamples() []Sample {
a.mtx.Lock()
@@ -335,8 +343,10 @@ func (a *baseAppender) Commit() error {
}
a.a.mtx.Lock()
- a.a.resultSamples = append(a.a.resultSamples, a.a.pendingSamples...)
- a.a.pendingSamples = a.a.pendingSamples[:0]
+ if !a.a.skipRecording {
+ a.a.resultSamples = append(a.a.resultSamples, a.a.pendingSamples...)
+ a.a.pendingSamples = a.a.pendingSamples[:0]
+ }
a.err = errClosedAppender
a.a.mtx.Unlock()
@@ -353,8 +363,10 @@ func (a *baseAppender) Rollback() error {
defer a.a.openAppenders.Dec()
a.a.mtx.Lock()
- a.a.rolledbackSamples = append(a.a.rolledbackSamples, a.a.pendingSamples...)
- a.a.pendingSamples = a.a.pendingSamples[:0]
+ if !a.a.skipRecording {
+ a.a.rolledbackSamples = append(a.a.rolledbackSamples, a.a.pendingSamples...)
+ a.a.pendingSamples = a.a.pendingSamples[:0]
+ }
a.err = errClosedAppender
a.a.mtx.Unlock()
@@ -548,37 +560,37 @@ func (a *appenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64
}
}
- var (
- es []exemplar.Exemplar
- partialErr error
- )
+ var partialErr error
+ if !a.a.skipRecording {
+ var es []exemplar.Exemplar
- if len(opts.Exemplars) > 0 {
- if a.a.appendExemplarsError != nil {
- var exErrs []error
- for range opts.Exemplars {
- exErrs = append(exErrs, a.a.appendExemplarsError)
+ if len(opts.Exemplars) > 0 {
+ if a.a.appendExemplarsError != nil {
+ var exErrs []error
+ for range opts.Exemplars {
+ exErrs = append(exErrs, a.a.appendExemplarsError)
+ }
+ if len(exErrs) > 0 {
+ partialErr = &storage.AppendPartialError{ExemplarErrors: exErrs}
+ }
+ } else {
+ // As per AppenderV2 interface, opts.Exemplar slice is unsafe for reuse.
+ es = make([]exemplar.Exemplar, len(opts.Exemplars))
+ copy(es, opts.Exemplars)
}
- if len(exErrs) > 0 {
- partialErr = &storage.AppendPartialError{ExemplarErrors: exErrs}
- }
- } else {
- // As per AppenderV2 interface, opts.Exemplar slice is unsafe for reuse.
- es = make([]exemplar.Exemplar, len(opts.Exemplars))
- copy(es, opts.Exemplars)
}
- }
- a.a.mtx.Lock()
- a.a.pendingSamples = append(a.a.pendingSamples, Sample{
- MF: opts.MetricFamilyName,
- M: opts.Metadata,
- L: ls,
- ST: st, T: t,
- V: v, H: h, FH: fh,
- ES: es,
- })
- a.a.mtx.Unlock()
+ a.a.mtx.Lock()
+ a.a.pendingSamples = append(a.a.pendingSamples, Sample{
+ MF: opts.MetricFamilyName,
+ M: opts.Metadata,
+ L: ls,
+ ST: st, T: t,
+ V: v, H: h, FH: fh,
+ ES: es,
+ })
+ a.a.mtx.Unlock()
+ }
if a.next != nil {
ref, err = a.next.Append(ref, ls, st, t, v, h, fh, opts)
From 0495130e06d18503d7a0f2eb10f5d6ff6c5cb1a9 Mon Sep 17 00:00:00 2001
From: Abu
Date: Mon, 2 Feb 2026 14:32:40 +0530
Subject: [PATCH 102/165] tests: improve error handling in main_test.go
- Add t.Helper() to getCurrentGaugeValuesFor helper function for better
error attribution in test failures
- Add require.NoError checks for os.WriteFile calls in TestRuntimeGOGCConfig
and TestHeadCompactionWhileScraping to catch file write failures
- Strengthen error handling in TestDocumentation to assert command success
rather than silently continuing on failure
- Improve log message clarity in TestAgentSuccessfulStartup to accurately
describe early exit scenario
These changes improve test reliability and follow Go testing best practices.
Signed-off-by: Abu
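As a quick illustration of the first point, t.Helper() makes assertion failures inside a helper get reported at the caller's line instead of inside the helper (mustGather below is a hypothetical helper, not part of this patch):

    func mustGather(t *testing.T, reg prometheus.Gatherer) []*dto.MetricFamily {
        t.Helper()
        mfs, err := reg.Gather()
        require.NoError(t, err) // Failure is attributed to the caller.
        return mfs
    }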
---
cmd/prometheus/main_test.go | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index 6765bae900..38dfd3f2da 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -395,6 +395,7 @@ func TestTimeMetrics(t *testing.T) {
}
func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames ...string) map[string]float64 {
+ t.Helper()
f, err := reg.Gather()
require.NoError(t, err)
@@ -426,7 +427,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
go func() { done <- prom.Wait() }()
select {
case err := <-done:
- t.Logf("prometheus agent should be still running: %v", err)
+ t.Logf("prometheus agent exited early: %v", err)
actualExitStatus = prom.ProcessState.ExitCode()
case <-time.After(startupTime):
prom.Process.Kill()
@@ -571,12 +572,7 @@ func TestDocumentation(t *testing.T) {
var stdout bytes.Buffer
cmd.Stdout = &stdout
- if err := cmd.Run(); err != nil {
- var exitError *exec.ExitError
- if errors.As(err, &exitError) && exitError.ExitCode() != 0 {
- fmt.Println("Command failed with non-zero exit code")
- }
- }
+ require.NoError(t, cmd.Run(), "failed to generate CLI documentation via --write-documentation")
generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promPath), strings.TrimSuffix(filepath.Base(promPath), ".test"))
@@ -753,7 +749,7 @@ global:
configFile := filepath.Join(tmpDir, "prometheus.yml")
port := testutil.RandomUnprivilegedPort(t)
- os.WriteFile(configFile, []byte(tc.config), 0o777)
+ require.NoError(t, os.WriteFile(configFile, []byte(tc.config), 0o777))
prom := prometheusCommandWithLogging(
t,
configFile,
@@ -801,7 +797,7 @@ global:
newConfig := `
runtime:
gogc: 99`
- os.WriteFile(configFile, []byte(newConfig), 0o777)
+ require.NoError(t, os.WriteFile(configFile, []byte(newConfig), 0o777))
reloadPrometheusConfig(t, reloadURL)
ensureGOGCValue(99.0)
})
@@ -834,7 +830,7 @@ scrape_configs:
static_configs:
- targets: ['localhost:%d']
`, port, port)
- os.WriteFile(configFile, []byte(config), 0o777)
+ require.NoError(t, os.WriteFile(configFile, []byte(config), 0o777))
prom := prometheusCommandWithLogging(
t,
@@ -995,7 +991,7 @@ func TestRemoteWrite_ReshardingWithoutDeadlock(t *testing.T) {
config := fmt.Sprintf(`
global:
# Using a smaller interval may cause the scrape to time out.
- scrape_interval: 1s
+ scrape_interval: 1s
scrape_configs:
- job_name: 'self'
static_configs:
From eefa6178fb30b907691f8f017f4a2f54498e1be4 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Mon, 2 Feb 2026 09:13:02 +0000
Subject: [PATCH 103/165] fix: fix rare race on empty head.initialized() vs
head.initTime() (#17963)
* fix: fix rare race on empty head.initialized() vs head.initTime()
Relates to https://github.com/prometheus/prometheus/issues/17941
Signed-off-by: bwplotka
* Apply suggestions from code review
Co-authored-by: Owen Williams
Signed-off-by: Bartlomiej Plotka
* addressed comments
Signed-off-by: bwplotka
---------
Signed-off-by: bwplotka
Signed-off-by: Bartlomiej Plotka
Co-authored-by: Owen Williams
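The race, in short: on an empty head, the loser of the minTime CompareAndSwap used to return immediately while the winner had not yet initialized maxTime, so a concurrent append could observe maxTime still at math.MinInt64 and fail with an out-of-bounds error (hence the new TestHead_InitAppenderRace_ErrOutOfBounds regression test). A simplified sketch of the fixed pattern, mirroring Head.initTime in the diff below (field names illustrative; sync/atomic used here for self-containment):

    func initTime(minTime, maxTime *atomic.Int64, t int64) {
        if !minTime.CompareAndSwap(math.MaxInt64, t) {
            // Lost the race: another goroutine is initializing both bounds.
            // Spin until maxTime is set, with a timeout as a deadlock escape.
            antiDeadlockTimeout := time.After(500 * time.Millisecond)
            for maxTime.Load() == math.MinInt64 {
                select {
                case <-antiDeadlockTimeout:
                    return
                default:
                }
            }
            return
        }
        // Won the race: ensure maxTime is initialized too, unless a
        // concurrent appender already set it higher.
        maxTime.CompareAndSwap(math.MinInt64, t)
    }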
---
tsdb/head_append.go | 12 +-
tsdb/head_append_v2_test.go | 198 +------------------
tsdb/head_test.go | 368 ++++++++++++++++++++----------------
3 files changed, 221 insertions(+), 357 deletions(-)
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index c171079509..005d20b720 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -19,6 +19,7 @@ import (
"fmt"
"log/slog"
"math"
+ "time"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
@@ -117,10 +118,19 @@ func (a *initAppender) AppendSTZeroSample(ref storage.SeriesRef, lset labels.Lab
// for a completely fresh head with an empty WAL.
func (h *Head) initTime(t int64) {
if !h.minTime.CompareAndSwap(math.MaxInt64, t) {
+ // Another concurrent appender is initializing the head.
+ // Wait until h.maxTime is swapped to avoid minTime/maxTime races.
+ antiDeadlockTimeout := time.After(500 * time.Millisecond)
+ for h.maxTime.Load() == math.MinInt64 {
+ select {
+ case <-antiDeadlockTimeout:
+ return
+ default:
+ }
+ }
return
}
// Ensure that max time is initialized to at least the min time we just set.
- // Concurrent appenders may already have set it to a higher value.
h.maxTime.CompareAndSwap(math.MinInt64, t)
}
diff --git a/tsdb/head_append_v2_test.go b/tsdb/head_append_v2_test.go
index 6bb88bf16e..082d756e60 100644
--- a/tsdb/head_append_v2_test.go
+++ b/tsdb/head_append_v2_test.go
@@ -37,7 +37,6 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
- "golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
@@ -56,207 +55,14 @@ import (
"github.com/prometheus/prometheus/util/testutil/synctest"
)
-// TODO(bwplotka): Ensure non-ported tests are not deleted from db_test.go when removing AppenderV1 flow (#17632),
+// TODO(bwplotka): Ensure non-ported tests are not deleted from head_test.go when removing AppenderV1 flow (#17632),
// for example:
// * TestChunkNotFoundHeadGCRace
// * TestHeadSeriesChunkRace
// * TestHeadLabelValuesWithMatchers
// * TestHeadLabelNamesWithMatchers
// * TestHeadShardedPostings
-
-// TestHeadAppenderV2_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples,
-// this means in total it generates 4000 chunks because with a step of 15s there are 4 chunks per block per series.
-// While appending the samples to the head it concurrently queries them from multiple go routines and verifies that the
-// returned results are correct.
-func TestHeadAppenderV2_HighConcurrencyReadAndWrite(t *testing.T) {
- head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
-
- seriesCnt := 1000
- readConcurrency := 2
- writeConcurrency := 10
- startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
- qryRange := uint64(5 * time.Minute.Milliseconds())
- step := uint64(15 * time.Second / time.Millisecond)
- endTs := startTs + uint64(DefaultBlockDuration)
-
- labelSets := make([]labels.Labels, seriesCnt)
- for i := range seriesCnt {
- labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
- }
-
- head.Init(0)
-
- g, ctx := errgroup.WithContext(context.Background())
- whileNotCanceled := func(f func() (bool, error)) error {
- for ctx.Err() == nil {
- cont, err := f()
- if err != nil {
- return err
- }
- if !cont {
- return nil
- }
- }
- return nil
- }
-
- // Create one channel for each write worker, the channels will be used by the coordinator
- // go routine to coordinate which timestamps each write worker has to write.
- writerTsCh := make([]chan uint64, writeConcurrency)
- for writerTsChIdx := range writerTsCh {
- writerTsCh[writerTsChIdx] = make(chan uint64)
- }
-
- // workerReadyWg is used to synchronize the start of the test,
- // we only start the test once all workers signal that they're ready.
- var workerReadyWg sync.WaitGroup
- workerReadyWg.Add(writeConcurrency + readConcurrency)
-
- // Start the write workers.
- for wid := range writeConcurrency {
- // Create copy of workerID to be used by worker routine.
- workerID := wid
-
- g.Go(func() error {
- // The label sets which this worker will write.
- workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
-
- // Signal that this worker is ready.
- workerReadyWg.Done()
-
- return whileNotCanceled(func() (bool, error) {
- ts, ok := <-writerTsCh[workerID]
- if !ok {
- return false, nil
- }
-
- app := head.AppenderV2(ctx)
- for i := range workerLabelSets {
- // We also use the timestamp as the sample value.
- _, err := app.Append(0, workerLabelSets[i], 0, int64(ts), float64(ts), nil, nil, storage.AOptions{})
- if err != nil {
- return false, fmt.Errorf("Error when appending to head: %w", err)
- }
- }
-
- return true, app.Commit()
- })
- })
- }
-
- // queryHead is a helper to query the head for a given time range and labelset.
- queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
- q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
- if err != nil {
- return nil, err
- }
- return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
- }
-
- // readerTsCh will be used by the coordinator go routine to coordinate which timestamps the reader should read.
- readerTsCh := make(chan uint64)
-
- // Start the read workers.
- for wid := range readConcurrency {
- // Create copy of threadID to be used by worker routine.
- workerID := wid
-
- g.Go(func() error {
- querySeriesRef := (seriesCnt / readConcurrency) * workerID
-
- // Signal that this worker is ready.
- workerReadyWg.Done()
-
- return whileNotCanceled(func() (bool, error) {
- ts, ok := <-readerTsCh
- if !ok {
- return false, nil
- }
-
- querySeriesRef = (querySeriesRef + 1) % seriesCnt
- lbls := labelSets[querySeriesRef]
- // lbls has a single entry; extract it so we can run a query.
- var lbl labels.Label
- lbls.Range(func(l labels.Label) {
- lbl = l
- })
- samples, err := queryHead(ts-qryRange, ts, lbl)
- if err != nil {
- return false, err
- }
-
- if len(samples) != 1 {
- return false, fmt.Errorf("expected 1 series, got %d", len(samples))
- }
-
- series := lbls.String()
- expectSampleCnt := qryRange/step + 1
- if expectSampleCnt != uint64(len(samples[series])) {
- return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
- }
-
- for sampleIdx, sample := range samples[series] {
- expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
- if sample.T() != int64(expectedValue) {
- return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
- }
- if sample.F() != float64(expectedValue) {
- return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
- }
- }
-
- return true, nil
- })
- })
- }
-
- // Start the coordinator go routine.
- g.Go(func() error {
- currTs := startTs
-
- defer func() {
- // End of the test, close all channels to stop the workers.
- for _, ch := range writerTsCh {
- close(ch)
- }
- close(readerTsCh)
- }()
-
- // Wait until all workers are ready to start the test.
- workerReadyWg.Wait()
- return whileNotCanceled(func() (bool, error) {
- // Send the current timestamp to each of the writers.
- for _, ch := range writerTsCh {
- select {
- case ch <- currTs:
- case <-ctx.Done():
- return false, nil
- }
- }
-
- // Once data for at least has been ingested, send the current timestamp to the readers.
- if currTs > startTs+qryRange {
- select {
- case readerTsCh <- currTs - step:
- case <-ctx.Done():
- return false, nil
- }
- }
-
- currTs += step
- if currTs > endTs {
- return false, nil
- }
-
- return true, nil
- })
- })
-
- require.NoError(t, g.Wait())
-}
+// * TestHead_HighConcurrencyReadAndWrite
func TestHeadAppenderV2_WALMultiRef(t *testing.T) {
head, w := newTestHead(t, 1000, compression.None, false)
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index aee61602ff..7b8ae0ecbd 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -44,6 +44,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -471,195 +472,242 @@ func BenchmarkLoadRealWLs(b *testing.B) {
}
}
+// TestHead_InitAppenderRace_ErrOutOfBounds tests for init races between maxTime and minTime during concurrent appends on an empty head.
+// See: https://github.com/prometheus/prometheus/pull/17963
+func TestHead_InitAppenderRace_ErrOutOfBounds(t *testing.T) {
+ head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
+ require.NoError(t, head.Init(0))
+
+ ts := timestamp.FromTime(time.Now())
+ appendCycles := 100
+
+ g, ctx := errgroup.WithContext(t.Context())
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ for i := range 100 {
+ g.Go(func() error {
+ appends := 0
+ wg.Wait()
+ for ctx.Err() == nil && appends < appendCycles {
+ appends++
+ app := head.Appender(t.Context())
+ if _, err := app.Append(0, labels.FromStrings("__name__", strconv.Itoa(i)), ts, float64(ts)); err != nil {
+ return fmt.Errorf("error when appending to head: %w", err)
+ }
+ if err := app.Rollback(); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ wg.Done()
+ require.NoError(t, g.Wait())
+}
+
// TestHead_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples,
// this means in total it generates 4000 chunks because with a step of 15s there are 4 chunks per block per series.
// While appending the samples to the head it concurrently queries them from multiple go routines and verifies that the
// returned results are correct.
func TestHead_HighConcurrencyReadAndWrite(t *testing.T) {
- head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
+ for _, appV2 := range []bool{false, true} {
+ t.Run(fmt.Sprintf("appV2=%v", appV2), func(t *testing.T) {
+ head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- seriesCnt := 1000
- readConcurrency := 2
- writeConcurrency := 10
- startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
- qryRange := uint64(5 * time.Minute.Milliseconds())
- step := uint64(15 * time.Second / time.Millisecond)
- endTs := startTs + uint64(DefaultBlockDuration)
+ seriesCnt := 1000
+ readConcurrency := 2
+ writeConcurrency := 10
+ startTs := uint64(DefaultBlockDuration) // Start at the second block relative to the unix epoch.
+ qryRange := uint64(5 * time.Minute.Milliseconds())
+ step := uint64(15 * time.Second / time.Millisecond)
+ endTs := startTs + uint64(DefaultBlockDuration)
- labelSets := make([]labels.Labels, seriesCnt)
- for i := range seriesCnt {
- labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
- }
-
- head.Init(0)
-
- g, ctx := errgroup.WithContext(context.Background())
- whileNotCanceled := func(f func() (bool, error)) error {
- for ctx.Err() == nil {
- cont, err := f()
- if err != nil {
- return err
+ labelSets := make([]labels.Labels, seriesCnt)
+ for i := range seriesCnt {
+ labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
}
- if !cont {
+ require.NoError(t, head.Init(0))
+
+ g, ctx := errgroup.WithContext(t.Context())
+ whileNotCanceled := func(f func() (bool, error)) error {
+ for ctx.Err() == nil {
+ cont, err := f()
+ if err != nil {
+ return err
+ }
+ if !cont {
+ return nil
+ }
+ }
return nil
}
- }
- return nil
- }
- // Create one channel for each write worker, the channels will be used by the coordinator
- // go routine to coordinate which timestamps each write worker has to write.
- writerTsCh := make([]chan uint64, writeConcurrency)
- for writerTsChIdx := range writerTsCh {
- writerTsCh[writerTsChIdx] = make(chan uint64)
- }
+ // Create one channel for each write worker, the channels will be used by the coordinator
+ // go routine to coordinate which timestamps each write worker has to write.
+ writerTsCh := make([]chan uint64, writeConcurrency)
+ for writerTsChIdx := range writerTsCh {
+ writerTsCh[writerTsChIdx] = make(chan uint64)
+ }
- // workerReadyWg is used to synchronize the start of the test,
- // we only start the test once all workers signal that they're ready.
- var workerReadyWg sync.WaitGroup
- workerReadyWg.Add(writeConcurrency + readConcurrency)
+ // workerReadyWg is used to synchronize the start of the test,
+ // we only start the test once all workers signal that they're ready.
+ var workerReadyWg sync.WaitGroup
+ workerReadyWg.Add(writeConcurrency + readConcurrency)
- // Start the write workers.
- for wid := range writeConcurrency {
- // Create copy of workerID to be used by worker routine.
- workerID := wid
+ // Start the write workers.
+ for wid := range writeConcurrency {
+ // Create copy of workerID to be used by worker routine.
+ workerID := wid
- g.Go(func() error {
- // The label sets which this worker will write.
- workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
+ g.Go(func() error {
+ // The label sets which this worker will write.
+ workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
- // Signal that this worker is ready.
- workerReadyWg.Done()
+ // Signal that this worker is ready.
+ workerReadyWg.Done()
- return whileNotCanceled(func() (bool, error) {
- ts, ok := <-writerTsCh[workerID]
- if !ok {
- return false, nil
- }
+ return whileNotCanceled(func() (bool, error) {
+ ts, ok := <-writerTsCh[workerID]
+ if !ok {
+ return false, nil
+ }
- app := head.Appender(ctx)
- for i := range workerLabelSets {
- // We also use the timestamp as the sample value.
- _, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts))
- if err != nil {
- return false, fmt.Errorf("Error when appending to head: %w", err)
- }
- }
+ if appV2 {
+ app := head.AppenderV2(ctx)
+ for i := range workerLabelSets {
+ // We also use the timestamp as the sample value.
+ if _, err := app.Append(0, workerLabelSets[i], 0, int64(ts), float64(ts), nil, nil, storage.AOptions{}); err != nil {
+ return false, fmt.Errorf("error when appending (V2) to head: %w", err)
+ }
+ }
+ return true, app.Commit()
+ }
- return true, app.Commit()
- })
- })
- }
-
- // queryHead is a helper to query the head for a given time range and labelset.
- queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
- q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
- if err != nil {
- return nil, err
- }
- return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
- }
-
- // readerTsCh will be used by the coordinator go routine to coordinate which timestamps the reader should read.
- readerTsCh := make(chan uint64)
-
- // Start the read workers.
- for wid := range readConcurrency {
- // Create copy of threadID to be used by worker routine.
- workerID := wid
-
- g.Go(func() error {
- querySeriesRef := (seriesCnt / readConcurrency) * workerID
-
- // Signal that this worker is ready.
- workerReadyWg.Done()
-
- return whileNotCanceled(func() (bool, error) {
- ts, ok := <-readerTsCh
- if !ok {
- return false, nil
- }
-
- querySeriesRef = (querySeriesRef + 1) % seriesCnt
- lbls := labelSets[querySeriesRef]
- // lbls has a single entry; extract it so we can run a query.
- var lbl labels.Label
- lbls.Range(func(l labels.Label) {
- lbl = l
+ app := head.Appender(ctx)
+ for i := range workerLabelSets {
+ // We also use the timestamp as the sample value.
+ if _, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts)); err != nil {
+ return false, fmt.Errorf("error when appending to head: %w", err)
+ }
+ }
+ return true, app.Commit()
+ })
})
- samples, err := queryHead(ts-qryRange, ts, lbl)
+ }
+
+ // queryHead is a helper to query the head for a given time range and labelset.
+ queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
+ q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
if err != nil {
- return false, err
+ return nil, err
}
+ return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
+ }
- if len(samples) != 1 {
- return false, fmt.Errorf("expected 1 series, got %d", len(samples))
- }
+ // readerTsCh will be used by the coordinator go routine to coordinate which timestamps the reader should read.
+ readerTsCh := make(chan uint64)
- series := lbls.String()
- expectSampleCnt := qryRange/step + 1
- if expectSampleCnt != uint64(len(samples[series])) {
- return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
- }
+ // Start the read workers.
+ for wid := range readConcurrency {
+ // Create copy of threadID to be used by worker routine.
+ workerID := wid
- for sampleIdx, sample := range samples[series] {
- expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
- if sample.T() != int64(expectedValue) {
- return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
+ g.Go(func() error {
+ querySeriesRef := (seriesCnt / readConcurrency) * workerID
+
+ // Signal that this worker is ready.
+ workerReadyWg.Done()
+
+ return whileNotCanceled(func() (bool, error) {
+ ts, ok := <-readerTsCh
+ if !ok {
+ return false, nil
+ }
+
+ querySeriesRef = (querySeriesRef + 1) % seriesCnt
+ lbls := labelSets[querySeriesRef]
+ // lbls has a single entry; extract it so we can run a query.
+ var lbl labels.Label
+ lbls.Range(func(l labels.Label) {
+ lbl = l
+ })
+ samples, err := queryHead(ts-qryRange, ts, lbl)
+ if err != nil {
+ return false, err
+ }
+
+ if len(samples) != 1 {
+ return false, fmt.Errorf("expected 1 series, got %d", len(samples))
+ }
+
+ series := lbls.String()
+ expectSampleCnt := qryRange/step + 1
+ if expectSampleCnt != uint64(len(samples[series])) {
+ return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
+ }
+
+ for sampleIdx, sample := range samples[series] {
+ expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
+ if sample.T() != int64(expectedValue) {
+ return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
+ }
+ if sample.F() != float64(expectedValue) {
+ return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
+ }
+ }
+
+ return true, nil
+ })
+ })
+ }
+
+ // Start the coordinator go routine.
+ g.Go(func() error {
+ currTs := startTs
+
+ defer func() {
+ // End of the test, close all channels to stop the workers.
+ for _, ch := range writerTsCh {
+ close(ch)
}
- if sample.F() != float64(expectedValue) {
- return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
- }
- }
+ close(readerTsCh)
+ }()
- return true, nil
+ // Wait until all workers are ready to start the test.
+ workerReadyWg.Wait()
+
+ return whileNotCanceled(func() (bool, error) {
+ // Send the current timestamp to each of the writers.
+ for _, ch := range writerTsCh {
+ select {
+ case ch <- currTs:
+ case <-ctx.Done():
+ return false, nil
+ }
+ }
+
+ // Once at least qryRange worth of data has been ingested, send the current timestamp to the readers.
+ if currTs > startTs+qryRange {
+ select {
+ case readerTsCh <- currTs - step:
+ case <-ctx.Done():
+ return false, nil
+ }
+ }
+
+ currTs += step
+ if currTs > endTs {
+ return false, nil
+ }
+
+ return true, nil
+ })
})
+
+ require.NoError(t, g.Wait())
})
}
-
- // Start the coordinator go routine.
- g.Go(func() error {
- currTs := startTs
-
- defer func() {
- // End of the test, close all channels to stop the workers.
- for _, ch := range writerTsCh {
- close(ch)
- }
- close(readerTsCh)
- }()
-
- // Wait until all workers are ready to start the test.
- workerReadyWg.Wait()
- return whileNotCanceled(func() (bool, error) {
- // Send the current timestamp to each of the writers.
- for _, ch := range writerTsCh {
- select {
- case ch <- currTs:
- case <-ctx.Done():
- return false, nil
- }
- }
-
- // Once data for at least has been ingested, send the current timestamp to the readers.
- if currTs > startTs+qryRange {
- select {
- case readerTsCh <- currTs - step:
- case <-ctx.Done():
- return false, nil
- }
- }
-
- currTs += step
- if currTs > endTs {
- return false, nil
- }
-
- return true, nil
- })
- })
-
- require.NoError(t, g.Wait())
}
func TestHead_ReadWAL(t *testing.T) {
From 55193c30585147e53f96033a888fb9bd4551f195 Mon Sep 17 00:00:00 2001
From: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
Date: Mon, 2 Feb 2026 11:54:35 +0100
Subject: [PATCH 104/165] promql: fix smoothed interpolation across counter
resets
Fix incorrect interpolation when counter resets occur in smoothed range
selector evaluation. Previously, the asymmetric handling of counter
resets (y1=0 on left edge, y2+=y1 on right edge) produced wrong values.
Now uniformly set y1=0 when a counter reset is detected, correctly
modeling the counter as starting from 0 post-reset.
This fixes rate calculations across counter resets. For example,
rate(metric[10s] smoothed) where metric goes from 100 to 10 (a reset)
now correctly computes 0.666... by treating the counter as resetting
to 0 rather than the inflated values produced by the old behavior.
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
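A worked check of the new test case, assuming the usual range semantics (instant at 12s with [10s] makes the smoothed boundaries 2s and 12s): the samples are (0s, 100) and (15s, 10); the drop flags a reset, so y1 becomes 0 and both boundaries interpolate along the post-reset line y(t) = 10·t/15. That gives y(2s) = 4/3 and y(12s) = 8, so the increase over the window is 8 − 4/3 = 20/3 and the rate is (20/3)/10s = 2/3 ≈ 0.666666666666667, matching the expected value in extended_vectors.test.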
---
promql/engine.go | 2 +-
promql/functions.go | 17 ++++++-----------
promql/functions_internal_test.go | 10 +++++-----
.../promqltest/testdata/extended_vectors.test | 8 ++++++++
4 files changed, 20 insertions(+), 17 deletions(-)
diff --git a/promql/engine.go b/promql/engine.go
index b609dc4f0a..afe82bc38f 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -1667,7 +1667,7 @@ func (ev *evaluator) smoothSeries(series []storage.Series, offset time.Duration)
// Interpolate between prev and next.
// TODO: detect if the sample is a counter, based on __type__ or metadata.
prev, next := floats[i-1], floats[i]
- val := interpolate(prev, next, ts, false, false)
+ val := interpolate(prev, next, ts, false)
ss.Floats = append(ss.Floats, FPoint{F: val, T: ts})
case i > 0:
diff --git a/promql/functions.go b/promql/functions.go
index 9c04392232..aad02370f8 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -70,7 +70,7 @@ func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (
// it returns the interpolated value at the left boundary; otherwise, it returns the first sample's value.
func pickOrInterpolateLeft(floats []FPoint, first int, rangeStart int64, smoothed, isCounter bool) float64 {
if smoothed && floats[first].T < rangeStart {
- return interpolate(floats[first], floats[first+1], rangeStart, isCounter, true)
+ return interpolate(floats[first], floats[first+1], rangeStart, isCounter)
}
return floats[first].F
}
@@ -80,25 +80,20 @@ func pickOrInterpolateLeft(floats []FPoint, first int, rangeStart int64, smoothe
// it returns the interpolated value at the right boundary; otherwise, it returns the last sample's value.
func pickOrInterpolateRight(floats []FPoint, last int, rangeEnd int64, smoothed, isCounter bool) float64 {
if smoothed && last > 0 && floats[last].T > rangeEnd {
- return interpolate(floats[last-1], floats[last], rangeEnd, isCounter, false)
+ return interpolate(floats[last-1], floats[last], rangeEnd, isCounter)
}
return floats[last].F
}
// interpolate performs linear interpolation between two points.
-// If isCounter is true and there is a counter reset:
-// - on the left edge, it sets the value to 0.
-// - on the right edge, it adds the left value to the right value.
+// If isCounter is true and there is a counter reset, it models the counter
+// as starting from 0 (post-reset) by setting y1 to 0.
// It then calculates the interpolated value at the given timestamp.
-func interpolate(p1, p2 FPoint, t int64, isCounter, leftEdge bool) float64 {
+func interpolate(p1, p2 FPoint, t int64, isCounter bool) float64 {
y1 := p1.F
y2 := p2.F
if isCounter && y2 < y1 {
- if leftEdge {
- y1 = 0
- } else {
- y2 += y1
- }
+ y1 = 0
}
return y1 + (y2-y1)*float64(t-p1.T)/float64(p2.T-p1.T)
diff --git a/promql/functions_internal_test.go b/promql/functions_internal_test.go
index bb52e4976b..9efd9c3c2e 100644
--- a/promql/functions_internal_test.go
+++ b/promql/functions_internal_test.go
@@ -108,13 +108,13 @@ func TestInterpolate(t *testing.T) {
{FPoint{T: 1, F: 100}, FPoint{T: 2, F: 200}, 1, false, 100},
{FPoint{T: 0, F: 100}, FPoint{T: 2, F: 200}, 1, false, 150},
{FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, false, 150},
- {FPoint{T: 0, F: 200}, FPoint{T: 2, F: 0}, 1, true, 200},
- {FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, true, 250},
- {FPoint{T: 0, F: 500}, FPoint{T: 2, F: 100}, 1, true, 550},
- {FPoint{T: 0, F: 500}, FPoint{T: 10, F: 0}, 1, true, 500},
+ {FPoint{T: 0, F: 200}, FPoint{T: 2, F: 0}, 1, true, 0},
+ {FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, true, 50},
+ {FPoint{T: 0, F: 500}, FPoint{T: 2, F: 100}, 1, true, 50},
+ {FPoint{T: 0, F: 500}, FPoint{T: 10, F: 0}, 1, true, 0},
}
for _, test := range tests {
- result := interpolate(test.p1, test.p2, test.t, test.isCounter, false)
+ result := interpolate(test.p1, test.p2, test.t, test.isCounter)
require.Equal(t, test.expected, result)
}
}
diff --git a/promql/promqltest/testdata/extended_vectors.test b/promql/promqltest/testdata/extended_vectors.test
index 8f431dcfd3..0bc1140522 100644
--- a/promql/promqltest/testdata/extended_vectors.test
+++ b/promql/promqltest/testdata/extended_vectors.test
@@ -358,6 +358,14 @@ load 1m
eval instant at 2m15s increase(metric[2m] smoothed)
{} 12
+# Smoothed rate interpolation across a counter reset.
+clear
+load 15s
+ metric 100 10
+
+eval instant at 12s rate(metric[10s] smoothed)
+ {} 0.666666666666667
+
clear
eval instant at 1m deriv(foo[3m] smoothed)
expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with deriv
From 7429a75932876b3affd403b27fb941bcb5418213 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Mon, 2 Feb 2026 12:12:06 +0000
Subject: [PATCH 105/165] scrape: switch scrape manager to AppenderV2 (#17978)
Signed-off-by: bwplotka
---
cmd/prometheus/main.go | 33 ++++++++++-----------------------
1 file changed, 10 insertions(+), 23 deletions(-)
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 02808bd652..c5ff339656 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -885,29 +885,16 @@ func main() {
os.Exit(1)
}
- var scrapeManager *scrape.Manager
- {
- // TODO(bwplotka): Switch to AppendableV2 by default.
- // See: https://github.com/prometheus/prometheus/issues/17632
- var (
- scrapeAppendable storage.Appendable = fanoutStorage
- scrapeAppendableV2 storage.AppendableV2
- )
- if cfg.tsdb.EnableSTStorage {
- scrapeAppendable = nil
- scrapeAppendableV2 = fanoutStorage
- }
- scrapeManager, err = scrape.NewManager(
- &cfg.scrape,
- logger.With("component", "scrape manager"),
- logging.NewJSONFileLogger,
- scrapeAppendable, scrapeAppendableV2,
- prometheus.DefaultRegisterer,
- )
- if err != nil {
- logger.Error("failed to create a scrape manager", "err", err)
- os.Exit(1)
- }
+ scrapeManager, err := scrape.NewManager(
+ &cfg.scrape,
+ logger.With("component", "scrape manager"),
+ logging.NewJSONFileLogger,
+ nil, fanoutStorage,
+ prometheus.DefaultRegisterer,
+ )
+ if err != nil {
+ logger.Error("failed to create a scrape manager", "err", err)
+ os.Exit(1)
}
var (
From 848b16d6863ef24360df9ef3fd8130d549e5508f Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Mon, 2 Feb 2026 12:44:11 +0000
Subject: [PATCH 106/165] test: Add benchmark without storage + fix
skipRecording mock feature (#17987)
* test: Add benchmark without storage
Signed-off-by: bwplotka
make bench fair
Signed-off-by: bwplotka
tmp
Signed-off-by: bwplotka
* Apply suggestions from code review
Co-authored-by: Arve Knudsen
Signed-off-by: Bartlomiej Plotka
---------
Signed-off-by: bwplotka
Signed-off-by: Bartlomiej Plotka
Co-authored-by: Arve Knudsen
---
scrape/scrape_test.go | 116 +++++++++++++++++++++--------------
util/teststorage/appender.go | 81 +++++++++++++-----------
2 files changed, 114 insertions(+), 83 deletions(-)
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index b29b445d01..cab2b2918a 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -1725,48 +1725,67 @@ func TestScrapeLoopAppend_WithStorage(t *testing.T) {
// BenchmarkScrapeLoopAppend benchmarks scrape appends for typical cases.
//
-// Benchmark compares append function run across 4 dimensions:
-// * `appV2`: appender V1 or V2
-// * `appendMetadataToWAL`: metadata-wal-records feature enabled or not
-// *`data`: different sizes of metrics scraped e.g. one big gauge metric family
+// Benchmark compares append function run across 5 dimensions:
+// * `withStorage`: without storage isolates the benchmark to the scrape loop append code. With storage is an
+// integration benchmark with the TSDB head appender code. For acceptance criteria run with storage, without for debugging.
+// * `appV2`: appender V1 or V2.
+// * `appendMetadataToWAL`: metadata-wal-records feature enabled or not (problematic feature we might need to change
+// soon, see https://github.com/prometheus/prometheus/issues/15911).
+// * `data`: different sizes of metrics scraped e.g. one big gauge metric family
// with a thousand series and more realistic scenario with common types.
-// *`fmt`: different scrape formats which will benchmark different parsers e.g.
+// * `fmt`: different scrape formats which will benchmark different parsers e.g.
// promtext, omtext and promproto.
//
-// Recommended CLI invocation:
+// NOTE: withStorage=true uses sync.Pool buffers, which are heavily non-deterministic and shared across goroutines.
+// As a result, it's recommended to run the dimensions you want to compare in separate go test invocations.
+// Recommended CLI invocation(s):
/*
- export bench=append && go test ./scrape/... \
- -run '^$' -bench '^BenchmarkScrapeLoopAppend$' \
+ # Acceptance: With storage with V1 and V2 in separate process:
+ export bench=appendV1 && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapeLoopAppend/withStorage=true/appV2=false/$' \
+ -benchtime 2s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+
+ export bench=appendV2 && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapeLoopAppend/withStorage=true/appV2=true/$' \
+ -benchtime 2s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+
+ # For debugging scrape overheads:
+ export bench=appendNoStorage && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapeLoopAppend/withStorage=false/$' \
-benchtime 2s -count 6 -cpu 2 -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkScrapeLoopAppend(b *testing.B) {
- for _, appV2 := range []bool{false, true} {
- for _, appendMetadataToWAL := range []bool{false, true} {
- for _, data := range []struct {
- name string
- parsableText []byte
- }{
- {name: "1Fam2000Gauges", parsableText: makeTestGauges(2000)}, // ~68.1 KB, ~77.9 KB in proto.
- {name: "237FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~185.7 KB, ~70.6 KB in proto.
- } {
- b.Run(fmt.Sprintf("appV2=%v/appendMetadataToWAL=%v/data=%v", appV2, appendMetadataToWAL, data.name), func(b *testing.B) {
- metricsProto := promTextToProto(b, data.parsableText)
+ for _, withStorage := range []bool{false, true} {
+ for _, appV2 := range []bool{false, true} {
+ for _, appendMetadataToWAL := range []bool{false, true} {
+ for _, data := range []struct {
+ name string
+ parsableText []byte
+ }{
+ {name: "1Fam2000Gauges", parsableText: makeTestGauges(2000)}, // ~68.1 KB, ~77.9 KB in proto.
+ {name: "237FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~185.7 KB, ~70.6 KB in proto.
+ } {
+ b.Run(fmt.Sprintf("withStorage=%v/appV2=%v/appendMetadataToWAL=%v/data=%v", withStorage, appV2, appendMetadataToWAL, data.name), func(b *testing.B) {
+ metricsProto := promTextToProto(b, data.parsableText)
- for _, bcase := range []struct {
- name string
- contentType string
- parsable []byte
- }{
- {name: "PromText", contentType: "text/plain", parsable: data.parsableText},
- {name: "OMText", contentType: "application/openmetrics-text", parsable: data.parsableText},
- {name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
- } {
- b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
- benchScrapeLoopAppend(b, appV2, bcase.parsable, bcase.contentType, appendMetadataToWAL, false)
- })
- }
- })
+ for _, bcase := range []struct {
+ name string
+ contentType string
+ parsable []byte
+ }{
+ {name: "PromText", contentType: "text/plain", parsable: data.parsableText},
+ {name: "OMText", contentType: "application/openmetrics-text", parsable: data.parsableText},
+ {name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
+ } {
+ b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
+ benchScrapeLoopAppend(b, withStorage, appV2, bcase.parsable, bcase.contentType, appendMetadataToWAL, false)
+ })
+ }
+ })
+ }
}
}
}
@@ -1774,30 +1793,32 @@ func BenchmarkScrapeLoopAppend(b *testing.B) {
func benchScrapeLoopAppend(
b *testing.B,
+ withStorage bool,
appV2 bool,
parsable []byte,
contentType string,
appendMetadataToWAL bool,
enableExemplarStorage bool,
) {
- // Need a full storage for correct Add/AddFast semantics.
- s := teststorage.New(b, func(opt *tsdb.Options) {
- opt.EnableMetadataWALRecords = appendMetadataToWAL
- if enableExemplarStorage {
- opt.EnableExemplarStorage = true
- opt.MaxExemplars = 1e5
- }
- })
-
- sl, _ := newTestScrapeLoop(b, withAppendable(s, appV2), func(sl *scrapeLoop) {
+ var a compatAppendable = teststorage.NewAppendable().SkipRecording(true) // Make it noop for benchmark purposes.
+ if withStorage {
+ a = teststorage.New(b, func(opt *tsdb.Options) {
+ opt.EnableMetadataWALRecords = appendMetadataToWAL
+ if enableExemplarStorage {
+ opt.EnableExemplarStorage = true
+ opt.MaxExemplars = 1e5
+ }
+ })
+ }
+ sl, _ := newTestScrapeLoop(b, withAppendable(a, appV2), func(sl *scrapeLoop) {
sl.appendMetadataToWAL = appendMetadataToWAL
})
- app := sl.appender()
ts := time.Time{}
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
+ app := sl.appender()
ts = ts.Add(time.Second)
_, _, _, err := app.append(parsable, contentType, ts)
if err != nil {
@@ -1808,7 +1829,6 @@ func benchScrapeLoopAppend(
if err := app.Rollback(); err != nil {
b.Fatal(err)
}
- app = sl.appender()
}
}
@@ -1827,7 +1847,7 @@ func BenchmarkScrapeLoopAppend_HistogramsWithExemplars(b *testing.B) {
for _, appV2 := range []bool{false, true} {
b.Run(fmt.Sprintf("appV2=%v", appV2), func(b *testing.B) {
parsable := makeTestHistogramsWithExemplars(100) // ~255.8 KB in OM text.
- benchScrapeLoopAppend(b, appV2, parsable, "application/openmetrics-text", false, true)
+ benchScrapeLoopAppend(b, true, appV2, parsable, "application/openmetrics-text", false, true)
})
}
}
@@ -3398,7 +3418,9 @@ metric: <
}
sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
// This test does not care about metadata.
- // TODO(bwplotka): Add metadata expectations and turn it on.
+ // Having this true would mean we need to add metadata to sample
+ // expectations.
+ // TODO(bwplotka): Add cases for appending metadata to the WAL and pass metadata.
sl.appendMetadataToWAL = false
})
app := sl.appender()
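
Earlier in this diff, BenchmarkScrapeLoopAppend's inner loop was changed to create the appender inside the measured loop and roll it back each iteration, so every pass starts from a fresh appender and nothing is committed to storage while timing. A minimal sketch of that per-iteration shape, assuming a hypothetical appender interface (the real scrape loop appender has a richer signature):

    package scrapebench

    import "testing"

    // appender is a hypothetical stand-in for the scrape loop's appender;
    // only the two calls exercised by the pattern are modeled.
    type appender interface {
        append(data []byte) error
        Rollback() error
    }

    // benchAppend shows the per-iteration shape: a fresh appender for each
    // pass, rolled back afterwards so iterations stay independent and no
    // samples accumulate in storage between passes.
    func benchAppend(b *testing.B, newAppender func() appender, data []byte) {
        b.ReportAllocs()
        for b.Loop() { // b.Loop (Go 1.24+) resets the timer on first call.
            app := newAppender()
            if err := app.append(data); err != nil {
                b.Fatal(err)
            }
            if err := app.Rollback(); err != nil {
                b.Fatal(err)
            }
        }
    }
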
diff --git a/util/teststorage/appender.go b/util/teststorage/appender.go
index d2d550be2e..f1d336c243 100644
--- a/util/teststorage/appender.go
+++ b/util/teststorage/appender.go
@@ -409,9 +409,11 @@ func (a *appender) Append(ref storage.SeriesRef, ls labels.Labels, t int64, v fl
}
}
- a.a.mtx.Lock()
- a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, V: v})
- a.a.mtx.Unlock()
+ if !a.a.skipRecording {
+ a.a.mtx.Lock()
+ a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, V: v})
+ a.a.mtx.Unlock()
+ }
if a.next != nil {
return a.next.Append(ref, ls, t, v)
@@ -445,9 +447,11 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, t in
}
}
- a.a.mtx.Lock()
- a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, H: h, FH: fh})
- a.a.mtx.Unlock()
+ if !a.a.skipRecording {
+ a.a.mtx.Lock()
+ a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, H: h, FH: fh})
+ a.a.mtx.Unlock()
+ }
if a.next != nil {
return a.next.AppendHistogram(ref, ls, t, h, fh)
@@ -463,23 +467,26 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exem
if a.a.appendExemplarsError != nil {
return 0, a.a.appendExemplarsError
}
- var appended bool
- a.a.mtx.Lock()
- // NOTE(bwplotka): Eventually exemplar has to be attached to a series and soon
- // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
- // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
- i := len(a.a.pendingSamples) - 1
- for ; i >= 0; i-- { // Attach exemplars to the last matching sample.
- if labels.Equal(l, a.a.pendingSamples[i].L) {
- a.a.pendingSamples[i].ES = append(a.a.pendingSamples[i].ES, e)
- appended = true
- break
+ if !a.a.skipRecording {
+ var appended bool
+
+ a.a.mtx.Lock()
+ // NOTE(bwplotka): Eventually exemplar has to be attached to a series and soon
+ // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
+ // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
+ i := len(a.a.pendingSamples) - 1
+ for ; i >= 0; i-- { // Attach exemplars to the last matching sample.
+ if labels.Equal(l, a.a.pendingSamples[i].L) {
+ a.a.pendingSamples[i].ES = append(a.a.pendingSamples[i].ES, e)
+ appended = true
+ break
+ }
+ }
+ a.a.mtx.Unlock()
+ if !appended {
+ return 0, fmt.Errorf("teststorage.appender: exemplar appender without series; ref %v; l %v; exemplar: %v", ref, l, e)
}
- }
- a.a.mtx.Unlock()
- if !appended {
- return 0, fmt.Errorf("teststorage.appender: exemplar appender without series; ref %v; l %v; exemplar: %v", ref, l, e)
}
if a.next != nil {
@@ -504,23 +511,25 @@ func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m meta
return 0, err
}
- var updated bool
+ if !a.a.skipRecording {
+ var updated bool
- a.a.mtx.Lock()
- // NOTE(bwplotka): Eventually metadata has to be attached to a series and soon
- // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
- // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
- i := len(a.a.pendingSamples) - 1
- for ; i >= 0; i-- { // Attach metadata to the last matching sample.
- if labels.Equal(l, a.a.pendingSamples[i].L) {
- a.a.pendingSamples[i].M = m
- updated = true
- break
+ a.a.mtx.Lock()
+ // NOTE(bwplotka): Eventually metadata has to be attached to a series and soon
+ // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
+ // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
+ i := len(a.a.pendingSamples) - 1
+ for ; i >= 0; i-- { // Attach metadata to the last matching sample.
+ if labels.Equal(l, a.a.pendingSamples[i].L) {
+ a.a.pendingSamples[i].M = m
+ updated = true
+ break
+ }
+ }
+ a.a.mtx.Unlock()
+ if !updated {
+ return 0, fmt.Errorf("teststorage.appender: metadata update without series; ref %v; l %v; m: %v", ref, l, m)
}
- }
- a.a.mtx.Unlock()
- if !updated {
- return 0, fmt.Errorf("teststorage.appender: metadata update without series; ref %v; l %v; m: %v", ref, l, m)
}
if a.next != nil {
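
The skipRecording guard added throughout util/teststorage/appender.go follows one pattern: record pending samples, exemplars, and metadata only when recording is enabled, and otherwise fall through so benchmarks still exercise the full call path without the mock's mutex and slice-append overhead. A standalone sketch of the guard, using illustrative types rather than the actual teststorage API:

    package main

    import (
        "fmt"
        "sync"
    )

    type sample struct {
        t int64
        v float64
    }

    // recordingAppender mimics the guard: it records pending samples only
    // when skipRecording is false, so a benchmark can make the mock a
    // near-noop while keeping the rest of the call path intact.
    type recordingAppender struct {
        mtx           sync.Mutex
        skipRecording bool
        pending       []sample
    }

    func (a *recordingAppender) Append(t int64, v float64) {
        if !a.skipRecording {
            a.mtx.Lock()
            a.pending = append(a.pending, sample{t: t, v: v})
            a.mtx.Unlock()
        }
        // The real appender would forward to a wrapped next appender here.
    }

    func main() {
        noop := &recordingAppender{skipRecording: true}
        noop.Append(1000, 42)
        fmt.Println(len(noop.pending)) // 0: nothing was recorded
    }
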
From 076369fad0a553a76ab0f470e1f0d3027225af0d Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Mon, 2 Feb 2026 14:36:00 +0000
Subject: [PATCH 107/165] refactor: move OTLP handler to separate file (#17990)
Signed-off-by: bwplotka
---
storage/remote/write_handler.go | 199 ----
storage/remote/write_otlp_handler.go | 227 ++++
storage/remote/write_otlp_handler_test.go | 1272 +++++++++++++++++++++
storage/remote/write_test.go | 1247 --------------------
4 files changed, 1499 insertions(+), 1446 deletions(-)
create mode 100644 storage/remote/write_otlp_handler.go
create mode 100644 storage/remote/write_otlp_handler_test.go
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index c29896b843..bd507fc241 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -23,18 +23,11 @@ import (
"time"
"github.com/gogo/protobuf/proto"
- deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
- "go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/consumer"
- "go.opentelemetry.io/collector/pdata/pmetric"
- "go.opentelemetry.io/collector/processor"
- "go.opentelemetry.io/otel/metric/noop"
- "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
@@ -43,7 +36,6 @@ import (
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/storage"
- otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)
type writeHandler struct {
@@ -491,197 +483,6 @@ func (*writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage
return ref, err
}
-type OTLPOptions struct {
- // Convert delta samples to their cumulative equivalent by aggregating in-memory
- ConvertDelta bool
- // Store the raw delta samples as metrics with unknown type (we don't have a proper type for delta yet, therefore
- // marking the metric type as unknown for now).
- // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
- NativeDelta bool
- // LookbackDelta is the query lookback delta.
- // Used to calculate the target_info sample timestamp interval.
- LookbackDelta time.Duration
- // Add type and unit labels to the metrics.
- EnableTypeAndUnitLabels bool
- // IngestSTZeroSample enables writing zero samples based on the start time
- // of metrics.
- IngestSTZeroSample bool
- // AppendMetadata enables writing metadata to WAL when metadata-wal-records feature is enabled.
- AppendMetadata bool
-}
-
-// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
-// writes them to the provided appendable.
-func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
- if opts.NativeDelta && opts.ConvertDelta {
- // This should be validated when iterating through feature flags, so not expected to fail here.
- panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time")
- }
-
- ex := &rwExporter{
- logger: logger,
- appendable: appendable,
- config: configFunc,
- allowDeltaTemporality: opts.NativeDelta,
- lookbackDelta: opts.LookbackDelta,
- ingestSTZeroSample: opts.IngestSTZeroSample,
- enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels,
- appendMetadata: opts.AppendMetadata,
- // Register metrics.
- metrics: otlptranslator.NewCombinedAppenderMetrics(reg),
- }
-
- wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}
-
- if opts.ConvertDelta {
- fac := deltatocumulative.NewFactory()
- set := processor.Settings{
- ID: component.NewID(fac.Type()),
- TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()},
- }
- d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.defaultConsumer)
- if err != nil {
- // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor],
- // which only errors if:
- // - cfg.(type) != *Config
- // - telemetry.New fails due to bad set.TelemetrySettings
- //
- // both cannot be the case, as we pass a valid *Config and valid TelemetrySettings.
- // as such, we assume this error to never occur.
- // if it is, our assumptions are broken in which case a panic seems acceptable.
- panic(fmt.Errorf("failed to create metrics processor: %w", err))
- }
- if err := d2c.Start(context.Background(), nil); err != nil {
- // deltatocumulative does not error on start. see above for panic reasoning
- panic(err)
- }
- wh.d2cConsumer = d2c
- }
-
- return wh
-}
-
-type rwExporter struct {
- logger *slog.Logger
- appendable storage.Appendable
- config func() config.Config
- allowDeltaTemporality bool
- lookbackDelta time.Duration
- ingestSTZeroSample bool
- enableTypeAndUnitLabels bool
- appendMetadata bool
-
- // Metrics.
- metrics otlptranslator.CombinedAppenderMetrics
-}
-
-func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
- otlpCfg := rw.config().OTLPConfig
- app := &remoteWriteAppender{
- Appender: rw.appendable.Appender(ctx),
- maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
- }
- combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.ingestSTZeroSample, rw.appendMetadata, rw.metrics)
- converter := otlptranslator.NewPrometheusConverter(combinedAppender)
- annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
- AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
- AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(),
- PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg),
- KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
- ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB,
- PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata,
- AllowDeltaTemporality: rw.allowDeltaTemporality,
- LookbackDelta: rw.lookbackDelta,
- EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels,
- LabelNameUnderscoreSanitization: otlpCfg.LabelNameUnderscoreSanitization,
- LabelNamePreserveMultipleUnderscores: otlpCfg.LabelNamePreserveMultipleUnderscores,
- })
-
- defer func() {
- if err != nil {
- _ = app.Rollback()
- return
- }
- err = app.Commit()
- }()
- ws, _ := annots.AsStrings("", 0, 0)
- if len(ws) > 0 {
- rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws)
- }
- return err
-}
-
-func (*rwExporter) Capabilities() consumer.Capabilities {
- return consumer.Capabilities{MutatesData: false}
-}
-
-type otlpWriteHandler struct {
- logger *slog.Logger
-
- defaultConsumer consumer.Metrics // stores deltas as-is
- d2cConsumer consumer.Metrics // converts deltas to cumulative
-}
-
-func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- req, err := DecodeOTLPWriteRequest(r)
- if err != nil {
- h.logger.Error("Error decoding OTLP write request", "err", err.Error())
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- md := req.Metrics()
- // If deltatocumulative conversion enabled AND delta samples exist, use slower conversion path.
- // While deltatocumulative can also accept cumulative metrics (and then just forwards them as-is), it currently
- // holds a sync.Mutex when entering ConsumeMetrics. This is slow and not necessary when ingesting cumulative metrics.
- if h.d2cConsumer != nil && hasDelta(md) {
- err = h.d2cConsumer.ConsumeMetrics(r.Context(), md)
- } else {
- // Otherwise use default consumer (alongside cumulative samples, this will accept delta samples and write as-is
- // if native-delta-support is enabled).
- err = h.defaultConsumer.ConsumeMetrics(r.Context(), md)
- }
-
- switch {
- case err == nil:
- case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
- // Indicated an out of order sample is a bad request to prevent retries.
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- default:
- h.logger.Error("Error appending remote write", "err", err.Error())
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- w.WriteHeader(http.StatusOK)
-}
-
-func hasDelta(md pmetric.Metrics) bool {
- for i := range md.ResourceMetrics().Len() {
- sms := md.ResourceMetrics().At(i).ScopeMetrics()
- for i := range sms.Len() {
- ms := sms.At(i).Metrics()
- for i := range ms.Len() {
- temporality := pmetric.AggregationTemporalityUnspecified
- m := ms.At(i)
- switch ms.At(i).Type() {
- case pmetric.MetricTypeSum:
- temporality = m.Sum().AggregationTemporality()
- case pmetric.MetricTypeExponentialHistogram:
- temporality = m.ExponentialHistogram().AggregationTemporality()
- case pmetric.MetricTypeHistogram:
- temporality = m.Histogram().AggregationTemporality()
- }
- if temporality == pmetric.AggregationTemporalityDelta {
- return true
- }
- }
- }
- }
- return false
-}
-
type remoteWriteAppender struct {
storage.Appender
diff --git a/storage/remote/write_otlp_handler.go b/storage/remote/write_otlp_handler.go
new file mode 100644
index 0000000000..489a7b574a
--- /dev/null
+++ b/storage/remote/write_otlp_handler.go
@@ -0,0 +1,227 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "time"
+
+ deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ "go.opentelemetry.io/collector/processor"
+ "go.opentelemetry.io/otel/metric/noop"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/timestamp"
+ "github.com/prometheus/prometheus/storage"
+ otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
+)
+
+type OTLPOptions struct {
+ // Convert delta samples to their cumulative equivalent by aggregating in memory.
+ ConvertDelta bool
+ // Store the raw delta samples as metrics with unknown type (we don't have a proper type for delta yet, therefore
+ // marking the metric type as unknown for now).
+ // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
+ NativeDelta bool
+ // LookbackDelta is the query lookback delta.
+ // Used to calculate the target_info sample timestamp interval.
+ LookbackDelta time.Duration
+ // Add type and unit labels to the metrics.
+ EnableTypeAndUnitLabels bool
+ // IngestSTZeroSample enables writing zero samples based on the start time
+ // of metrics.
+ IngestSTZeroSample bool
+ // AppendMetadata enables writing metadata to WAL when metadata-wal-records feature is enabled.
+ AppendMetadata bool
+}
+
+// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
+// writes them to the provided appendable.
+func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
+ if opts.NativeDelta && opts.ConvertDelta {
+ // This should be validated when iterating through feature flags, so not expected to fail here.
+ panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time")
+ }
+
+ ex := &rwExporter{
+ logger: logger,
+ appendable: appendable,
+ config: configFunc,
+ allowDeltaTemporality: opts.NativeDelta,
+ lookbackDelta: opts.LookbackDelta,
+ ingestSTZeroSample: opts.IngestSTZeroSample,
+ enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels,
+ appendMetadata: opts.AppendMetadata,
+ // Register metrics.
+ metrics: otlptranslator.NewCombinedAppenderMetrics(reg),
+ }
+
+ wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}
+
+ if opts.ConvertDelta {
+ fac := deltatocumulative.NewFactory()
+ set := processor.Settings{
+ ID: component.NewID(fac.Type()),
+ TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()},
+ }
+ d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.defaultConsumer)
+ if err != nil {
+ // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor],
+ // which only errors if:
+ // - cfg.(type) != *Config
+ // - telemetry.New fails due to bad set.TelemetrySettings
+ //
+ // Both cannot be the case, as we pass a valid *Config and valid TelemetrySettings.
+ // As such, we assume this error never occurs.
+ // If it does, our assumptions are broken, in which case a panic seems acceptable.
+ panic(fmt.Errorf("failed to create metrics processor: %w", err))
+ }
+ if err := d2c.Start(context.Background(), nil); err != nil {
+ // deltatocumulative does not error on start; see above for panic reasoning.
+ panic(err)
+ }
+ wh.d2cConsumer = d2c
+ }
+
+ return wh
+}
+
+type rwExporter struct {
+ logger *slog.Logger
+ appendable storage.Appendable
+ config func() config.Config
+ allowDeltaTemporality bool
+ lookbackDelta time.Duration
+ ingestSTZeroSample bool
+ enableTypeAndUnitLabels bool
+ appendMetadata bool
+
+ // Metrics.
+ metrics otlptranslator.CombinedAppenderMetrics
+}
+
+func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
+ otlpCfg := rw.config().OTLPConfig
+ app := &remoteWriteAppender{
+ Appender: rw.appendable.Appender(ctx),
+ maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
+ }
+ combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.ingestSTZeroSample, rw.appendMetadata, rw.metrics)
+ converter := otlptranslator.NewPrometheusConverter(combinedAppender)
+ annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
+ AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
+ AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(),
+ PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg),
+ KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
+ ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB,
+ PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata,
+ AllowDeltaTemporality: rw.allowDeltaTemporality,
+ LookbackDelta: rw.lookbackDelta,
+ EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels,
+ LabelNameUnderscoreSanitization: otlpCfg.LabelNameUnderscoreSanitization,
+ LabelNamePreserveMultipleUnderscores: otlpCfg.LabelNamePreserveMultipleUnderscores,
+ })
+
+ defer func() {
+ if err != nil {
+ _ = app.Rollback()
+ return
+ }
+ err = app.Commit()
+ }()
+ ws, _ := annots.AsStrings("", 0, 0)
+ if len(ws) > 0 {
+ rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws)
+ }
+ return err
+}
+
+func (*rwExporter) Capabilities() consumer.Capabilities {
+ return consumer.Capabilities{MutatesData: false}
+}
+
+type otlpWriteHandler struct {
+ logger *slog.Logger
+
+ defaultConsumer consumer.Metrics // stores deltas as-is
+ d2cConsumer consumer.Metrics // converts deltas to cumulative
+}
+
+func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ req, err := DecodeOTLPWriteRequest(r)
+ if err != nil {
+ h.logger.Error("Error decoding OTLP write request", "err", err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ md := req.Metrics()
+ // If deltatocumulative conversion is enabled AND delta samples exist, use the slower conversion path.
+ // While deltatocumulative can also accept cumulative metrics (and then just forwards them as-is), it currently
+ // holds a sync.Mutex when entering ConsumeMetrics. This is slow and not necessary when ingesting cumulative metrics.
+ if h.d2cConsumer != nil && hasDelta(md) {
+ err = h.d2cConsumer.ConsumeMetrics(r.Context(), md)
+ } else {
+ // Otherwise use default consumer (alongside cumulative samples, this will accept delta samples and write as-is
+ // if native-delta-support is enabled).
+ err = h.defaultConsumer.ConsumeMetrics(r.Context(), md)
+ }
+
+ switch {
+ case err == nil:
+ case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
+ // Indicate an out-of-order sample is a bad request to prevent retries.
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ default:
+ h.logger.Error("Error appending remote write", "err", err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+}
+
+func hasDelta(md pmetric.Metrics) bool {
+ for i := range md.ResourceMetrics().Len() {
+ sms := md.ResourceMetrics().At(i).ScopeMetrics()
+ for i := range sms.Len() {
+ ms := sms.At(i).Metrics()
+ for i := range ms.Len() {
+ temporality := pmetric.AggregationTemporalityUnspecified
+ m := ms.At(i)
+ switch ms.At(i).Type() {
+ case pmetric.MetricTypeSum:
+ temporality = m.Sum().AggregationTemporality()
+ case pmetric.MetricTypeExponentialHistogram:
+ temporality = m.ExponentialHistogram().AggregationTemporality()
+ case pmetric.MetricTypeHistogram:
+ temporality = m.Histogram().AggregationTemporality()
+ }
+ if temporality == pmetric.AggregationTemporalityDelta {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
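
For a concrete view of the routing above, the sketch below builds a pmetric payload containing a single delta-temporality sum and performs the same temporality walk as hasDelta; a request like this would be sent to the deltatocumulative consumer when ConvertDelta is on. It uses the public go.opentelemetry.io/collector/pdata/pmetric API, but the program itself is illustrative and not part of this patch:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pmetric"
    )

    func main() {
        // Build a payload with a single delta-temporality sum.
        md := pmetric.NewMetrics()
        m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
        m.SetName("requests")
        sum := m.SetEmptySum()
        sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
        sum.DataPoints().AppendEmpty().SetDoubleValue(1)

        // The same walk hasDelta performs, restricted to sums for brevity:
        // inspect each metric's aggregation temporality.
        delta := false
        rms := md.ResourceMetrics()
        for i := 0; i < rms.Len(); i++ {
            sms := rms.At(i).ScopeMetrics()
            for j := 0; j < sms.Len(); j++ {
                ms := sms.At(j).Metrics()
                for k := 0; k < ms.Len(); k++ {
                    if ms.At(k).Type() == pmetric.MetricTypeSum &&
                        ms.At(k).Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
                        delta = true
                    }
                }
            }
        }
        fmt.Println(delta) // true: this request would take the d2c path
    }
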
diff --git a/storage/remote/write_otlp_handler_test.go b/storage/remote/write_otlp_handler_test.go
new file mode 100644
index 0000000000..e6788b4366
--- /dev/null
+++ b/storage/remote/write_otlp_handler_test.go
@@ -0,0 +1,1272 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "log/slog"
+ "math/rand/v2"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/otlptranslator"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
+)
+
+func TestOTLPWriteHandler(t *testing.T) {
+ timestamp := time.Now()
+ var zeroTime time.Time
+ exportRequest := generateOTLPWriteRequest(timestamp, zeroTime)
+ for _, testCase := range []struct {
+ name string
+ otlpCfg config.OTLPConfig
+ typeAndUnitLabels bool
+ expectedSamples []mockSample
+ expectedMetadata []mockMetadata
+ }{
+ {
+ name: "NoTranslation/NoTypeAndUnitLabels",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoTranslation,
+ },
+ expectedSamples: []mockSample{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 1,
+ },
+ },
+ expectedMetadata: []mockMetadata{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
+ },
+ },
+ },
+ {
+ name: "NoTranslation/WithTypeAndUnitLabels",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoTranslation,
+ },
+ typeAndUnitLabels: true,
+ expectedSamples: []mockSample{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 1,
+ },
+ },
+ expectedMetadata: []mockMetadata{
+ {
+ // Metadata labels follow series labels.
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
+ },
+ },
+ },
+ {
+ name: "UnderscoreEscapingWithSuffixes/NoTypeAndUnitLabels",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
+ },
+ expectedSamples: []mockSample{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 1,
+ },
+ },
+ expectedMetadata: []mockMetadata{
+ // All get _bytes unit suffix and counter also gets _total.
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
+ },
+ },
+ },
+ {
+ name: "UnderscoreEscapingWithoutSuffixes",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.UnderscoreEscapingWithoutSuffixes,
+ },
+ expectedSamples: []mockSample{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 1,
+ },
+ },
+ expectedMetadata: []mockMetadata{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
+ },
+ },
+ },
+ {
+ name: "UnderscoreEscapingWithSuffixes/WithTypeAndUnitLabels",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
+ },
+ typeAndUnitLabels: true,
+ expectedSamples: []mockSample{
+ {
+ l: labels.New(labels.Label{Name: "__name__", Value: "test_counter_bytes_total"},
+ labels.Label{Name: "__type__", Value: "counter"},
+ labels.Label{Name: "__unit__", Value: "bytes"},
+ labels.Label{Name: "foo_bar", Value: "baz"},
+ labels.Label{Name: "instance", Value: "test-instance"},
+ labels.Label{Name: "job", Value: "test-service"}),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.New(
+ labels.Label{Name: "__name__", Value: "target_info"},
+ labels.Label{Name: "host_name", Value: "test-host"},
+ labels.Label{Name: "instance", Value: "test-instance"},
+ labels.Label{Name: "job", Value: "test-service"},
+ ),
+ t: timestamp.UnixMilli(),
+ v: 1,
+ },
+ },
+ expectedMetadata: []mockMetadata{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
+ },
+ },
+ },
+ {
+ name: "NoUTF8EscapingWithSuffixes/NoTypeAndUnitLabels",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
+ },
+ expectedSamples: []mockSample{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 1,
+ },
+ },
+ expectedMetadata: []mockMetadata{
+ // All get _bytes unit suffix and counter also gets _total.
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
+ },
+ },
+ },
+ {
+ name: "NoUTF8EscapingWithSuffixes/WithTypeAndUnitLabels",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
+ },
+ typeAndUnitLabels: true,
+ expectedSamples: []mockSample{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 1,
+ },
+ },
+ expectedMetadata: []mockMetadata{
+ // All get _bytes unit suffix and counter also gets _total.
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
+ },
+ },
+ },
+ } {
+ t.Run(testCase.name, func(t *testing.T) {
+ otlpOpts := OTLPOptions{
+ EnableTypeAndUnitLabels: testCase.typeAndUnitLabels,
+ AppendMetadata: true,
+ }
+ appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, otlpOpts)
+ for _, sample := range testCase.expectedSamples {
+ requireContainsSample(t, appendable.samples, sample)
+ }
+ for _, meta := range testCase.expectedMetadata {
+ requireContainsMetadata(t, appendable.metadata, meta)
+ }
+ require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
+ require.Len(t, appendable.histograms, 1) // 1 (exponential histogram)
+ require.Len(t, appendable.metadata, 13) // for each float and histogram sample
+ require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
+ })
+ }
+}
+
+// TestOTLPWriteHandler_StartTime checks that a zero-valued sample at the
+// start time is ingested when IngestSTZeroSample is enabled and the start
+// time is actually set (non-zero).
+func TestOTLPWriteHandler_StartTime(t *testing.T) {
+ timestamp := time.Now()
+ startTime := timestamp.Add(-1 * time.Millisecond)
+ var zeroTime time.Time
+
+ expectedSamples := []mockSample{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 30.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 12.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+ t: timestamp.UnixMilli(),
+ v: 2.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+ t: timestamp.UnixMilli(),
+ v: 4.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+ t: timestamp.UnixMilli(),
+ v: 6.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+ t: timestamp.UnixMilli(),
+ v: 8.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+ t: timestamp.UnixMilli(),
+ v: 10.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+ t: timestamp.UnixMilli(),
+ v: 12.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+ t: timestamp.UnixMilli(),
+ v: 12.0,
+ },
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ v: 1.0,
+ },
+ }
+ expectedHistograms := []mockHistogram{
+ {
+ l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+ t: timestamp.UnixMilli(),
+ h: &histogram.Histogram{
+ Schema: 2,
+ ZeroThreshold: 1e-128,
+ ZeroCount: 2,
+ Count: 10,
+ Sum: 30,
+ PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}},
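+				// Bucket counts are delta-encoded: {2, 0, 0, 0, 0} represents five buckets of 2 observations each.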
+ PositiveBuckets: []int64{2, 0, 0, 0, 0},
+ },
+ },
+ }
+
+ expectedSamplesWithSTZero := make([]mockSample, 0, len(expectedSamples)*2-1) // All samples will get ST zero, except target_info.
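+	// Each ST zero sample is expected directly before its corresponding real sample.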
+ for _, s := range expectedSamples {
+ if s.l.Get(model.MetricNameLabel) != "target_info" {
+ expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, mockSample{
+ l: s.l.Copy(),
+ t: startTime.UnixMilli(),
+ v: 0,
+ })
+ }
+ expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, s)
+ }
+ expectedHistogramsWithSTZero := make([]mockHistogram, 0, len(expectedHistograms)*2)
+ for _, s := range expectedHistograms {
+ if s.l.Get(model.MetricNameLabel) != "target_info" {
+ expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, mockHistogram{
+ l: s.l.Copy(),
+ t: startTime.UnixMilli(),
+ h: &histogram.Histogram{},
+ })
+ }
+ expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, s)
+ }
+
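+	// The cases cross IngestSTZeroSample with zero and non-zero start times; only
+	// enabling the option together with a non-zero start time yields the extra zero samples.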
+ for _, testCase := range []struct {
+ name string
+ otlpOpts OTLPOptions
+ startTime time.Time
+ expectSTZero bool
+ expectedSamples []mockSample
+ expectedHistograms []mockHistogram
+ }{
+ {
+ name: "IngestSTZero=false/startTime=0",
+ otlpOpts: OTLPOptions{
+ IngestSTZeroSample: false,
+ },
+ startTime: zeroTime,
+ expectedSamples: expectedSamples,
+ expectedHistograms: expectedHistograms,
+ },
+ {
+ name: "IngestSTZero=true/startTime=0",
+ otlpOpts: OTLPOptions{
+ IngestSTZeroSample: true,
+ },
+ startTime: zeroTime,
+ expectedSamples: expectedSamples,
+ expectedHistograms: expectedHistograms,
+ },
+ {
+ name: "IngestSTZero=false/startTime=ts-1ms",
+ otlpOpts: OTLPOptions{
+ IngestSTZeroSample: false,
+ },
+ startTime: startTime,
+ expectedSamples: expectedSamples,
+ expectedHistograms: expectedHistograms,
+ },
+ {
+ name: "IngestSTZero=true/startTime=ts-1ms",
+ otlpOpts: OTLPOptions{
+ IngestSTZeroSample: true,
+ },
+ startTime: startTime,
+ expectedSamples: expectedSamplesWithSTZero,
+ expectedHistograms: expectedHistogramsWithSTZero,
+ },
+ } {
+ t.Run(testCase.name, func(t *testing.T) {
+ exportRequest := generateOTLPWriteRequest(timestamp, testCase.startTime)
+ appendable := handleOTLP(t, exportRequest, config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoTranslation,
+ }, testCase.otlpOpts)
+ for i, expect := range testCase.expectedSamples {
+ actual := appendable.samples[i]
+ require.True(t, labels.Equal(expect.l, actual.l), "sample labels,pos=%v", i)
+ require.Equal(t, expect.t, actual.t, "sample timestamp,pos=%v", i)
+ require.Equal(t, expect.v, actual.v, "sample value,pos=%v", i)
+ }
+ for i, expect := range testCase.expectedHistograms {
+ actual := appendable.histograms[i]
+ require.True(t, labels.Equal(expect.l, actual.l), "histogram labels,pos=%v", i)
+ require.Equal(t, expect.t, actual.t, "histogram timestamp,pos=%v", i)
+ require.True(t, expect.h.Equals(actual.h), "histogram value,pos=%v", i)
+ }
+ require.Len(t, appendable.samples, len(testCase.expectedSamples))
+ require.Len(t, appendable.histograms, len(testCase.expectedHistograms))
+ })
+ }
+}
+
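+// requireContainsSample fails the test if expected is not present in actual,
+// comparing labels, timestamp and value.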
+func requireContainsSample(t *testing.T, actual []mockSample, expected mockSample) {
+ t.Helper()
+
+ for _, got := range actual {
+ if labels.Equal(expected.l, got.l) && expected.t == got.t && expected.v == got.v {
+ return
+ }
+ }
+	require.Fail(t, fmt.Sprintf("Sample not found:\n"+
+		"expected: %v\n"+
+		"actual  : %v", expected, actual))
+}
+
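+// requireContainsMetadata fails the test if expected is not present in actual,
+// comparing labels as well as metadata type, unit and help text.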
+func requireContainsMetadata(t *testing.T, actual []mockMetadata, expected mockMetadata) {
+ t.Helper()
+
+ for _, got := range actual {
+ if labels.Equal(expected.l, got.l) && expected.m.Type == got.m.Type && expected.m.Unit == got.m.Unit && expected.m.Help == got.m.Help {
+ return
+ }
+ }
+	require.Fail(t, fmt.Sprintf("Metadata not found:\n"+
+		"expected: %v\n"+
+		"actual  : %v", expected, actual))
+}
+
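+// handleOTLP marshals exportRequest to protobuf, posts it to a fresh OTLP
+// write handler built from the given config and options, requires a 200
+// response, and returns the mock appendable that captured the writes.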
+func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *mockAppendable {
+ buf, err := exportRequest.MarshalProto()
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(buf))
+ require.NoError(t, err)
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
+ appendable := &mockAppendable{}
+ handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config {
+ return config.Config{
+ OTLPConfig: otlpCfg,
+ }
+ }, otlpOpts)
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+
+ return appendable
+}
+
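+// generateOTLPWriteRequest builds an OTLP export request containing one
+// counter (with an exemplar), one gauge, one histogram and one exponential
+// histogram, all stamped with the given sample and start timestamps.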
+func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.ExportRequest {
+ d := pmetric.NewMetrics()
+
+	// Generate one counter, one gauge, one histogram and one exponential histogram,
+	// with the resource attributes service.name="test-service", service.instance.id="test-instance",
+	// host.name="test-host" and the metric attribute foo.bar="baz".
+
+ resourceMetric := d.ResourceMetrics().AppendEmpty()
+ resourceMetric.Resource().Attributes().PutStr("service.name", "test-service")
+ resourceMetric.Resource().Attributes().PutStr("service.instance.id", "test-instance")
+ resourceMetric.Resource().Attributes().PutStr("host.name", "test-host")
+
+ scopeMetric := resourceMetric.ScopeMetrics().AppendEmpty()
+
+ // Generate One Counter
+ counterMetric := scopeMetric.Metrics().AppendEmpty()
+ counterMetric.SetName("test.counter")
+ counterMetric.SetDescription("test-counter-description")
+ counterMetric.SetUnit("By")
+ counterMetric.SetEmptySum()
+ counterMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+ counterMetric.Sum().SetIsMonotonic(true)
+
+ counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty()
+ counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ counterDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
+ counterDataPoint.SetDoubleValue(10.0)
+ counterDataPoint.Attributes().PutStr("foo.bar", "baz")
+
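+	// Attach a single exemplar with fixed span and trace IDs to the counter data point.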
+ counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
+
+ counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ counterExemplar.SetDoubleValue(10.0)
+ counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})
+ counterExemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
+
+ // Generate One Gauge
+ gaugeMetric := scopeMetric.Metrics().AppendEmpty()
+ gaugeMetric.SetName("test.gauge")
+ gaugeMetric.SetDescription("test-gauge-description")
+ gaugeMetric.SetUnit("By")
+ gaugeMetric.SetEmptyGauge()
+
+ gaugeDataPoint := gaugeMetric.Gauge().DataPoints().AppendEmpty()
+ gaugeDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ gaugeDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
+ gaugeDataPoint.SetDoubleValue(10.0)
+ gaugeDataPoint.Attributes().PutStr("foo.bar", "baz")
+
+ // Generate One Histogram
+ histogramMetric := scopeMetric.Metrics().AppendEmpty()
+ histogramMetric.SetName("test.histogram")
+ histogramMetric.SetDescription("test-histogram-description")
+ histogramMetric.SetUnit("By")
+ histogramMetric.SetEmptyHistogram()
+ histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+
+ histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty()
+ histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ histogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
+ histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0})
+ histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2})
+ histogramDataPoint.SetCount(12)
+ histogramDataPoint.SetSum(30.0)
+ histogramDataPoint.Attributes().PutStr("foo.bar", "baz")
+
+ // Generate One Exponential-Histogram
+ exponentialHistogramMetric := scopeMetric.Metrics().AppendEmpty()
+ exponentialHistogramMetric.SetName("test.exponential.histogram")
+ exponentialHistogramMetric.SetDescription("test-exponential-histogram-description")
+ exponentialHistogramMetric.SetUnit("By")
+ exponentialHistogramMetric.SetEmptyExponentialHistogram()
+ exponentialHistogramMetric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+
+ exponentialHistogramDataPoint := exponentialHistogramMetric.ExponentialHistogram().DataPoints().AppendEmpty()
+ exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ exponentialHistogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
+	exponentialHistogramDataPoint.SetScale(2)
+ exponentialHistogramDataPoint.Positive().BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2})
+ exponentialHistogramDataPoint.SetZeroCount(2)
+ exponentialHistogramDataPoint.SetCount(10)
+ exponentialHistogramDataPoint.SetSum(30.0)
+ exponentialHistogramDataPoint.Attributes().PutStr("foo.bar", "baz")
+
+ return pmetricotlp.NewExportRequestFromMetrics(d)
+}
+
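+// TestOTLPDelta verifies that with ConvertDelta enabled, delta sum data
+// points (+0, +1, +2) are converted to cumulative samples (0, 1, 3) before
+// being appended.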
+func TestOTLPDelta(t *testing.T) {
+ log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
+ appendable := &mockAppendable{}
+ cfg := func() config.Config {
+ return config.Config{OTLPConfig: config.DefaultOTLPConfig}
+ }
+ handler := NewOTLPWriteHandler(log, nil, appendable, cfg, OTLPOptions{ConvertDelta: true})
+
+ md := pmetric.NewMetrics()
+ ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
+
+ m := ms.AppendEmpty()
+ m.SetName("some.delta.total")
+
+ sum := m.SetEmptySum()
+ sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+
+ ts := time.Date(2000, 1, 2, 3, 4, 0, 0, time.UTC)
+ for i := range 3 {
+ dp := sum.DataPoints().AppendEmpty()
+ dp.SetIntValue(int64(i))
+ dp.SetTimestamp(pcommon.NewTimestampFromTime(ts.Add(time.Duration(i) * time.Second)))
+ }
+
+ proto, err := pmetricotlp.NewExportRequestFromMetrics(md).MarshalProto()
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(proto))
+ require.NoError(t, err)
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ rec := httptest.NewRecorder()
+ handler.ServeHTTP(rec, req)
+ require.Equal(t, http.StatusOK, rec.Result().StatusCode)
+
+ ls := labels.FromStrings("__name__", "some_delta_total")
+ milli := func(sec int) int64 {
+ return time.Date(2000, 1, 2, 3, 4, sec, 0, time.UTC).UnixMilli()
+ }
+
+ want := []mockSample{
+ {t: milli(0), l: ls, v: 0}, // +0
+ {t: milli(1), l: ls, v: 1}, // +1
+ {t: milli(2), l: ls, v: 3}, // +2
+ }
+ if diff := cmp.Diff(want, appendable.samples, cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" {
+ t.Fatal(diff)
+ }
+}
+
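+// BenchmarkOTLP measures OTLP write handler throughput for sums, histograms
+// and exponential histograms, across cumulative, delta and mixed
+// temporalities, with and without delta-to-cumulative conversion, using
+// GOMAXPROCS concurrent writers.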
+func BenchmarkOTLP(b *testing.B) {
+ start := time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
+
+ type Type struct {
+ name string
+ data func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric
+ }
+ types := []Type{{
+ name: "sum",
+ data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ cumul := make(map[int]float64)
+ return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ m := pmetric.NewMetric()
+ sum := m.SetEmptySum()
+ sum.SetAggregationTemporality(mode)
+ dps := sum.DataPoints()
+ for id := range dpc {
+ dp := dps.AppendEmpty()
+ dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
+ dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
+ dp.Attributes().PutStr("id", strconv.Itoa(id))
+ v := float64(rand.IntN(100)) / 10
+ switch mode {
+ case pmetric.AggregationTemporalityDelta:
+ dp.SetDoubleValue(v)
+ case pmetric.AggregationTemporalityCumulative:
+ cumul[id] += v
+ dp.SetDoubleValue(cumul[id])
+ }
+ }
+ return []pmetric.Metric{m}
+ }
+ }(),
+ }, {
+ name: "histogram",
+ data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ bounds := [4]float64{1, 10, 100, 1000}
+ type state struct {
+ counts [4]uint64
+ count uint64
+ sum float64
+ }
+ var cumul []state
+ return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ if cumul == nil {
+ cumul = make([]state, dpc)
+ }
+ m := pmetric.NewMetric()
+ hist := m.SetEmptyHistogram()
+ hist.SetAggregationTemporality(mode)
+ dps := hist.DataPoints()
+ for id := range dpc {
+ dp := dps.AppendEmpty()
+ dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
+ dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
+ dp.Attributes().PutStr("id", strconv.Itoa(id))
+ dp.ExplicitBounds().FromRaw(bounds[:])
+
+ var obs *state
+ switch mode {
+ case pmetric.AggregationTemporalityDelta:
+ obs = new(state)
+ case pmetric.AggregationTemporalityCumulative:
+ obs = &cumul[id]
+ }
+
+ for i := range obs.counts {
+ v := uint64(rand.IntN(10))
+ obs.counts[i] += v
+ obs.count++
+ obs.sum += float64(v)
+ }
+
+ dp.SetCount(obs.count)
+ dp.SetSum(obs.sum)
+ dp.BucketCounts().FromRaw(obs.counts[:])
+ }
+ return []pmetric.Metric{m}
+ }
+ }(),
+ }, {
+ name: "exponential",
+ data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ type state struct {
+ counts [4]uint64
+ count uint64
+ sum float64
+ }
+ var cumul []state
+ return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ if cumul == nil {
+ cumul = make([]state, dpc)
+ }
+ m := pmetric.NewMetric()
+ ex := m.SetEmptyExponentialHistogram()
+ ex.SetAggregationTemporality(mode)
+ dps := ex.DataPoints()
+ for id := range dpc {
+ dp := dps.AppendEmpty()
+ dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
+ dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
+ dp.Attributes().PutStr("id", strconv.Itoa(id))
+ dp.SetScale(2)
+
+ var obs *state
+ switch mode {
+ case pmetric.AggregationTemporalityDelta:
+ obs = new(state)
+ case pmetric.AggregationTemporalityCumulative:
+ obs = &cumul[id]
+ }
+
+ for i := range obs.counts {
+ v := uint64(rand.IntN(10))
+ obs.counts[i] += v
+ obs.count++
+ obs.sum += float64(v)
+ }
+
+ dp.Positive().BucketCounts().FromRaw(obs.counts[:])
+ dp.SetCount(obs.count)
+ dp.SetSum(obs.sum)
+ }
+
+ return []pmetric.Metric{m}
+ }
+ }(),
+ }}
+
+ modes := []struct {
+ name string
+ data func(func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, int) []pmetric.Metric
+ }{{
+ name: "cumulative",
+ data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
+ return data(pmetric.AggregationTemporalityCumulative, 10, epoch)
+ },
+ }, {
+ name: "delta",
+ data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
+ return data(pmetric.AggregationTemporalityDelta, 10, epoch)
+ },
+ }, {
+ name: "mixed",
+ data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
+ cumul := data(pmetric.AggregationTemporalityCumulative, 5, epoch)
+ delta := data(pmetric.AggregationTemporalityDelta, 5, epoch)
+ out := append(cumul, delta...)
+ rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
+ return out
+ },
+ }}
+
+ configs := []struct {
+ name string
+ opts OTLPOptions
+ }{
+ {name: "default"},
+ {name: "convert", opts: OTLPOptions{ConvertDelta: true}},
+ }
+
+	workers := runtime.GOMAXPROCS(0)
+ for _, cs := range types {
+ for _, mode := range modes {
+ for _, cfg := range configs {
+ b.Run(fmt.Sprintf("type=%s/temporality=%s/cfg=%s", cs.name, mode.name, cfg.name), func(b *testing.B) {
+ if !cfg.opts.ConvertDelta && (mode.name == "delta" || mode.name == "mixed") {
+ b.Skip("not possible")
+ }
+
+ var total int
+
+				// reqs is a [b.N]*http.Request, divided across the workers.
+				// deltatocumulative requires timestamps to be strictly in
+				// order on a per-series basis. To ensure this, each reqs[k]
+				// contains samples of differently named series, sorted
+				// strictly in time order.
+				reqs := make([][]*http.Request, workers)
+ for n := range b.N {
+					k := n % workers
+
+ md := pmetric.NewMetrics()
+ ms := md.ResourceMetrics().AppendEmpty().
+ ScopeMetrics().AppendEmpty().
+ Metrics()
+
+ for i, m := range mode.data(cs.data, n) {
+ m.SetName(fmt.Sprintf("benchmark_%d_%d", k, i))
+ m.MoveTo(ms.AppendEmpty())
+ }
+
+ total += sampleCount(md)
+
+ ex := pmetricotlp.NewExportRequestFromMetrics(md)
+ data, err := ex.MarshalProto()
+ require.NoError(b, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(data))
+ require.NoError(b, err)
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ reqs[k] = append(reqs[k], req)
+ }
+
+ log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
+ mock := new(mockAppendable)
+ appendable := syncAppendable{Appendable: mock, lock: new(sync.Mutex)}
+ cfgfn := func() config.Config {
+ return config.Config{OTLPConfig: config.DefaultOTLPConfig}
+ }
+ handler := NewOTLPWriteHandler(log, nil, appendable, cfgfn, cfg.opts)
+
+ fail := make(chan struct{})
+ done := make(chan struct{})
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+				// We use multiple workers to mimic a real-world scenario
+				// where multiple OTel collectors are sending their
+				// time series in parallel. This is necessary to exercise
+				// potential lock contention in this benchmark.
+				for k := range workers {
+ go func() {
+ rec := httptest.NewRecorder()
+ for _, req := range reqs[k] {
+ handler.ServeHTTP(rec, req)
+ if rec.Result().StatusCode != http.StatusOK {
+ fail <- struct{}{}
+ return
+ }
+ }
+ done <- struct{}{}
+ }()
+ }
+
+				for range workers {
+ select {
+ case <-fail:
+ b.FailNow()
+ case <-done:
+ }
+ }
+
+ require.Equal(b, total, len(mock.samples)+len(mock.histograms))
+ })
+ }
+ }
+ }
+}
+
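+// sampleCount returns how many Prometheus samples the OTLP metrics expand
+// to: one per data point, except classic histograms, which produce one
+// sample per bucket plus the le=+Inf, _sum and _count series.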
+func sampleCount(md pmetric.Metrics) int {
+ var total int
+ rms := md.ResourceMetrics()
+ for i := range rms.Len() {
+ sms := rms.At(i).ScopeMetrics()
+ for i := range sms.Len() {
+ ms := sms.At(i).Metrics()
+ for i := range ms.Len() {
+ m := ms.At(i)
+ switch m.Type() {
+ case pmetric.MetricTypeSum:
+ total += m.Sum().DataPoints().Len()
+ case pmetric.MetricTypeGauge:
+ total += m.Gauge().DataPoints().Len()
+ case pmetric.MetricTypeHistogram:
+ dps := m.Histogram().DataPoints()
+ for i := range dps.Len() {
+ total += dps.At(i).BucketCounts().Len()
+ total++ // le=+Inf series
+ total++ // _sum series
+ total++ // _count series
+ }
+ case pmetric.MetricTypeExponentialHistogram:
+ total += m.ExponentialHistogram().DataPoints().Len()
+ case pmetric.MetricTypeSummary:
+ total += m.Summary().DataPoints().Len()
+ }
+ }
+ }
+ }
+ return total
+}
+
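+// syncAppendable wraps an Appendable so that appends are serialized through
+// a shared lock, letting the mock appendable in BenchmarkOTLP be shared
+// safely across concurrent workers.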
+type syncAppendable struct {
+ lock sync.Locker
+ storage.Appendable
+}
+
+type syncAppender struct {
+ lock sync.Locker
+ storage.Appender
+}
+
+func (s syncAppendable) Appender(ctx context.Context) storage.Appender {
+ return syncAppender{Appender: s.Appendable.Appender(ctx), lock: s.lock}
+}
+
+func (s syncAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.Appender.Append(ref, l, t, v)
+}
+
+func (s syncAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, f *histogram.FloatHistogram) (storage.SeriesRef, error) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.Appender.AppendHistogram(ref, l, t, h, f)
+}
diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go
index 099a2f1cab..1b1b86ff1e 100644
--- a/storage/remote/write_test.go
+++ b/storage/remote/write_test.go
@@ -14,40 +14,20 @@
package remote
import (
- "bytes"
- "context"
"errors"
- "fmt"
- "log/slog"
- "math/rand/v2"
- "net/http"
- "net/http/httptest"
"net/url"
- "os"
- "reflect"
- "runtime"
- "strconv"
- "sync"
"testing"
"time"
- "github.com/google/go-cmp/cmp"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
- "github.com/prometheus/otlptranslator"
"github.com/stretchr/testify/require"
- "go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
- "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"github.com/prometheus/prometheus/config"
- "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/relabel"
- "github.com/prometheus/prometheus/storage"
)
func testRemoteWriteConfig() *config.RemoteWriteConfig {
@@ -385,1233 +365,6 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
require.NoError(t, s.Close())
}
-func TestOTLPWriteHandler(t *testing.T) {
- timestamp := time.Now()
- var zeroTime time.Time
- exportRequest := generateOTLPWriteRequest(timestamp, zeroTime)
- for _, testCase := range []struct {
- name string
- otlpCfg config.OTLPConfig
- typeAndUnitLabels bool
- expectedSamples []mockSample
- expectedMetadata []mockMetadata
- }{
- {
- name: "NoTranslation/NoTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoTranslation,
- },
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "NoTranslation/WithTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoTranslation,
- },
- typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- // Metadata labels follow series labels.
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "UnderscoreEscapingWithSuffixes/NoTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
- },
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "UnderscoreEscapingWithoutSuffixes",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.UnderscoreEscapingWithoutSuffixes,
- },
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "UnderscoreEscapingWithSuffixes/WithTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
- },
- typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.New(labels.Label{Name: "__name__", Value: "test_counter_bytes_total"},
- labels.Label{Name: "__type__", Value: "counter"},
- labels.Label{Name: "__unit__", Value: "bytes"},
- labels.Label{Name: "foo_bar", Value: "baz"},
- labels.Label{Name: "instance", Value: "test-instance"},
- labels.Label{Name: "job", Value: "test-service"}),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.New(
- labels.Label{Name: "__name__", Value: "target_info"},
- labels.Label{Name: "host_name", Value: "test-host"},
- labels.Label{Name: "instance", Value: "test-instance"},
- labels.Label{Name: "job", Value: "test-service"},
- ),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "NoUTF8EscapingWithSuffixes/NoTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
- },
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "NoUTF8EscapingWithSuffixes/WithTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
- },
- typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- } {
- t.Run(testCase.name, func(t *testing.T) {
- otlpOpts := OTLPOptions{
- EnableTypeAndUnitLabels: testCase.typeAndUnitLabels,
- AppendMetadata: true,
- }
- appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, otlpOpts)
- for _, sample := range testCase.expectedSamples {
- requireContainsSample(t, appendable.samples, sample)
- }
- for _, meta := range testCase.expectedMetadata {
- requireContainsMetadata(t, appendable.metadata, meta)
- }
- require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
- require.Len(t, appendable.histograms, 1) // 1 (exponential histogram)
- require.Len(t, appendable.metadata, 13) // for each float and histogram sample
- require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
- })
- }
-}
-
-// Check that start time is ingested if ingestSTZeroSample is enabled
-// and the start time is actually set (non-zero).
-func TestOTLPWriteHandler_StartTime(t *testing.T) {
- timestamp := time.Now()
- startTime := timestamp.Add(-1 * time.Millisecond)
- var zeroTime time.Time
-
- expectedSamples := []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 30.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- t: timestamp.UnixMilli(),
- v: 2.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- t: timestamp.UnixMilli(),
- v: 4.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- t: timestamp.UnixMilli(),
- v: 6.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- t: timestamp.UnixMilli(),
- v: 8.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1.0,
- },
- }
- expectedHistograms := []mockHistogram{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- h: &histogram.Histogram{
- Schema: 2,
- ZeroThreshold: 1e-128,
- ZeroCount: 2,
- Count: 10,
- Sum: 30,
- PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}},
- PositiveBuckets: []int64{2, 0, 0, 0, 0},
- },
- },
- }
-
- expectedSamplesWithSTZero := make([]mockSample, 0, len(expectedSamples)*2-1) // All samples will get ST zero, except target_info.
- for _, s := range expectedSamples {
- if s.l.Get(model.MetricNameLabel) != "target_info" {
- expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, mockSample{
- l: s.l.Copy(),
- t: startTime.UnixMilli(),
- v: 0,
- })
- }
- expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, s)
- }
- expectedHistogramsWithSTZero := make([]mockHistogram, 0, len(expectedHistograms)*2)
- for _, s := range expectedHistograms {
- if s.l.Get(model.MetricNameLabel) != "target_info" {
- expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, mockHistogram{
- l: s.l.Copy(),
- t: startTime.UnixMilli(),
- h: &histogram.Histogram{},
- })
- }
- expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, s)
- }
-
- for _, testCase := range []struct {
- name string
- otlpOpts OTLPOptions
- startTime time.Time
- expectSTZero bool
- expectedSamples []mockSample
- expectedHistograms []mockHistogram
- }{
- {
- name: "IngestSTZero=false/startTime=0",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: false,
- },
- startTime: zeroTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=true/startTime=0",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: true,
- },
- startTime: zeroTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=false/startTime=ts-1ms",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: false,
- },
- startTime: startTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=true/startTime=ts-1ms",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: true,
- },
- startTime: startTime,
- expectedSamples: expectedSamplesWithSTZero,
- expectedHistograms: expectedHistogramsWithSTZero,
- },
- } {
- t.Run(testCase.name, func(t *testing.T) {
- exportRequest := generateOTLPWriteRequest(timestamp, testCase.startTime)
- appendable := handleOTLP(t, exportRequest, config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoTranslation,
- }, testCase.otlpOpts)
- for i, expect := range testCase.expectedSamples {
- actual := appendable.samples[i]
- require.True(t, labels.Equal(expect.l, actual.l), "sample labels,pos=%v", i)
- require.Equal(t, expect.t, actual.t, "sample timestamp,pos=%v", i)
- require.Equal(t, expect.v, actual.v, "sample value,pos=%v", i)
- }
- for i, expect := range testCase.expectedHistograms {
- actual := appendable.histograms[i]
- require.True(t, labels.Equal(expect.l, actual.l), "histogram labels,pos=%v", i)
- require.Equal(t, expect.t, actual.t, "histogram timestamp,pos=%v", i)
- require.True(t, expect.h.Equals(actual.h), "histogram value,pos=%v", i)
- }
- require.Len(t, appendable.samples, len(testCase.expectedSamples))
- require.Len(t, appendable.histograms, len(testCase.expectedHistograms))
- })
- }
-}
-
-func requireContainsSample(t *testing.T, actual []mockSample, expected mockSample) {
- t.Helper()
-
- for _, got := range actual {
- if labels.Equal(expected.l, got.l) && expected.t == got.t && expected.v == got.v {
- return
- }
- }
- require.Fail(t, fmt.Sprintf("Sample not found: \n"+
- "expected: %v\n"+
- "actual : %v", expected, actual))
-}
-
-func requireContainsMetadata(t *testing.T, actual []mockMetadata, expected mockMetadata) {
- t.Helper()
-
- for _, got := range actual {
- if labels.Equal(expected.l, got.l) && expected.m.Type == got.m.Type && expected.m.Unit == got.m.Unit && expected.m.Help == got.m.Help {
- return
- }
- }
- require.Fail(t, fmt.Sprintf("Metadata not found: \n"+
- "expected: %v\n"+
- "actual : %v", expected, actual))
-}
-
-func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *mockAppendable {
- buf, err := exportRequest.MarshalProto()
- require.NoError(t, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(buf))
- require.NoError(t, err)
- req.Header.Set("Content-Type", "application/x-protobuf")
-
- log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- appendable := &mockAppendable{}
- handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config {
- return config.Config{
- OTLPConfig: otlpCfg,
- }
- }, otlpOpts)
- recorder := httptest.NewRecorder()
- handler.ServeHTTP(recorder, req)
-
- resp := recorder.Result()
- require.Equal(t, http.StatusOK, resp.StatusCode)
-
- return appendable
-}
-
-func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.ExportRequest {
- d := pmetric.NewMetrics()
-
- // Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
- // with resource attributes: service.name="test-service", service.instance.id="test-instance", host.name="test-host"
- // with metric attribute: foo.bar="baz"
-
- resourceMetric := d.ResourceMetrics().AppendEmpty()
- resourceMetric.Resource().Attributes().PutStr("service.name", "test-service")
- resourceMetric.Resource().Attributes().PutStr("service.instance.id", "test-instance")
- resourceMetric.Resource().Attributes().PutStr("host.name", "test-host")
-
- scopeMetric := resourceMetric.ScopeMetrics().AppendEmpty()
-
- // Generate One Counter
- counterMetric := scopeMetric.Metrics().AppendEmpty()
- counterMetric.SetName("test.counter")
- counterMetric.SetDescription("test-counter-description")
- counterMetric.SetUnit("By")
- counterMetric.SetEmptySum()
- counterMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- counterMetric.Sum().SetIsMonotonic(true)
-
- counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty()
- counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- counterDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
- counterDataPoint.SetDoubleValue(10.0)
- counterDataPoint.Attributes().PutStr("foo.bar", "baz")
-
- counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
-
- counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- counterExemplar.SetDoubleValue(10.0)
- counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})
- counterExemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
-
- // Generate One Gauge
- gaugeMetric := scopeMetric.Metrics().AppendEmpty()
- gaugeMetric.SetName("test.gauge")
- gaugeMetric.SetDescription("test-gauge-description")
- gaugeMetric.SetUnit("By")
- gaugeMetric.SetEmptyGauge()
-
- gaugeDataPoint := gaugeMetric.Gauge().DataPoints().AppendEmpty()
- gaugeDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- gaugeDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
- gaugeDataPoint.SetDoubleValue(10.0)
- gaugeDataPoint.Attributes().PutStr("foo.bar", "baz")
-
- // Generate One Histogram
- histogramMetric := scopeMetric.Metrics().AppendEmpty()
- histogramMetric.SetName("test.histogram")
- histogramMetric.SetDescription("test-histogram-description")
- histogramMetric.SetUnit("By")
- histogramMetric.SetEmptyHistogram()
- histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
-
- histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty()
- histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- histogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
- histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0})
- histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2})
- histogramDataPoint.SetCount(12)
- histogramDataPoint.SetSum(30.0)
- histogramDataPoint.Attributes().PutStr("foo.bar", "baz")
-
- // Generate One Exponential-Histogram
- exponentialHistogramMetric := scopeMetric.Metrics().AppendEmpty()
- exponentialHistogramMetric.SetName("test.exponential.histogram")
- exponentialHistogramMetric.SetDescription("test-exponential-histogram-description")
- exponentialHistogramMetric.SetUnit("By")
- exponentialHistogramMetric.SetEmptyExponentialHistogram()
- exponentialHistogramMetric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
-
- exponentialHistogramDataPoint := exponentialHistogramMetric.ExponentialHistogram().DataPoints().AppendEmpty()
- exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- exponentialHistogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
- exponentialHistogramDataPoint.SetScale(2.0)
- exponentialHistogramDataPoint.Positive().BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2})
- exponentialHistogramDataPoint.SetZeroCount(2)
- exponentialHistogramDataPoint.SetCount(10)
- exponentialHistogramDataPoint.SetSum(30.0)
- exponentialHistogramDataPoint.Attributes().PutStr("foo.bar", "baz")
-
- return pmetricotlp.NewExportRequestFromMetrics(d)
-}
-
-func TestOTLPDelta(t *testing.T) {
- log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- appendable := &mockAppendable{}
- cfg := func() config.Config {
- return config.Config{OTLPConfig: config.DefaultOTLPConfig}
- }
- handler := NewOTLPWriteHandler(log, nil, appendable, cfg, OTLPOptions{ConvertDelta: true})
-
- md := pmetric.NewMetrics()
- ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
-
- m := ms.AppendEmpty()
- m.SetName("some.delta.total")
-
- sum := m.SetEmptySum()
- sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
-
- ts := time.Date(2000, 1, 2, 3, 4, 0, 0, time.UTC)
- for i := range 3 {
- dp := sum.DataPoints().AppendEmpty()
- dp.SetIntValue(int64(i))
- dp.SetTimestamp(pcommon.NewTimestampFromTime(ts.Add(time.Duration(i) * time.Second)))
- }
-
- proto, err := pmetricotlp.NewExportRequestFromMetrics(md).MarshalProto()
- require.NoError(t, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(proto))
- require.NoError(t, err)
- req.Header.Set("Content-Type", "application/x-protobuf")
-
- rec := httptest.NewRecorder()
- handler.ServeHTTP(rec, req)
- require.Equal(t, http.StatusOK, rec.Result().StatusCode)
-
- ls := labels.FromStrings("__name__", "some_delta_total")
- milli := func(sec int) int64 {
- return time.Date(2000, 1, 2, 3, 4, sec, 0, time.UTC).UnixMilli()
- }
-
- want := []mockSample{
- {t: milli(0), l: ls, v: 0}, // +0
- {t: milli(1), l: ls, v: 1}, // +1
- {t: milli(2), l: ls, v: 3}, // +2
- }
- if diff := cmp.Diff(want, appendable.samples, cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" {
- t.Fatal(diff)
- }
-}
-
-func BenchmarkOTLP(b *testing.B) {
- start := time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
-
- type Type struct {
- name string
- data func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric
- }
- types := []Type{{
- name: "sum",
- data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- cumul := make(map[int]float64)
- return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- m := pmetric.NewMetric()
- sum := m.SetEmptySum()
- sum.SetAggregationTemporality(mode)
- dps := sum.DataPoints()
- for id := range dpc {
- dp := dps.AppendEmpty()
- dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
- dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
- dp.Attributes().PutStr("id", strconv.Itoa(id))
- v := float64(rand.IntN(100)) / 10
- switch mode {
- case pmetric.AggregationTemporalityDelta:
- dp.SetDoubleValue(v)
- case pmetric.AggregationTemporalityCumulative:
- cumul[id] += v
- dp.SetDoubleValue(cumul[id])
- }
- }
- return []pmetric.Metric{m}
- }
- }(),
- }, {
- name: "histogram",
- data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- bounds := [4]float64{1, 10, 100, 1000}
- type state struct {
- counts [4]uint64
- count uint64
- sum float64
- }
- var cumul []state
- return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- if cumul == nil {
- cumul = make([]state, dpc)
- }
- m := pmetric.NewMetric()
- hist := m.SetEmptyHistogram()
- hist.SetAggregationTemporality(mode)
- dps := hist.DataPoints()
- for id := range dpc {
- dp := dps.AppendEmpty()
- dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
- dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
- dp.Attributes().PutStr("id", strconv.Itoa(id))
- dp.ExplicitBounds().FromRaw(bounds[:])
-
- var obs *state
- switch mode {
- case pmetric.AggregationTemporalityDelta:
- obs = new(state)
- case pmetric.AggregationTemporalityCumulative:
- obs = &cumul[id]
- }
-
- for i := range obs.counts {
- v := uint64(rand.IntN(10))
- obs.counts[i] += v
- obs.count++
- obs.sum += float64(v)
- }
-
- dp.SetCount(obs.count)
- dp.SetSum(obs.sum)
- dp.BucketCounts().FromRaw(obs.counts[:])
- }
- return []pmetric.Metric{m}
- }
- }(),
- }, {
- name: "exponential",
- data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- type state struct {
- counts [4]uint64
- count uint64
- sum float64
- }
- var cumul []state
- return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- if cumul == nil {
- cumul = make([]state, dpc)
- }
- m := pmetric.NewMetric()
- ex := m.SetEmptyExponentialHistogram()
- ex.SetAggregationTemporality(mode)
- dps := ex.DataPoints()
- for id := range dpc {
- dp := dps.AppendEmpty()
- dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
- dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
- dp.Attributes().PutStr("id", strconv.Itoa(id))
- dp.SetScale(2)
-
- var obs *state
- switch mode {
- case pmetric.AggregationTemporalityDelta:
- obs = new(state)
- case pmetric.AggregationTemporalityCumulative:
- obs = &cumul[id]
- }
-
- for i := range obs.counts {
- v := uint64(rand.IntN(10))
- obs.counts[i] += v
- obs.count++
- obs.sum += float64(v)
- }
-
- dp.Positive().BucketCounts().FromRaw(obs.counts[:])
- dp.SetCount(obs.count)
- dp.SetSum(obs.sum)
- }
-
- return []pmetric.Metric{m}
- }
- }(),
- }}
-
- modes := []struct {
- name string
- data func(func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, int) []pmetric.Metric
- }{{
- name: "cumulative",
- data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
- return data(pmetric.AggregationTemporalityCumulative, 10, epoch)
- },
- }, {
- name: "delta",
- data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
- return data(pmetric.AggregationTemporalityDelta, 10, epoch)
- },
- }, {
- name: "mixed",
- data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
- cumul := data(pmetric.AggregationTemporalityCumulative, 5, epoch)
- delta := data(pmetric.AggregationTemporalityDelta, 5, epoch)
- out := append(cumul, delta...)
- rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
- return out
- },
- }}
-
- configs := []struct {
- name string
- opts OTLPOptions
- }{
- {name: "default"},
- {name: "convert", opts: OTLPOptions{ConvertDelta: true}},
- }
-
- Workers := runtime.GOMAXPROCS(0)
- for _, cs := range types {
- for _, mode := range modes {
- for _, cfg := range configs {
- b.Run(fmt.Sprintf("type=%s/temporality=%s/cfg=%s", cs.name, mode.name, cfg.name), func(b *testing.B) {
- if !cfg.opts.ConvertDelta && (mode.name == "delta" || mode.name == "mixed") {
- b.Skip("not possible")
- }
-
- var total int
-
- // reqs is a [b.N]*http.Request, divided across the workers.
- // deltatocumulative requires timestamps to be strictly in
- // order on a per-series basis. to ensure this, each reqs[k]
- // contains samples of differently named series, sorted
- // strictly in time order
- reqs := make([][]*http.Request, Workers)
- for n := range b.N {
- k := n % Workers
-
- md := pmetric.NewMetrics()
- ms := md.ResourceMetrics().AppendEmpty().
- ScopeMetrics().AppendEmpty().
- Metrics()
-
- for i, m := range mode.data(cs.data, n) {
- m.SetName(fmt.Sprintf("benchmark_%d_%d", k, i))
- m.MoveTo(ms.AppendEmpty())
- }
-
- total += sampleCount(md)
-
- ex := pmetricotlp.NewExportRequestFromMetrics(md)
- data, err := ex.MarshalProto()
- require.NoError(b, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(data))
- require.NoError(b, err)
- req.Header.Set("Content-Type", "application/x-protobuf")
-
- reqs[k] = append(reqs[k], req)
- }
-
- log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- mock := new(mockAppendable)
- appendable := syncAppendable{Appendable: mock, lock: new(sync.Mutex)}
- cfgfn := func() config.Config {
- return config.Config{OTLPConfig: config.DefaultOTLPConfig}
- }
- handler := NewOTLPWriteHandler(log, nil, appendable, cfgfn, cfg.opts)
-
- fail := make(chan struct{})
- done := make(chan struct{})
-
- b.ResetTimer()
- b.ReportAllocs()
-
- // we use multiple workers to mimic a real-world scenario
- // where multiple OTel collectors are sending their
- // time-series in parallel.
- // this is necessary to exercise potential lock-contention
- // in this benchmark
- for k := range Workers {
- go func() {
- rec := httptest.NewRecorder()
- for _, req := range reqs[k] {
- handler.ServeHTTP(rec, req)
- if rec.Result().StatusCode != http.StatusOK {
- fail <- struct{}{}
- return
- }
- }
- done <- struct{}{}
- }()
- }
-
- for range Workers {
- select {
- case <-fail:
- b.FailNow()
- case <-done:
- }
- }
-
- require.Equal(b, total, len(mock.samples)+len(mock.histograms))
- })
- }
- }
- }
-}
-
-func sampleCount(md pmetric.Metrics) int {
- var total int
- rms := md.ResourceMetrics()
- for i := range rms.Len() {
- sms := rms.At(i).ScopeMetrics()
- for i := range sms.Len() {
- ms := sms.At(i).Metrics()
- for i := range ms.Len() {
- m := ms.At(i)
- switch m.Type() {
- case pmetric.MetricTypeSum:
- total += m.Sum().DataPoints().Len()
- case pmetric.MetricTypeGauge:
- total += m.Gauge().DataPoints().Len()
- case pmetric.MetricTypeHistogram:
- dps := m.Histogram().DataPoints()
- for i := range dps.Len() {
- total += dps.At(i).BucketCounts().Len()
- total++ // le=+Inf series
- total++ // _sum series
- total++ // _count series
- }
- case pmetric.MetricTypeExponentialHistogram:
- total += m.ExponentialHistogram().DataPoints().Len()
- case pmetric.MetricTypeSummary:
- total += m.Summary().DataPoints().Len()
- }
- }
- }
- }
- return total
-}
-
-type syncAppendable struct {
- lock sync.Locker
- storage.Appendable
-}
-
-type syncAppender struct {
- lock sync.Locker
- storage.Appender
-}
-
-func (s syncAppendable) Appender(ctx context.Context) storage.Appender {
- return syncAppender{Appender: s.Appendable.Appender(ctx), lock: s.lock}
-}
-
-func (s syncAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
- s.lock.Lock()
- defer s.lock.Unlock()
- return s.Appender.Append(ref, l, t, v)
-}
-
-func (s syncAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, f *histogram.FloatHistogram) (storage.SeriesRef, error) {
- s.lock.Lock()
- defer s.lock.Unlock()
- return s.Appender.AppendHistogram(ref, l, t, h, f)
-}
-
func TestWriteStorage_CanRegisterMetricsAfterClosing(t *testing.T) {
dir := t.TempDir()
reg := prometheus.NewPedanticRegistry()
From 44d772b4e78ecb8cc3b8ac3cee54204f6fe32242 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Mon, 2 Feb 2026 15:56:11 +0000
Subject: [PATCH 108/165] refactor: use Appender mock for tests (#17992)
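This replaces the hand-rolled mockAppendable/mockSample plumbing in the
OTLP write handler tests with util/teststorage. As a minimal sketch
(assuming the imports already used by write_otlp_handler_test.go, with
ts being the export timestamp in scope in the test), an expected series
is now declared as a single teststorage.Sample literal instead of
parallel mockSample/mockMetadata entries:

    type sample = teststorage.Sample

    // Labels, metadata, value and timestamp all live on one struct.
    expected := sample{
        L: labels.FromStrings(model.MetricNameLabel, "test.counter", "job", "test-service"),
        M: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
        V: 10.0,
        T: timestamp.FromTime(ts),
    }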
Signed-off-by: bwplotka
---
storage/remote/write_handler.go | 2 +
storage/remote/write_handler_test.go | 1 +
storage/remote/write_otlp_handler_test.go | 765 ++++++----------------
3 files changed, 201 insertions(+), 567 deletions(-)
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index bd507fc241..a72712a535 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -483,6 +483,8 @@ func (*writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage
return ref, err
}
+// TODO(bwplotka): Consider exposing the timeLimitAppender and bucketLimitAppender appenders from scrape/target.go
+// to DRY this up; they do the same thing.
type remoteWriteAppender struct {
storage.Appender
diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go
index ac75d56095..2cf1217933 100644
--- a/storage/remote/write_handler_test.go
+++ b/storage/remote/write_handler_test.go
@@ -1267,6 +1267,7 @@ func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries {
return series
}
+// TODO(bwplotka): Delete and switch all to teststorage.Appendable.
type mockAppendable struct {
latestSample map[uint64]int64
samples []mockSample
diff --git a/storage/remote/write_otlp_handler_test.go b/storage/remote/write_otlp_handler_test.go
index e6788b4366..57c0b2ab22 100644
--- a/storage/remote/write_otlp_handler_test.go
+++ b/storage/remote/write_otlp_handler_test.go
@@ -15,7 +15,6 @@ package remote
import (
"bytes"
- "context"
"fmt"
"log/slog"
"math/rand/v2"
@@ -25,7 +24,6 @@ import (
"reflect"
"runtime"
"strconv"
- "sync"
"testing"
"time"
@@ -38,93 +36,118 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
- "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/model/timestamp"
+ "github.com/prometheus/prometheus/util/teststorage"
)
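+// sample aliases teststorage.Sample so expected series can be declared as concise struct literals below.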
+type sample = teststorage.Sample
+
func TestOTLPWriteHandler(t *testing.T) {
- timestamp := time.Now()
- var zeroTime time.Time
- exportRequest := generateOTLPWriteRequest(timestamp, zeroTime)
+ ts := time.Now()
+
+ // Expected samples for the OTLP request, without the details (for now, the labels)
+ // that depend on the translation strategy or on the type-and-unit-labels option.
+ expectedSamplesWithoutLabelsFn := func() []sample {
+ return []sample{
+ {
+ M: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ V: 10.0, T: timestamp.FromTime(ts), ES: []exemplar.Exemplar{
+ {
+ Labels: labels.FromStrings("span_id", "0001020304050607", "trace_id", "000102030405060708090a0b0c0d0e0f"),
+ Value: 10, Ts: timestamp.FromTime(ts), HasTs: true,
+ },
+ },
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ V: 10.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 30.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 12.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 2.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 4.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 6.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 8.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 10.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 12.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 12.0, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ H: &histogram.Histogram{
+ Count: 10,
+ Sum: 30.0,
+ Schema: 2,
+ ZeroThreshold: 1e-128,
+ ZeroCount: 2,
+ PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}},
+ PositiveBuckets: []int64{2, 0, 0, 0, 0},
+ }, T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, V: 1, T: timestamp.FromTime(ts),
+ },
+ }
+ }
+
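+ // Zero start time: start-timestamp zero-sample ingestion is not exercised here.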
+ exportRequest := generateOTLPWriteRequest(ts, time.Time{})
for _, testCase := range []struct {
name string
otlpCfg config.OTLPConfig
typeAndUnitLabels bool
- expectedSamples []mockSample
- expectedMetadata []mockMetadata
+ // NOTE: This is a slice of samples, not []labels.Labels, because metric family details
+ // will be added once the OTLP handler moves to AppenderV2.
+ expectedLabels []sample
}{
{
name: "NoTranslation/NoTypeAndUnitLabels",
otlpCfg: config.OTLPConfig{
TranslationStrategy: otlptranslator.NoTranslation,
},
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
+ expectedLabels: []sample{
+ {L: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
{
@@ -133,145 +156,42 @@ func TestOTLPWriteHandler(t *testing.T) {
TranslationStrategy: otlptranslator.NoTranslation,
},
typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- // Metadata labels follow series labels.
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
+ expectedLabels: []sample{
+ {L: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
+ // For the following cases, skip the type-and-unit-labels variants; they have nothing to do with translation.
{
- name: "UnderscoreEscapingWithSuffixes/NoTypeAndUnitLabels",
+ name: "UnderscoreEscapingWithSuffixes",
otlpCfg: config.OTLPConfig{
TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
},
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
+ expectedLabels: []sample{
+ {L: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
{
@@ -279,301 +199,42 @@ func TestOTLPWriteHandler(t *testing.T) {
otlpCfg: config.OTLPConfig{
TranslationStrategy: otlptranslator.UnderscoreEscapingWithoutSuffixes,
},
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
+ expectedLabels: []sample{
+ {L: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
{
- name: "UnderscoreEscapingWithSuffixes/WithTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
- },
- typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.New(labels.Label{Name: "__name__", Value: "test_counter_bytes_total"},
- labels.Label{Name: "__type__", Value: "counter"},
- labels.Label{Name: "__unit__", Value: "bytes"},
- labels.Label{Name: "foo_bar", Value: "baz"},
- labels.Label{Name: "instance", Value: "test-instance"},
- labels.Label{Name: "job", Value: "test-service"}),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.New(
- labels.Label{Name: "__name__", Value: "target_info"},
- labels.Label{Name: "host_name", Value: "test-host"},
- labels.Label{Name: "instance", Value: "test-instance"},
- labels.Label{Name: "job", Value: "test-service"},
- ),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "NoUTF8EscapingWithSuffixes/NoTypeAndUnitLabels",
+ name: "NoUTF8EscapingWithSuffixes",
otlpCfg: config.OTLPConfig{
TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
},
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "NoUTF8EscapingWithSuffixes/WithTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
- },
- typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
+ expectedLabels: []sample{
+ // TODO: Counter MF name looks like a bug. Uncovered in an unrelated refactor; fix it.
+ {L: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
} {
@@ -583,22 +244,22 @@ func TestOTLPWriteHandler(t *testing.T) {
AppendMetadata: true,
}
appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, otlpOpts)
- for _, sample := range testCase.expectedSamples {
- requireContainsSample(t, appendable.samples, sample)
+
+ // Compile final expected samples.
+ expectedSamples := expectedSamplesWithoutLabelsFn()
+ for i, s := range testCase.expectedLabels {
+ expectedSamples[i].L = s.L
+ expectedSamples[i].MF = s.MF
}
- for _, meta := range testCase.expectedMetadata {
- requireContainsMetadata(t, appendable.metadata, meta)
- }
- require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
- require.Len(t, appendable.histograms, 1) // 1 (exponential histogram)
- require.Len(t, appendable.metadata, 13) // for each float and histogram sample
- require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
+ teststorage.RequireEqual(t, expectedSamples, appendable.ResultSamples())
})
}
}
// Check that start time is ingested if ingestSTZeroSample is enabled
// and the start time is actually set (non-zero).
+// TODO(bwplotka): This test still uses the old mockAppender. Keeping it as-is,
+// since this test will be removed when OTLP handling switches to AppenderV2.
func TestOTLPWriteHandler_StartTime(t *testing.T) {
timestamp := time.Now()
startTime := timestamp.Add(-1 * time.Millisecond)
@@ -752,9 +413,29 @@ func TestOTLPWriteHandler_StartTime(t *testing.T) {
} {
t.Run(testCase.name, func(t *testing.T) {
exportRequest := generateOTLPWriteRequest(timestamp, testCase.startTime)
- appendable := handleOTLP(t, exportRequest, config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoTranslation,
+
+ buf, err := exportRequest.MarshalProto()
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(buf))
+ require.NoError(t, err)
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
+ appendable := &mockAppendable{}
+ handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config {
+ return config.Config{
+ OTLPConfig: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoTranslation,
+ },
+ }
}, testCase.otlpOpts)
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+
for i, expect := range testCase.expectedSamples {
actual := appendable.samples[i]
require.True(t, labels.Equal(expect.l, actual.l), "sample labels,pos=%v", i)
@@ -773,33 +454,9 @@ func TestOTLPWriteHandler_StartTime(t *testing.T) {
}
}
-func requireContainsSample(t *testing.T, actual []mockSample, expected mockSample) {
+func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *teststorage.Appendable {
t.Helper()
- for _, got := range actual {
- if labels.Equal(expected.l, got.l) && expected.t == got.t && expected.v == got.v {
- return
- }
- }
- require.Fail(t, fmt.Sprintf("Sample not found: \n"+
- "expected: %v\n"+
- "actual : %v", expected, actual))
-}
-
-func requireContainsMetadata(t *testing.T, actual []mockMetadata, expected mockMetadata) {
- t.Helper()
-
- for _, got := range actual {
- if labels.Equal(expected.l, got.l) && expected.m.Type == got.m.Type && expected.m.Unit == got.m.Unit && expected.m.Help == got.m.Help {
- return
- }
- }
- require.Fail(t, fmt.Sprintf("Metadata not found: \n"+
- "expected: %v\n"+
- "actual : %v", expected, actual))
-}
-
-func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *mockAppendable {
buf, err := exportRequest.MarshalProto()
require.NoError(t, err)
@@ -808,7 +465,7 @@ func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg c
req.Header.Set("Content-Type", "application/x-protobuf")
log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- appendable := &mockAppendable{}
+ appendable := teststorage.NewAppendable()
handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config {
return config.Config{
OTLPConfig: otlpCfg,
@@ -912,7 +569,7 @@ func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.Export
func TestOTLPDelta(t *testing.T) {
log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- appendable := &mockAppendable{}
+ appendable := teststorage.NewAppendable()
cfg := func() config.Config {
return config.Config{OTLPConfig: config.DefaultOTLPConfig}
}
@@ -950,12 +607,12 @@ func TestOTLPDelta(t *testing.T) {
return time.Date(2000, 1, 2, 3, 4, sec, 0, time.UTC).UnixMilli()
}
- want := []mockSample{
- {t: milli(0), l: ls, v: 0}, // +0
- {t: milli(1), l: ls, v: 1}, // +1
- {t: milli(2), l: ls, v: 3}, // +2
+ want := []sample{
+ {T: milli(0), L: ls, V: 0}, // +0
+ {T: milli(1), L: ls, V: 1}, // +1
+ {T: milli(2), L: ls, V: 3}, // +2
}
- if diff := cmp.Diff(want, appendable.samples, cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" {
+ if diff := cmp.Diff(want, appendable.ResultSamples(), cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" {
t.Fatal(diff)
}
}
@@ -1165,8 +822,8 @@ func BenchmarkOTLP(b *testing.B) {
}
log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- mock := new(mockAppendable)
- appendable := syncAppendable{Appendable: mock, lock: new(sync.Mutex)}
+
+ appendable := teststorage.NewAppendable()
cfgfn := func() config.Config {
return config.Config{OTLPConfig: config.DefaultOTLPConfig}
}
@@ -1205,7 +862,7 @@ func BenchmarkOTLP(b *testing.B) {
}
}
- require.Equal(b, total, len(mock.samples)+len(mock.histograms))
+ require.Len(b, appendable.ResultSamples(), total)
})
}
}
@@ -1244,29 +901,3 @@ func sampleCount(md pmetric.Metrics) int {
}
return total
}
-
-type syncAppendable struct {
- lock sync.Locker
- storage.Appendable
-}
-
-type syncAppender struct {
- lock sync.Locker
- storage.Appender
-}
-
-func (s syncAppendable) Appender(ctx context.Context) storage.Appender {
- return syncAppender{Appender: s.Appendable.Appender(ctx), lock: s.lock}
-}
-
-func (s syncAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
- s.lock.Lock()
- defer s.lock.Unlock()
- return s.Appender.Append(ref, l, t, v)
-}
-
-func (s syncAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, f *histogram.FloatHistogram) (storage.SeriesRef, error) {
- s.lock.Lock()
- defer s.lock.Unlock()
- return s.Appender.AppendHistogram(ref, l, t, h, f)
-}
From 1d5486caaa39cc42f017e14d8b353ed8883eb595 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Tue, 3 Feb 2026 08:52:11 +0000
Subject: [PATCH 109/165] Explain more about using Prometheus as a Go library
Signed-off-by: Bryan Boreham
---
README.md | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/README.md b/README.md
index 7b04a51cee..030a827952 100644
--- a/README.md
+++ b/README.md
@@ -159,6 +159,15 @@ produce a fully working image when run locally.
## Using Prometheus as a Go Library
+Within the Prometheus project, repositories such as [prometheus/common](https://github.com/prometheus/common) and
+[prometheus/client_golang](https://github.com/prometheus/client_golang) are designed as reusable libraries.
+
+The [prometheus/prometheus](https://github.com/prometheus/prometheus) repository builds a stand-alone program and is not
+designed for use as a library. We know that people do use parts of it as such,
+and we do not put any deliberate inconvenience in the way, but be aware that
+no care has been taken to make it work well as a library. For instance, you
+may encounter errors that only surface when it is used as a library.
+
### Remote Write
We are publishing our Remote Write protobuf independently at
From c0ad8537501b6d30f73195befe1b31d8cd36b614 Mon Sep 17 00:00:00 2001
From: zenador
Date: Tue, 3 Feb 2026 22:12:02 +0800
Subject: [PATCH 110/165] promql: Make it possible to add custom details in
annotations and summarise multiple of the same annotation (#15577)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
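
For illustration, a minimal sketch of the new merging behavior, using the
newTestCustomWarning helper added in the new annotations_test.go (any error
type implementing the annoError interface merges the same way):

    var annos Annotations
    pos := posrange.PositionRange{Start: 3, End: 8}
    // Previously the second Add simply overwrote the first entry. Now, since
    // both errors share the same raw error string and implement Merge, they
    // are combined into a single annotation.
    annos.Add(newTestCustomWarning(1.5, pos, 12, 14))
    annos.Add(newTestCustomWarning(1.5, pos, 5, 15))
    // AsStrings sets the query via SetQuery and renders the merged entry once:
    // "PromQL warning: custom value set to 1.5, 2 instances with smallest 5
    // and biggest 15 (1:4)".
    warnings, _ := annos.AsStrings("lorem ipsum dolor sit amet", 0, 0)
    _ = warnings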
Signed-off-by: Jeanette Tan
Signed-off-by: zenador
Co-authored-by: Björn Rabenstein
---
util/annotations/annotations.go | 97 ++++++++++++++++--------
util/annotations/annotations_test.go | 109 +++++++++++++++++++++++++++
2 files changed, 176 insertions(+), 30 deletions(-)
create mode 100644 util/annotations/annotations_test.go
diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go
index a68b2ba4fc..581e4987d1 100644
--- a/util/annotations/annotations.go
+++ b/util/annotations/annotations.go
@@ -16,7 +16,6 @@ package annotations
import (
"errors"
"fmt"
- "maps"
"github.com/prometheus/common/model"
@@ -43,12 +42,18 @@ func (a *Annotations) Add(err error) Annotations {
if *a == nil {
*a = Annotations{}
}
+ if prevErr, exists := (*a)[err.Error()]; exists {
+ var anErr annoError
+ if errors.As(err, &anErr) {
+ err = anErr.Merge(prevErr)
+ }
+ }
(*a)[err.Error()] = err
return *a
}
-// Merge adds the contents of the second annotation to the first, modifying
-// the first in-place, and returns the merged first Annotation for convenience.
+// Merge adds the contents of the second set of Annotations to the first, modifying
+// the first in-place, and returns the merged first Annotations for convenience.
func (a *Annotations) Merge(aa Annotations) Annotations {
if *a == nil {
if aa == nil {
@@ -56,7 +61,15 @@ func (a *Annotations) Merge(aa Annotations) Annotations {
}
*a = Annotations{}
}
- maps.Copy((*a), aa)
+ for key, val := range aa {
+ if prevVal, exists := (*a)[key]; exists {
+ var anErr annoError
+ if errors.As(val, &anErr) {
+ val = anErr.Merge(prevVal)
+ }
+ }
+ (*a)[key] = val
+ }
return *a
}
@@ -81,10 +94,9 @@ func (a Annotations) AsStrings(query string, maxWarnings, maxInfos int) (warning
warnSkipped := 0
infoSkipped := 0
for _, err := range a {
- var anErr annoErr
+ var anErr annoError
if errors.As(err, &anErr) {
- anErr.Query = query
- err = anErr
+ anErr.SetQuery(query)
}
switch {
case errors.Is(err, PromQLInfo):
@@ -157,23 +169,48 @@ var (
MismatchedCustomBucketsHistogramsInfo = fmt.Errorf("%w: mismatched custom buckets were reconciled during", PromQLInfo)
)
+// annoError extends the standard error interface to provide additional functionality
+// for PromQL annotations, allowing them to be merged with other similar errors.
+type annoError interface {
+ error
+ // Necessary so we can use errors.Is() to disambiguate between warning and info.
+ Unwrap() error
+ // Necessary when we want to show position info. This is only called at the
+ // end, from AsStrings(); before that the query is empty, so we deduplicate
+ // based on the raw error string, and the full error string with details is
+ // only rendered once the query has been set.
+ SetQuery(string)
+ // We can define custom merge functions to merge individual annotations of the same type if they have
+ // the same raw error string.
+ Merge(error) error
+}
+
type annoErr struct {
PositionRange posrange.PositionRange
Err error
Query string
}
-func (e annoErr) Error() string {
+func (e *annoErr) Error() string {
if e.Query == "" {
return e.Err.Error()
}
return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0))
}
-func (e annoErr) Unwrap() error {
+func (e *annoErr) Unwrap() error {
return e.Err
}
+func (e *annoErr) SetQuery(query string) {
+ e.Query = query
+}
+
+// We do not merge generic annotations; instead, we ignore the provided error
+// and return the original.
+func (e *annoErr) Merge(_ error) error {
+ return e
+}
+
func maybeAddMetricName(anno error, metricName string) error {
if metricName == "" {
return anno
@@ -184,7 +221,7 @@ func maybeAddMetricName(anno error, metricName string) error {
// NewInvalidQuantileWarning is used when the user specifies an invalid quantile
// value, i.e. a float that is outside the range [0, 1] or NaN.
func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w, got %g", InvalidQuantileWarning, q),
}
@@ -193,7 +230,7 @@ func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
// NewInvalidRatioWarning is used when the user specifies an invalid ratio
// value, i.e. a float that is outside the range [-1, 1] or NaN.
func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w, got %g, capping to %g", InvalidRatioWarning, q, to),
}
@@ -203,7 +240,7 @@ func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
// of a classic histogram.
func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error {
anno := maybeAddMetricName(fmt.Errorf("%w of %q", BadBucketLabelWarning, label), metricName)
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: anno,
}
@@ -213,7 +250,7 @@ func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRan
// float samples and histogram samples for functions that do not support mixed
// samples.
func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w metric name %q", MixedFloatsHistogramsWarning, metricName),
}
@@ -222,7 +259,7 @@ func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRan
// NewMixedFloatsHistogramsAggWarning is used when the queried series includes both
// float samples and histogram samples in an aggregation.
func NewMixedFloatsHistogramsAggWarning(pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w aggregation", MixedFloatsHistogramsWarning),
}
@@ -231,7 +268,7 @@ func NewMixedFloatsHistogramsAggWarning(pos posrange.PositionRange) error {
// NewMixedClassicNativeHistogramsWarning is used when the queried series includes
// both classic and native histograms.
func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(MixedClassicNativeHistogramsWarning, metricName),
}
@@ -240,7 +277,7 @@ func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.Posi
// NewNativeHistogramNotCounterWarning is used when histogramRate is called
// with isCounter set to true on a gauge histogram.
func NewNativeHistogramNotCounterWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", NativeHistogramNotCounterWarning, metricName),
}
@@ -249,7 +286,7 @@ func NewNativeHistogramNotCounterWarning(metricName string, pos posrange.Positio
// NewNativeHistogramNotGaugeWarning is used when histogramRate is called
// with isCounter set to false on a counter histogram.
func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", NativeHistogramNotGaugeWarning, metricName),
}
@@ -258,7 +295,7 @@ func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionR
// NewMixedExponentialCustomHistogramsWarning is used when the queried series includes
// histograms with both exponential and custom buckets schemas.
func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", MixedExponentialCustomHistogramsWarning, metricName),
}
@@ -267,7 +304,7 @@ func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.
// NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not
// have the suffixes _total, _sum, _count, or _bucket.
func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName),
}
@@ -276,7 +313,7 @@ func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) er
// NewPossibleNonCounterLabelInfo is used when a named counter metric with only float samples does not
// have the __type__ label set to "counter".
func NewPossibleNonCounterLabelInfo(metricName, typeLabel string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w, got %q: %q", PossibleNonCounterLabelInfo, typeLabel, metricName),
}
@@ -285,7 +322,7 @@ func NewPossibleNonCounterLabelInfo(metricName, typeLabel string, pos posrange.P
// NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to
// histogram_quantile needs to be forced to be monotonic.
func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(HistogramQuantileForcedMonotonicityInfo, metricName),
}
@@ -294,7 +331,7 @@ func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.
// NewIncompatibleTypesInBinOpInfo is used if binary operators act on a
// combination of types that doesn't work and therefore returns no result.
func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q: %s %s %s", IncompatibleTypesInBinOpInfo, operator, lhsType, operator, rhsType),
}
@@ -303,7 +340,7 @@ func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posr
// NewHistogramIgnoredInAggregationInfo is used when a histogram is ignored by
// an aggregation operator that cannot handle histograms.
func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation),
}
@@ -312,7 +349,7 @@ func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.Posit
// NewHistogramIgnoredInMixedRangeInfo is used when a histogram is ignored
// in a range vector which contains mix of floats and histograms.
func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName),
}
@@ -321,28 +358,28 @@ func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.Positio
// NewIncompatibleBucketLayoutInBinOpWarning is used if binary operators act on a
// combination of two incompatible histograms.
func NewIncompatibleBucketLayoutInBinOpWarning(operator string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %s", IncompatibleBucketLayoutInBinOpWarning, operator),
}
}
func NewNativeHistogramQuantileNaNResultInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(NativeHistogramQuantileNaNResultInfo, metricName),
}
}
func NewNativeHistogramQuantileNaNSkewInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(NativeHistogramQuantileNaNSkewInfo, metricName),
}
}
func NewNativeHistogramFractionNaNsInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(NativeHistogramFractionNaNsInfo, metricName),
}
@@ -368,7 +405,7 @@ func (op HistogramOperation) String() string {
// NewHistogramCounterResetCollisionWarning is used when two counter histograms are added or subtracted where one has
// a CounterReset hint and the other has NotCounterReset.
func NewHistogramCounterResetCollisionWarning(pos posrange.PositionRange, operation HistogramOperation) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %s", HistogramCounterResetCollisionWarning, operation.String()),
}
@@ -377,7 +414,7 @@ func NewHistogramCounterResetCollisionWarning(pos posrange.PositionRange, operat
// NewMismatchedCustomBucketsHistogramsInfo is used when the queried series includes
// custom buckets histograms with mismatched custom bounds that cause reconciling.
func NewMismatchedCustomBucketsHistogramsInfo(pos posrange.PositionRange, operation HistogramOperation) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %s", MismatchedCustomBucketsHistogramsInfo, operation.String()),
}
diff --git a/util/annotations/annotations_test.go b/util/annotations/annotations_test.go
new file mode 100644
index 0000000000..e3caaae7eb
--- /dev/null
+++ b/util/annotations/annotations_test.go
@@ -0,0 +1,109 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package annotations
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/promql/parser/posrange"
+)
+
+func TestAnnotations_AsStrings(t *testing.T) {
+ var annos Annotations
+ pos := posrange.PositionRange{Start: 3, End: 8}
+
+ annos.Add(errors.New("this is a non-annotation error"))
+
+ annos.Add(NewInvalidRatioWarning(1.1, 100, pos))
+ annos.Add(NewInvalidRatioWarning(1.2, 123, pos))
+
+ annos.Add(newTestCustomWarning(1.5, pos, 12, 14))
+ annos.Add(newTestCustomWarning(1.5, pos, 10, 20))
+ annos.Add(newTestCustomWarning(1.5, pos, 5, 15))
+ annos.Add(newTestCustomWarning(1.5, pos, 12, 14))
+
+ annos.Add(NewHistogramIgnoredInAggregationInfo("sum", pos))
+
+ warnings, infos := annos.AsStrings("lorem ipsum dolor sit amet", 0, 0)
+ require.ElementsMatch(t, warnings, []string{
+ "this is a non-annotation error",
+ "PromQL warning: ratio value should be between -1 and 1, got 1.1, capping to 100 (1:4)",
+ "PromQL warning: ratio value should be between -1 and 1, got 1.2, capping to 123 (1:4)",
+ "PromQL warning: custom value set to 1.5, 4 instances with smallest 5 and biggest 20 (1:4)",
+ })
+ require.ElementsMatch(t, infos, []string{
+ "PromQL info: ignored histogram in sum aggregation (1:4)",
+ })
+}
+
+type testCustomError struct {
+ PositionRange posrange.PositionRange
+ Err error
+ Query string
+ Min []float64
+ Max []float64
+ Count int
+}
+
+func (e *testCustomError) Error() string {
+ if e.Query == "" {
+ return e.Err.Error()
+ }
+ return fmt.Sprintf("%s, %d instances with smallest %g and biggest %g (%s)", e.Err, e.Count+1, e.Min[0], e.Max[0], e.PositionRange.StartPosInput(e.Query, 0))
+}
+
+func (e *testCustomError) Unwrap() error {
+ return e.Err
+}
+
+func (e *testCustomError) SetQuery(query string) {
+ e.Query = query
+}
+
+func (e *testCustomError) Merge(other error) error {
+ o := &testCustomError{}
+ ok := errors.As(other, &o)
+ if !ok {
+ return e
+ }
+ if e.Err.Error() != o.Err.Error() || len(e.Min) != len(o.Min) || len(e.Max) != len(o.Max) {
+ return e
+ }
+ for i, aMin := range e.Min {
+ if aMin < o.Min[i] {
+ o.Min[i] = aMin
+ }
+ }
+ for i, aMax := range e.Max {
+ if aMax > o.Max[i] {
+ o.Max[i] = aMax
+ }
+ }
+ o.Count += e.Count + 1
+ return o
+}
+
+func newTestCustomWarning(q float64, pos posrange.PositionRange, smallest, largest float64) error {
+ testCustomWarning := fmt.Errorf("%w: custom value set to", PromQLWarning)
+ return &testCustomError{
+ PositionRange: pos,
+ Err: fmt.Errorf("%w %g", testCustomWarning, q),
+ Min: []float64{smallest},
+ Max: []float64{largest},
+ }
+}
From 51d33be87806e3dc0e889dee73358394c19701ff Mon Sep 17 00:00:00 2001
From: Arve Knudsen
Date: Tue, 3 Feb 2026 15:15:35 +0100
Subject: [PATCH 111/165] web: switch from gopkg.in/yaml to go.yaml.in/yaml
(#17979)
Replace gopkg.in/yaml.v2 and gopkg.in/yaml.v3 imports with
go.yaml.in/yaml/v2 and go.yaml.in/yaml/v3 respectively.
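
The new module is intended as a drop-in replacement, so consumers only swap
the import path (a minimal sketch; the API is unchanged):

    import yaml "go.yaml.in/yaml/v3" // previously "gopkg.in/yaml.v3"

    var out map[string]any
    if err := yaml.Unmarshal([]byte("a: 1"), &out); err != nil {
        // handle parse error
    }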
Signed-off-by: Arve Knudsen
---
.golangci.yml | 4 ++++
go.mod | 4 ++--
web/api/v1/openapi_golden_test.go | 2 +-
web/api/v1/openapi_test.go | 2 +-
4 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/.golangci.yml b/.golangci.yml
index 599a5e2b49..8cb3265f4f 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -102,6 +102,10 @@ linters:
desc: "Use github.com/klauspost/compress instead of zlib"
- pkg: "golang.org/x/exp/slices"
desc: "Use 'slices' instead."
+ - pkg: "gopkg.in/yaml.v2"
+ desc: "Use go.yaml.in/yaml/v2 instead of gopkg.in/yaml.v2"
+ - pkg: "gopkg.in/yaml.v3"
+ desc: "Use go.yaml.in/yaml/v3 instead of gopkg.in/yaml.v3"
errcheck:
exclude-functions:
# Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
diff --git a/go.mod b/go.mod
index 0aa3658177..7ceb746720 100644
--- a/go.mod
+++ b/go.mod
@@ -96,7 +96,6 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b
google.golang.org/grpc v1.78.0
google.golang.org/protobuf v1.36.11
- gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.34.3
k8s.io/apimachinery v0.34.3
k8s.io/client-go v0.34.3
@@ -124,6 +123,8 @@ require (
github.com/pb33f/ordered-map/v2 v2.3.0 // indirect
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
@@ -246,7 +247,6 @@ require (
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
- gopkg.in/yaml.v2 v2.4.0
gotest.tools/v3 v3.0.3 // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
diff --git a/web/api/v1/openapi_golden_test.go b/web/api/v1/openapi_golden_test.go
index 6207fda81b..468d56e46d 100644
--- a/web/api/v1/openapi_golden_test.go
+++ b/web/api/v1/openapi_golden_test.go
@@ -20,7 +20,7 @@ import (
"testing"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
+ "go.yaml.in/yaml/v3"
"github.com/prometheus/prometheus/web/api/testhelpers"
)
diff --git a/web/api/v1/openapi_test.go b/web/api/v1/openapi_test.go
index 0d2f5cc83e..21547734c2 100644
--- a/web/api/v1/openapi_test.go
+++ b/web/api/v1/openapi_test.go
@@ -21,7 +21,7 @@ import (
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
)
// TestOpenAPIHTTPHandler verifies that the OpenAPI endpoint serves a valid specification
From 3c44ca757d1932e3fcca317dfc05ad66ce38ac57 Mon Sep 17 00:00:00 2001
From: Arve Knudsen
Date: Tue, 3 Feb 2026 16:58:02 +0100
Subject: [PATCH 112/165] storage: fix ToError() to return nil for empty
partial errors (#18002)
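
A minimal sketch of the caller-side gotcha this fixes (types per
storage/interface_append.go):

    perr := &storage.AppendPartialError{} // no exemplar errors collected
    // Before this fix, ToError() returned the non-nil *AppendPartialError even
    // when ExemplarErrors was empty, so this branch was taken spuriously:
    if err := perr.ToError(); err != nil {
        // handle partial append failure
    }
    // After the fix, ToError() returns nil for both a nil receiver and an
    // empty ExemplarErrors slice.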
Signed-off-by: Arve Knudsen
---
storage/errors_test.go | 18 ++++++++++++++++++
storage/interface_append.go | 2 +-
2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/storage/errors_test.go b/storage/errors_test.go
index 0e7277bf8b..706719d137 100644
--- a/storage/errors_test.go
+++ b/storage/errors_test.go
@@ -20,6 +20,24 @@ import (
"github.com/stretchr/testify/require"
)
+func TestAppendPartialErrorToError(t *testing.T) {
+ // nil receiver returns nil.
+ var nilErr *AppendPartialError
+ require.NoError(t, nilErr.ToError())
+
+ // Empty ExemplarErrors returns nil.
+ emptyErr := &AppendPartialError{}
+ require.NoError(t, emptyErr.ToError())
+
+ // Also test explicitly empty slice.
+ emptySliceErr := &AppendPartialError{ExemplarErrors: []error{}}
+ require.NoError(t, emptySliceErr.ToError())
+
+ // Non-empty ExemplarErrors returns the error.
+ nonEmptyErr := &AppendPartialError{ExemplarErrors: []error{ErrOutOfOrderExemplar}}
+ require.ErrorIs(t, nonEmptyErr.ToError(), nonEmptyErr)
+}
+
func TestErrDuplicateSampleForTimestamp(t *testing.T) {
// All errDuplicateSampleForTimestamp are ErrDuplicateSampleForTimestamp
require.ErrorIs(t, ErrDuplicateSampleForTimestamp, errDuplicateSampleForTimestamp{})
diff --git a/storage/interface_append.go b/storage/interface_append.go
index b5ee4b49c8..beb17f9e16 100644
--- a/storage/interface_append.go
+++ b/storage/interface_append.go
@@ -104,7 +104,7 @@ func (e *AppendPartialError) Error() string {
// ToError returns AppendPartialError as error, returning nil
// if there are no errors.
func (e *AppendPartialError) ToError() error {
- if e == nil {
+ if e == nil || len(e.ExemplarErrors) == 0 {
return nil
}
return e
From 7769495a4a3e95db7f439e2bd3b729b27e59c5af Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Tue, 3 Feb 2026 16:44:40 +0000
Subject: [PATCH 113/165] refactor: switch OTLP handler to AppendableV2
(#17996)
* refactor: switch OTLP handler to AppendableV2
Signed-off-by: bwplotka
* addressed comments
Signed-off-by: bwplotka
---------
Signed-off-by: bwplotka
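
For context, a minimal sketch of the AppenderV2-style call this refactor moves
to, which folds samples, histograms, metadata and exemplars into a single
Append (signature and storage.AOptions fields as exercised by the updated mock
in combined_appender_test.go):

    ref, err := app.Append(0, ls, startTs, ts, value, hist, nil, storage.AOptions{
        Metadata:         meta,      // replaces separate UpdateMetadata calls
        MetricFamilyName: mfName,    // kept for downstreams such as Mimir's /api/v1/metadata
        Exemplars:        exemplars, // replaces per-exemplar AppendExemplar calls
    })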
---
.../combined_appender.go | 244 -----
.../combined_appender_test.go | 885 +-----------------
.../prometheusremotewrite/helper.go | 89 +-
.../prometheusremotewrite/helper_test.go | 11 +-
.../prometheusremotewrite/histograms.go | 35 +-
.../prometheusremotewrite/histograms_test.go | 5 +-
.../prometheusremotewrite/metrics_to_prw.go | 22 +-
.../metrics_to_prw_test.go | 149 ++-
.../number_data_points.go | 29 +-
.../number_data_points_test.go | 5 +-
storage/remote/write_handler.go | 24 +
storage/remote/write_otlp_handler.go | 93 +-
storage/remote/write_otlp_handler_test.go | 440 +++------
web/api/v1/api.go | 8 +-
web/api/v1/errors_test.go | 2 +-
web/api/v1/test_helpers.go | 2 +-
web/web.go | 9 +-
17 files changed, 453 insertions(+), 1599 deletions(-)
delete mode 100644 storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
deleted file mode 100644
index 883b8d3142..0000000000
--- a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// TODO(krajorama): rename this package to otlpappender or similar, as it is
-// not specific to Prometheus remote write anymore.
-// Note otlptranslator is already used by prometheus/otlptranslator repo.
-package prometheusremotewrite
-
-import (
- "errors"
- "fmt"
- "log/slog"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-
- "github.com/prometheus/prometheus/model/exemplar"
- "github.com/prometheus/prometheus/model/histogram"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/metadata"
- "github.com/prometheus/prometheus/storage"
-)
-
-// Metadata extends metadata.Metadata with the metric family name.
-// OTLP calculates the metric family name for all metrics and uses
-// it for generating summary, histogram series by adding the magic
-// suffixes. The metric family name is passed down to the appender
-// in case the storage needs it for metadata updates.
-// Known user is Mimir that implements /api/v1/metadata and uses
-// Remote-Write 1.0 for this. Might be removed later if no longer
-// needed by any downstream project.
-type Metadata struct {
- metadata.Metadata
- MetricFamilyName string
-}
-
-// CombinedAppender is similar to storage.Appender, but combines updates to
-// metadata, created timestamps, exemplars and samples into a single call.
-type CombinedAppender interface {
- // AppendSample appends a sample and related exemplars, metadata, and
- // created timestamp to the storage.
- AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) error
- // AppendHistogram appends a histogram and related exemplars, metadata, and
- // created timestamp to the storage.
- AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error
-}
-
-// CombinedAppenderMetrics is for the metrics observed by the
-// combinedAppender implementation.
-type CombinedAppenderMetrics struct {
- samplesAppendedWithoutMetadata prometheus.Counter
- outOfOrderExemplars prometheus.Counter
-}
-
-func NewCombinedAppenderMetrics(reg prometheus.Registerer) CombinedAppenderMetrics {
- return CombinedAppenderMetrics{
- samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
- Namespace: "prometheus",
- Subsystem: "api",
- Name: "otlp_appended_samples_without_metadata_total",
- Help: "The total number of samples ingested from OTLP without corresponding metadata.",
- }),
- outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{
- Namespace: "prometheus",
- Subsystem: "api",
- Name: "otlp_out_of_order_exemplars_total",
- Help: "The total number of received OTLP exemplars which were rejected because they were out of order.",
- }),
- }
-}
-
-// NewCombinedAppender creates a combined appender that sets start times and
-// updates metadata for each series only once, and appends samples and
-// exemplars for each call.
-func NewCombinedAppender(app storage.Appender, logger *slog.Logger, ingestSTZeroSample, appendMetadata bool, metrics CombinedAppenderMetrics) CombinedAppender {
- return &combinedAppender{
- app: app,
- logger: logger,
- ingestSTZeroSample: ingestSTZeroSample,
- appendMetadata: appendMetadata,
- refs: make(map[uint64]seriesRef),
- samplesAppendedWithoutMetadata: metrics.samplesAppendedWithoutMetadata,
- outOfOrderExemplars: metrics.outOfOrderExemplars,
- }
-}
-
-type seriesRef struct {
- ref storage.SeriesRef
- st int64
- ls labels.Labels
- meta metadata.Metadata
-}
-
-type combinedAppender struct {
- app storage.Appender
- logger *slog.Logger
- samplesAppendedWithoutMetadata prometheus.Counter
- outOfOrderExemplars prometheus.Counter
- ingestSTZeroSample bool
- appendMetadata bool
- // Used to ensure we only update metadata and created timestamps once, and to share storage.SeriesRefs.
- // To detect hash collision it also stores the labels.
- // There is no overflow/conflict list, the TSDB will handle that part.
- refs map[uint64]seriesRef
-}
-
-func (b *combinedAppender) AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) (err error) {
- return b.appendFloatOrHistogram(ls, meta.Metadata, st, t, v, nil, es)
-}
-
-func (b *combinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) {
- if h == nil {
- // Sanity check, we should never get here with a nil histogram.
- b.logger.Error("Received nil histogram in CombinedAppender.AppendHistogram", "series", ls.String())
- return errors.New("internal error, attempted to append nil histogram")
- }
- return b.appendFloatOrHistogram(ls, meta.Metadata, st, t, 0, h, es)
-}
-
-func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadata.Metadata, st, t int64, v float64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) {
- hash := ls.Hash()
- series, exists := b.refs[hash]
- ref := series.ref
- if exists && !labels.Equal(series.ls, ls) {
- // Hash collision. The series reference we stored is pointing to a
- // different series so we cannot use it, we need to reset the
- // reference and cache.
- // Note: we don't need to keep track of conflicts here,
- // the TSDB will handle that part when we pass 0 reference.
- exists = false
- ref = 0
- }
- updateRefs := !exists || series.st != st
- if updateRefs && st != 0 && st < t && b.ingestSTZeroSample {
- var newRef storage.SeriesRef
- if h != nil {
- newRef, err = b.app.AppendHistogramSTZeroSample(ref, ls, t, st, h, nil)
- } else {
- newRef, err = b.app.AppendSTZeroSample(ref, ls, t, st)
- }
- if err != nil {
- if !errors.Is(err, storage.ErrOutOfOrderST) && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
- // Even for the first sample OOO is a common scenario because
- // we can't tell if a ST was already ingested in a previous request.
- // We ignore the error.
- // ErrDuplicateSampleForTimestamp is also a common scenario because
- // unknown start times in Opentelemetry are indicated by setting
- // the start time to the same as the first sample time.
- // https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time
- b.logger.Warn("Error when appending ST from OTLP", "err", err, "series", ls.String(), "start_timestamp", st, "timestamp", t, "sample_type", sampleType(h))
- }
- } else {
- // We only use the returned reference on success as otherwise an
- // error of ST append could invalidate the series reference.
- ref = newRef
- }
- }
- {
- var newRef storage.SeriesRef
- if h != nil {
- newRef, err = b.app.AppendHistogram(ref, ls, t, h, nil)
- } else {
- newRef, err = b.app.Append(ref, ls, t, v)
- }
- if err != nil {
- // Although Append does not currently return ErrDuplicateSampleForTimestamp there is
- // a note indicating its inclusion in the future.
- if errors.Is(err, storage.ErrOutOfOrderSample) ||
- errors.Is(err, storage.ErrOutOfBounds) ||
- errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
- b.logger.Error("Error when appending sample from OTLP", "err", err.Error(), "series", ls.String(), "timestamp", t, "sample_type", sampleType(h))
- }
- } else {
- // If the append was successful, we can use the returned reference.
- ref = newRef
- }
- }
-
- if ref == 0 {
- // We cannot update metadata or add exemplars on non existent series.
- return err
- }
-
- metadataChanged := exists && (series.meta.Help != meta.Help || series.meta.Type != meta.Type || series.meta.Unit != meta.Unit)
-
- // Update cache if references changed or metadata changed.
- if updateRefs || metadataChanged {
- b.refs[hash] = seriesRef{
- ref: ref,
- st: st,
- ls: ls,
- meta: meta,
- }
- }
-
- // Update metadata in storage if enabled and needed.
- if b.appendMetadata && (!exists || metadataChanged) {
- // Only update metadata in WAL if the metadata-wal-records feature is enabled.
- // Without this feature, metadata is not persisted to WAL.
- _, err := b.app.UpdateMetadata(ref, ls, meta)
- if err != nil {
- b.samplesAppendedWithoutMetadata.Add(1)
- b.logger.Warn("Error while updating metadata from OTLP", "err", err)
- }
- }
-
- b.appendExemplars(ref, ls, es)
-
- return err
-}
-
-func sampleType(h *histogram.Histogram) string {
- if h == nil {
- return "float"
- }
- return "histogram"
-}
-
-func (b *combinedAppender) appendExemplars(ref storage.SeriesRef, ls labels.Labels, es []exemplar.Exemplar) storage.SeriesRef {
- var err error
- for _, e := range es {
- if ref, err = b.app.AppendExemplar(ref, ls, e); err != nil {
- switch {
- case errors.Is(err, storage.ErrOutOfOrderExemplar):
- b.outOfOrderExemplars.Add(1)
- b.logger.Debug("Out of order exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
- default:
- // Since exemplar storage is still experimental, we don't fail the request on ingestion errors
- b.logger.Debug("Error while adding exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
- }
- }
- }
- return ref
-}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
index a1a17fe82b..69d11ed6bd 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
@@ -14,31 +14,22 @@
package prometheusremotewrite
import (
- "bytes"
- "context"
"errors"
- "fmt"
- "math"
"testing"
- "time"
"github.com/google/go-cmp/cmp"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/common/model"
- "github.com/prometheus/common/promslog"
- "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage"
- "github.com/prometheus/prometheus/tsdb"
- "github.com/prometheus/prometheus/tsdb/chunkenc"
- "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/testutil"
)
+// TODO(bwplotka): Move to teststorage.Appendable. This requires a slight refactor of the tests and couldn't be done
+// before switching to AppenderV2 (it would mean adjusting the AppenderV1 mock exemplar flow, which is pointless since
+// we don't plan to use it). For now, keeping the test diff small for confidence.
type mockCombinedAppender struct {
pendingSamples []combinedSample
pendingHistograms []combinedHistogram
@@ -67,30 +58,29 @@ type combinedHistogram struct {
es []exemplar.Exemplar
}
-func (m *mockCombinedAppender) AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) error {
+func (m *mockCombinedAppender) Append(_ storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, _ *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
+ if h != nil {
+ m.pendingHistograms = append(m.pendingHistograms, combinedHistogram{
+ metricFamilyName: opts.MetricFamilyName,
+ ls: ls,
+ meta: opts.Metadata,
+ t: t,
+ st: st,
+ h: h,
+ es: opts.Exemplars,
+ })
+ return 0, nil
+ }
m.pendingSamples = append(m.pendingSamples, combinedSample{
- metricFamilyName: meta.MetricFamilyName,
+ metricFamilyName: opts.MetricFamilyName,
ls: ls,
- meta: meta.Metadata,
+ meta: opts.Metadata,
t: t,
st: st,
v: v,
- es: es,
+ es: opts.Exemplars,
})
- return nil
-}
-
-func (m *mockCombinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error {
- m.pendingHistograms = append(m.pendingHistograms, combinedHistogram{
- metricFamilyName: meta.MetricFamilyName,
- ls: ls,
- meta: meta.Metadata,
- t: t,
- st: st,
- h: h,
- es: es,
- })
- return nil
+ return 0, nil
}
func (m *mockCombinedAppender) Commit() error {
@@ -101,837 +91,10 @@ func (m *mockCombinedAppender) Commit() error {
return nil
}
+func (*mockCombinedAppender) Rollback() error {
+ return errors.New("not implemented")
+}
+
func requireEqual(t testing.TB, expected, actual any, msgAndArgs ...any) {
testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.AllowUnexported(combinedSample{}, combinedHistogram{})}, msgAndArgs...)
}
-
-// TestCombinedAppenderOnTSDB runs some basic tests on a real TSDB to check
-// that the combinedAppender works on a real TSDB.
-func TestCombinedAppenderOnTSDB(t *testing.T) {
- t.Run("ingestSTZeroSample=false", func(t *testing.T) { testCombinedAppenderOnTSDB(t, false) })
-
- t.Run("ingestSTZeroSample=true", func(t *testing.T) { testCombinedAppenderOnTSDB(t, true) })
-}
-
-func testCombinedAppenderOnTSDB(t *testing.T, ingestSTZeroSample bool) {
- t.Helper()
-
- now := time.Now()
-
- testExemplars := []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- {
- Labels: labels.FromStrings("tracid", "132"),
- Value: 7777,
- },
- }
- expectedExemplars := []exemplar.QueryResult{
- {
- SeriesLabels: labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- ),
- Exemplars: testExemplars,
- },
- }
-
- seriesLabels := labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- )
- floatMetadata := Metadata{
- Metadata: metadata.Metadata{
- Type: model.MetricTypeCounter,
- Unit: "bytes",
- Help: "some help",
- },
- MetricFamilyName: "test_bytes_total",
- }
-
- histogramMetadata := Metadata{
- Metadata: metadata.Metadata{
- Type: model.MetricTypeHistogram,
- Unit: "bytes",
- Help: "some help",
- },
- MetricFamilyName: "test_bytes",
- }
-
- testCases := map[string]struct {
- appendFunc func(*testing.T, CombinedAppender)
- extraAppendFunc func(*testing.T, CombinedAppender)
- expectedSamples []sample
- expectedExemplars []exemplar.QueryResult
- expectedLogsForST []string
- }{
- "single float sample, zero ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, testExemplars))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- expectedExemplars: expectedExemplars,
- },
- "single float sample, very old ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 1, now.UnixMilli(), 42.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- expectedLogsForST: []string{
- "Error when appending ST from OTLP",
- "out of bound",
- },
- },
- "single float sample, normal ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil))
- },
- expectedSamples: []sample{
- {
- stZero: true,
- t: now.Add(-2 * time.Minute).UnixMilli(),
- },
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- },
- "single float sample, ST same time as sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- },
- "two float samples in different messages, ST same time as first sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil))
- },
- extraAppendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), 43.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- {
- t: now.Add(time.Second).UnixMilli(),
- f: 43.0,
- },
- },
- },
- "single float sample, ST in the future of the sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- },
- "single histogram sample, zero ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), testExemplars))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- expectedExemplars: expectedExemplars,
- },
- "single histogram sample, very old ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 1, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- expectedLogsForST: []string{
- "Error when appending ST from OTLP",
- "out of bound",
- },
- },
- "single histogram sample, normal ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- expectedSamples: []sample{
- {
- stZero: true,
- t: now.Add(-2 * time.Minute).UnixMilli(),
- h: &histogram.Histogram{},
- },
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- },
- "single histogram sample, ST same time as sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- },
- "two histogram samples in different messages, ST same time as first sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- extraAppendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(43), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- {
- t: now.Add(time.Second).UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(43),
- },
- },
- },
- "single histogram sample, ST in the future of the sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- },
- "multiple float samples": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, nil))
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.Add(15*time.Second).UnixMilli(), 62.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- {
- t: now.Add(15 * time.Second).UnixMilli(),
- f: 62.0,
- },
- },
- },
- "multiple histogram samples": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.Add(15*time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(62), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- {
- t: now.Add(15 * time.Second).UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(62),
- },
- },
- },
- "float samples with ST changing": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-4*time.Second).UnixMilli(), now.Add(-3*time.Second).UnixMilli(), 42.0, nil))
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-1*time.Second).UnixMilli(), now.UnixMilli(), 62.0, nil))
- },
- expectedSamples: []sample{
- {
- stZero: true,
- t: now.Add(-4 * time.Second).UnixMilli(),
- },
- {
- t: now.Add(-3 * time.Second).UnixMilli(),
- f: 42.0,
- },
- {
- stZero: true,
- t: now.Add(-1 * time.Second).UnixMilli(),
- },
- {
- t: now.UnixMilli(),
- f: 62.0,
- },
- },
- },
- }
-
- for name, tc := range testCases {
- t.Run(name, func(t *testing.T) {
- var expectedLogs []string
- if ingestSTZeroSample {
- expectedLogs = append(expectedLogs, tc.expectedLogsForST...)
- }
-
- dir := t.TempDir()
- opts := tsdb.DefaultOptions()
- opts.EnableExemplarStorage = true
- opts.MaxExemplars = 100
- db, err := tsdb.Open(dir, promslog.NewNopLogger(), prometheus.NewRegistry(), opts, nil)
- require.NoError(t, err)
-
- t.Cleanup(func() { db.Close() })
-
- var output bytes.Buffer
- logger := promslog.New(&promslog.Config{Writer: &output})
-
- ctx := context.Background()
- reg := prometheus.NewRegistry()
- cappMetrics := NewCombinedAppenderMetrics(reg)
- app := db.Appender(ctx)
- capp := NewCombinedAppender(app, logger, ingestSTZeroSample, false, cappMetrics)
- tc.appendFunc(t, capp)
- require.NoError(t, app.Commit())
-
- if tc.extraAppendFunc != nil {
- app = db.Appender(ctx)
- capp = NewCombinedAppender(app, logger, ingestSTZeroSample, false, cappMetrics)
- tc.extraAppendFunc(t, capp)
- require.NoError(t, app.Commit())
- }
-
- if len(expectedLogs) > 0 {
- for _, expectedLog := range expectedLogs {
- require.Contains(t, output.String(), expectedLog)
- }
- } else {
- require.Empty(t, output.String(), "unexpected log output")
- }
-
- q, err := db.Querier(int64(math.MinInt64), int64(math.MaxInt64))
- require.NoError(t, err)
-
- ss := q.Select(ctx, false, &storage.SelectHints{
- Start: int64(math.MinInt64),
- End: int64(math.MaxInt64),
- }, labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total"))
-
- require.NoError(t, ss.Err())
-
- require.True(t, ss.Next())
- series := ss.At()
- it := series.Iterator(nil)
- for i, sample := range tc.expectedSamples {
- if !ingestSTZeroSample && sample.stZero {
- continue
- }
- if sample.h == nil {
- require.Equal(t, chunkenc.ValFloat, it.Next())
- ts, v := it.At()
- require.Equal(t, sample.t, ts, "sample ts %d", i)
- require.Equal(t, sample.f, v, "sample v %d", i)
- } else {
- require.Equal(t, chunkenc.ValHistogram, it.Next())
- ts, h := it.AtHistogram(nil)
- require.Equal(t, sample.t, ts, "sample ts %d", i)
- require.Equal(t, sample.h.Count, h.Count, "sample v %d", i)
- }
- }
- require.False(t, ss.Next())
-
- eq, err := db.ExemplarQuerier(ctx)
- require.NoError(t, err)
- exResult, err := eq.Select(int64(math.MinInt64), int64(math.MaxInt64), []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total")})
- require.NoError(t, err)
- if tc.expectedExemplars == nil {
- tc.expectedExemplars = []exemplar.QueryResult{}
- }
- require.Equal(t, tc.expectedExemplars, exResult)
- })
- }
-}
-
-type sample struct {
- stZero bool
-
- t int64
- f float64
- h *histogram.Histogram
-}
-
-// TestCombinedAppenderSeriesRefs checks that the combined appender
-// correctly uses and updates the series references in the internal map.
-func TestCombinedAppenderSeriesRefs(t *testing.T) {
- seriesLabels := labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- )
-
- floatMetadata := Metadata{
- Metadata: metadata.Metadata{
- Type: model.MetricTypeCounter,
- Unit: "bytes",
- Help: "some help",
- },
- MetricFamilyName: "test_bytes_total",
- }
-
- t.Run("happy case with ST zero, reference is passed and reused", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- }))
-
- require.Len(t, app.records, 5)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[2])
- requireEqualOpAndRef(t, "Append", ref, app.records[3])
- requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[4])
- })
-
- t.Run("error on second ST ingest doesn't update the reference", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
-
- app.appendSTZeroSampleError = errors.New("test error")
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, nil))
-
- require.Len(t, app.records, 4)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[2])
- require.Zero(t, app.records[2].outRef, "the second AppendSTZeroSample returned 0")
- requireEqualOpAndRef(t, "Append", ref, app.records[3])
- })
-
- t.Run("metadata, exemplars are not updated if append failed", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
- app.appendError = errors.New("test error")
- require.Error(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 0, 1, 42.0, []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- }))
-
- require.Len(t, app.records, 1)
- require.Equal(t, appenderRecord{
- op: "Append",
- ls: labels.FromStrings(model.MetricNameLabel, "test_bytes_total", "foo", "bar"),
- }, app.records[0])
- })
-
- t.Run("metadata, exemplars are updated if append failed but reference is valid", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, true, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- newMetadata := floatMetadata
- newMetadata.Help = "some other help"
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
- app.appendError = errors.New("test error")
- require.Error(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- }))
-
- require.Len(t, app.records, 7)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2])
- requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[3])
- requireEqualOpAndRef(t, "Append", ref, app.records[4])
- require.Zero(t, app.records[4].outRef, "the second Append returned 0")
- requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5])
- requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[6])
- })
-
- t.Run("simulate conflict with existing series", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- ls := labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- )
-
- require.NoError(t, capp.AppendSample(ls, floatMetadata, 1, 2, 42.0, nil))
-
- hash := ls.Hash()
- cappImpl := capp.(*combinedAppender)
- series := cappImpl.refs[hash]
- series.ls = labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "club",
- )
- // The hash and ref remain the same, but we altered the labels.
- // This simulates a conflict with an existing series.
- cappImpl.refs[hash] = series
-
- require.NoError(t, capp.AppendSample(ls, floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- }))
-
- require.Len(t, app.records, 5)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[2])
- newRef := app.records[2].outRef
- require.NotEqual(t, ref, newRef, "the second AppendSTZeroSample returned a different reference")
- requireEqualOpAndRef(t, "Append", newRef, app.records[3])
- requireEqualOpAndRef(t, "AppendExemplar", newRef, app.records[4])
- })
-
- t.Run("check that invoking AppendHistogram returns an error for nil histogram", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- ls := labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- )
- err := capp.AppendHistogram(ls, Metadata{}, 4, 2, nil, nil)
- require.Error(t, err)
- })
-
- for _, appendMetadata := range []bool{false, true} {
- t.Run(fmt.Sprintf("appendMetadata=%t", appendMetadata), func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, appendMetadata, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
-
- if appendMetadata {
- require.Len(t, app.records, 3)
- requireEqualOp(t, "AppendSTZeroSample", app.records[0])
- requireEqualOp(t, "Append", app.records[1])
- requireEqualOp(t, "UpdateMetadata", app.records[2])
- } else {
- require.Len(t, app.records, 2)
- requireEqualOp(t, "AppendSTZeroSample", app.records[0])
- requireEqualOp(t, "Append", app.records[1])
- }
- })
- }
-}
-
-// TestCombinedAppenderMetadataChanges verifies that UpdateMetadata is called
-// when metadata fields change (help, unit, or type).
-func TestCombinedAppenderMetadataChanges(t *testing.T) {
- seriesLabels := labels.FromStrings(
- model.MetricNameLabel, "test_metric",
- "foo", "bar",
- )
-
- baseMetadata := Metadata{
- Metadata: metadata.Metadata{
- Type: model.MetricTypeCounter,
- Unit: "bytes",
- Help: "original help",
- },
- MetricFamilyName: "test_metric",
- }
-
- tests := []struct {
- name string
- modifyMetadata func(Metadata) Metadata
- }{
- {
- name: "help changes",
- modifyMetadata: func(m Metadata) Metadata {
- m.Help = "new help text"
- return m
- },
- },
- {
- name: "unit changes",
- modifyMetadata: func(m Metadata) Metadata {
- m.Unit = "seconds"
- return m
- },
- },
- {
- name: "type changes",
- modifyMetadata: func(m Metadata) Metadata {
- m.Type = model.MetricTypeGauge
- return m
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, true, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- newMetadata := tt.modifyMetadata(baseMetadata)
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), baseMetadata, 1, 2, 42.0, nil))
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, nil))
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 5, 162.0, nil))
-
- // Verify expected operations.
- require.Len(t, app.records, 7)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2])
- requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[3])
- requireEqualOpAndRef(t, "Append", ref, app.records[4])
- requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5])
- requireEqualOpAndRef(t, "Append", ref, app.records[6])
- })
- }
-}
-
-func requireEqualOp(t *testing.T, expectedOp string, actual appenderRecord) {
- t.Helper()
- require.Equal(t, expectedOp, actual.op)
-}
-
-func requireEqualOpAndRef(t *testing.T, expectedOp string, expectedRef storage.SeriesRef, actual appenderRecord) {
- t.Helper()
- require.Equal(t, expectedOp, actual.op)
- require.Equal(t, expectedRef, actual.ref)
-}
-
-type appenderRecord struct {
- op string
- ref storage.SeriesRef
- outRef storage.SeriesRef
- ls labels.Labels
-}
-
-type appenderRecorder struct {
- refcount uint64
- records []appenderRecord
-
- appendError error
- appendSTZeroSampleError error
- appendHistogramError error
- appendHistogramSTZeroSampleError error
- updateMetadataError error
- appendExemplarError error
-}
-
-var _ storage.Appender = &appenderRecorder{}
-
-func (a *appenderRecorder) setOutRef(ref storage.SeriesRef) {
- if len(a.records) == 0 {
- return
- }
- a.records[len(a.records)-1].outRef = ref
-}
-
-func (a *appenderRecorder) newRef() storage.SeriesRef {
- a.refcount++
- return storage.SeriesRef(a.refcount)
-}
-
-func (a *appenderRecorder) Append(ref storage.SeriesRef, ls labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "Append", ref: ref, ls: ls})
- if a.appendError != nil {
- return 0, a.appendError
- }
- if ref == 0 {
- ref = a.newRef()
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) AppendSTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "AppendSTZeroSample", ref: ref, ls: ls})
- if a.appendSTZeroSampleError != nil {
- return 0, a.appendSTZeroSampleError
- }
- if ref == 0 {
- ref = a.newRef()
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "AppendHistogram", ref: ref, ls: ls})
- if a.appendHistogramError != nil {
- return 0, a.appendHistogramError
- }
- if ref == 0 {
- ref = a.newRef()
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) AppendHistogramSTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "AppendHistogramSTZeroSample", ref: ref, ls: ls})
- if a.appendHistogramSTZeroSampleError != nil {
- return 0, a.appendHistogramSTZeroSampleError
- }
- if ref == 0 {
- ref = a.newRef()
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) UpdateMetadata(ref storage.SeriesRef, ls labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "UpdateMetadata", ref: ref, ls: ls})
- if a.updateMetadataError != nil {
- return 0, a.updateMetadataError
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) AppendExemplar(ref storage.SeriesRef, ls labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "AppendExemplar", ref: ref, ls: ls})
- if a.appendExemplarError != nil {
- return 0, a.appendExemplarError
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) Commit() error {
- a.records = append(a.records, appenderRecord{op: "Commit"})
- return nil
-}
-
-func (a *appenderRecorder) Rollback() error {
- a.records = append(a.records, appenderRecord{op: "Rollback"})
- return nil
-}
-
-func (*appenderRecorder) SetOptions(_ *storage.AppendOptions) {
- panic("not implemented")
-}
-
-func TestMetadataChangedLogic(t *testing.T) {
- seriesLabels := labels.FromStrings(model.MetricNameLabel, "test_metric", "foo", "bar")
- baseMetadata := Metadata{
- Metadata: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "original"},
- MetricFamilyName: "test_metric",
- }
-
- tests := []struct {
- name string
- appendMetadata bool
- modifyMetadata func(Metadata) Metadata
- expectWALCall bool
- verifyCached func(*testing.T, metadata.Metadata)
- }{
- {
- name: "appendMetadata=false, no change",
- appendMetadata: false,
- modifyMetadata: func(m Metadata) Metadata { return m },
- expectWALCall: false,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "original", m.Help) },
- },
- {
- name: "appendMetadata=false, help changes - cache updated, no WAL",
- appendMetadata: false,
- modifyMetadata: func(m Metadata) Metadata { m.Help = "changed"; return m },
- expectWALCall: false,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "changed", m.Help) },
- },
- {
- name: "appendMetadata=true, help changes - cache and WAL updated",
- appendMetadata: true,
- modifyMetadata: func(m Metadata) Metadata { m.Help = "changed"; return m },
- expectWALCall: true,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "changed", m.Help) },
- },
- {
- name: "appendMetadata=true, unit changes",
- appendMetadata: true,
- modifyMetadata: func(m Metadata) Metadata { m.Unit = "seconds"; return m },
- expectWALCall: true,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "seconds", m.Unit) },
- },
- {
- name: "appendMetadata=true, type changes",
- appendMetadata: true,
- modifyMetadata: func(m Metadata) Metadata { m.Type = model.MetricTypeGauge; return m },
- expectWALCall: true,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, model.MetricTypeGauge, m.Type) },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, tt.appendMetadata, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), baseMetadata, 1, 2, 42.0, nil))
-
- modifiedMetadata := tt.modifyMetadata(baseMetadata)
- app.records = nil
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), modifiedMetadata, 1, 3, 43.0, nil))
-
- hash := seriesLabels.Hash()
- cached, exists := capp.(*combinedAppender).refs[hash]
- require.True(t, exists)
- tt.verifyCached(t, cached.meta)
-
- updateMetadataCalled := false
- for _, record := range app.records {
- if record.op == "UpdateMetadata" {
- updateMetadataCalled = true
- break
- }
- }
- require.Equal(t, tt.expectWALCall, updateMetadataCalled)
- })
- }
-}
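The mock above targets the single entry point that replaces the old two-method CombinedAppender: float and native-histogram samples now flow through one Append call, with metadata, metric family name, and exemplars carried in an options struct. A sketch of the shape as exercised throughout this patch, inferred from the call sites here rather than copied from the storage package:

    // Inferred interface shape; only the methods and AOptions fields this
    // patch actually uses are shown.
    type appenderV2Shape interface {
        // A nil h and fh means a float sample; a non-nil h means a native
        // histogram, in which case the converter passes v as 0.
        Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64,
            h *histogram.Histogram, fh *histogram.FloatHistogram,
            opts storage.AOptions) (storage.SeriesRef, error)
        Commit() error
        Rollback() error
    }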
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
index 669e10e0a7..730486772e 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
@@ -40,6 +40,7 @@ import (
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
)
const (
@@ -73,8 +74,13 @@ var reservedLabelNames = []string{
// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
//
// This function requires cached resource and scope labels to be set up first.
-func (c *PrometheusConverter) createAttributes(attributes pcommon.Map, settings Settings,
- ignoreAttrs []string, logOnOverwrite bool, meta Metadata, extras ...string,
+func (c *PrometheusConverter) createAttributes(
+ attributes pcommon.Map,
+ settings Settings,
+ ignoreAttrs []string,
+ logOnOverwrite bool,
+ meta metadata.Metadata,
+ extras ...string,
) (labels.Labels, error) {
if c.resourceLabels == nil {
return labels.EmptyLabels(), errors.New("createAttributes called without initializing resource context")
@@ -210,8 +216,11 @@ func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporali
// with the user defined bucket boundaries of non-exponential OTel histograms.
// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
// https://github.com/prometheus/prometheus/issues/13485.
-func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
- settings Settings, meta Metadata,
+func (c *PrometheusConverter) addHistogramDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.HistogramDataPointSlice,
+ settings Settings,
+ appOpts storage.AOptions,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -221,36 +230,32 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp())
startTimestamp := convertTimeStamp(pt.StartTimestamp())
- baseLabels, err := c.createAttributes(pt.Attributes(), settings, reservedLabelNames, false, meta)
+ baseLabels, err := c.createAttributes(pt.Attributes(), settings, reservedLabelNames, false, appOpts.Metadata)
if err != nil {
return err
}
- baseName := meta.MetricFamilyName
-
// If the sum is unset, it indicates the _sum metric point should be
// omitted
if pt.HasSum() {
- // treat sum as a sample in an individual TimeSeries
+ // Treat sum as a sample in an individual TimeSeries.
val := pt.Sum()
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
-
- sumlabels := c.addLabels(baseName+sumStr, baseLabels)
- if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ sumLabels := c.addLabels(appOpts.MetricFamilyName+sumStr, baseLabels)
+ if _, err := c.appender.Append(0, sumLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
}
- // treat count as a sample in an individual TimeSeries
+ // Treat count as a sample in an individual TimeSeries.
val := float64(pt.Count())
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
-
- countlabels := c.addLabels(baseName+countStr, baseLabels)
- if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ countLabels := c.addLabels(appOpts.MetricFamilyName+countStr, baseLabels)
+ if _, err := c.appender.Append(0, countLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
@@ -259,10 +264,10 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
}
nextExemplarIdx := 0
- // cumulative count for conversion to cumulative histogram
+ // Cumulative count for conversion to cumulative histogram.
var cumulativeCount uint64
- // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
+ // Process each bound; per the histogram proto definition, # of buckets = # of explicit bounds + 1.
for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
if err := c.everyN.checkContext(ctx); err != nil {
return err
@@ -273,32 +278,34 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
// Find exemplars that belong to this bucket. Both exemplars and
// buckets are sorted in ascending order.
- var currentBucketExemplars []exemplar.Exemplar
+ appOpts.Exemplars = appOpts.Exemplars[:0]
for ; nextExemplarIdx < len(exemplars); nextExemplarIdx++ {
ex := exemplars[nextExemplarIdx]
if ex.Value > bound {
// This exemplar belongs in a higher bucket.
break
}
- currentBucketExemplars = append(currentBucketExemplars, ex)
+ appOpts.Exemplars = append(appOpts.Exemplars, ex)
}
val := float64(cumulativeCount)
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
- labels := c.addLabels(baseName+bucketStr, baseLabels, leStr, boundStr)
- if err := c.appender.AppendSample(labels, meta, startTimestamp, timestamp, val, currentBucketExemplars); err != nil {
+ bucketLabels := c.addLabels(appOpts.MetricFamilyName+bucketStr, baseLabels, leStr, boundStr)
+ if _, err := c.appender.Append(0, bucketLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
}
- // add le=+Inf bucket
+
+ appOpts.Exemplars = exemplars[nextExemplarIdx:]
+ // Add le=+Inf bucket.
val = float64(pt.Count())
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
- infLabels := c.addLabels(baseName+bucketStr, baseLabels, leStr, pInfStr)
- if err := c.appender.AppendSample(infLabels, meta, startTimestamp, timestamp, val, exemplars[nextExemplarIdx:]); err != nil {
+ infLabels := c.addLabels(appOpts.MetricFamilyName+bucketStr, baseLabels, leStr, pInfStr)
+ if _, err := c.appender.Append(0, infLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
}
@@ -412,8 +419,11 @@ func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp p
return minTimestamp, maxTimestamp
}
-func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice,
- settings Settings, meta Metadata,
+func (c *PrometheusConverter) addSummaryDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.SummaryDataPointSlice,
+ settings Settings,
+ appOpts storage.AOptions,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -423,21 +433,18 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin
pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp())
startTimestamp := convertTimeStamp(pt.StartTimestamp())
- baseLabels, err := c.createAttributes(pt.Attributes(), settings, reservedLabelNames, false, meta)
+ baseLabels, err := c.createAttributes(pt.Attributes(), settings, reservedLabelNames, false, appOpts.Metadata)
if err != nil {
return err
}
- baseName := meta.MetricFamilyName
-
// treat sum as a sample in an individual TimeSeries
val := pt.Sum()
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
- // sum and count of the summary should append suffix to baseName
- sumlabels := c.addLabels(baseName+sumStr, baseLabels)
- if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ sumLabels := c.addLabels(appOpts.MetricFamilyName+sumStr, baseLabels)
+ if _, err := c.appender.Append(0, sumLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
@@ -446,8 +453,8 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
- countlabels := c.addLabels(baseName+countStr, baseLabels)
- if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ countLabels := c.addLabels(appOpts.MetricFamilyName+countStr, baseLabels)
+ if _, err := c.appender.Append(0, countLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
@@ -459,8 +466,8 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin
val = math.Float64frombits(value.StaleNaN)
}
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
- qtlabels := c.addLabels(baseName, baseLabels, quantileStr, percentileStr)
- if err := c.appender.AppendSample(qtlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ qtLabels := c.addLabels(appOpts.MetricFamilyName, baseLabels, quantileStr, percentileStr)
+ if _, err := c.appender.Append(0, qtLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
}
@@ -518,7 +525,7 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
// Do not pass identifying attributes as ignoreAttrs below.
identifyingAttrs = nil
}
- meta := Metadata{
+ appOpts := storage.AOptions{
Metadata: metadata.Metadata{
Type: model.MetricTypeGauge,
Help: "Target metadata",
@@ -530,7 +537,7 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
// Temporarily clear scope labels for this call.
savedScopeLabels := c.scopeLabels
c.scopeLabels = nil
- lbls, err := c.createAttributes(attributes, settings, identifyingAttrs, false, Metadata{}, model.MetricNameLabel, name)
+ lbls, err := c.createAttributes(attributes, settings, identifyingAttrs, false, metadata.Metadata{}, model.MetricNameLabel, name)
c.scopeLabels = savedScopeLabels
if err != nil {
return err
@@ -573,7 +580,8 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
}
c.seenTargetInfo[key] = struct{}{}
- if err := c.appender.AppendSample(lbls, meta, 0, timestampMs, float64(1), nil); err != nil {
+ _, err = c.appender.Append(0, lbls, 0, timestampMs, 1.0, nil, nil, appOpts)
+ if err != nil {
return err
}
}
@@ -589,7 +597,8 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
}
c.seenTargetInfo[key] = struct{}{}
- return c.appender.AppendSample(lbls, meta, 0, finalTimestampMs, float64(1), nil)
+ _, err = c.appender.Append(0, lbls, 0, finalTimestampMs, 1.0, nil, nil, appOpts)
+ return err
}
// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms.
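The explicit-bounds path above turns OTel bucket counts into Prometheus cumulative le buckets: per the histogram proto definition, # of buckets = # of explicit bounds + 1, and the final le="+Inf" bucket equals the total count. Note also that appOpts.Exemplars is reset with [:0] and reused across buckets, so an appender that retains exemplars past the Append call must copy them. A toy, self-contained illustration of the cumulative arithmetic (not Prometheus code):

    package main

    import "fmt"

    func main() {
        bounds := []float64{0.1, 1, 10} // 3 explicit bounds...
        counts := []uint64{5, 3, 2, 1}  // ...so 4 buckets; the last is (10, +Inf]
        var cumulative uint64
        for i, b := range bounds {
            cumulative += counts[i]
            fmt.Printf("le=%g -> %d\n", b, cumulative) // 5, 8, 10
        }
        cumulative += counts[len(bounds)]
        fmt.Printf("le=+Inf -> %d\n", cumulative) // 11, the total count
    }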
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
index b86b8cb3ea..3b5a1c4b34 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
@@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/testutil"
)
@@ -430,7 +431,7 @@ func TestPrometheusConverter_createAttributes(t *testing.T) {
require.NoError(t, c.setResourceContext(testResource, settings))
require.NoError(t, c.setScopeContext(tc.scope, settings))
- lbls, err := c.createAttributes(testAttrs, settings, tc.ignoreAttrs, false, Metadata{}, model.MetricNameLabel, "test_metric")
+ lbls, err := c.createAttributes(testAttrs, settings, tc.ignoreAttrs, false, metadata.Metadata{}, model.MetricNameLabel, "test_metric")
require.NoError(t, err)
testutil.RequireEqual(t, tc.expectedLabels, lbls)
@@ -462,7 +463,7 @@ func TestPrometheusConverter_createAttributes(t *testing.T) {
settings,
reservedLabelNames,
true,
- Metadata{},
+ metadata.Metadata{},
model.MetricNameLabel, "correct_metric_name",
)
require.NoError(t, err)
@@ -508,7 +509,7 @@ func TestPrometheusConverter_createAttributes(t *testing.T) {
settings,
reservedLabelNames,
true,
- Metadata{Metadata: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "seconds"}},
+ metadata.Metadata{Type: model.MetricTypeGauge, Unit: "seconds"},
model.MetricNameLabel, "test_metric",
)
require.NoError(t, err)
@@ -775,7 +776,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
context.Background(),
metric.Summary().DataPoints(),
settings,
- Metadata{
+ storage.AOptions{
MetricFamilyName: metric.Name(),
},
)
@@ -942,7 +943,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
context.Background(),
metric.Histogram().DataPoints(),
settings,
- Metadata{
+ storage.AOptions{
MetricFamilyName: metric.Name(),
},
)
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
index e2537b5cec..31c16b1c10 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
@@ -26,6 +26,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -33,8 +34,12 @@ const defaultZeroThreshold = 1e-128
// addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series
// as native histogram samples.
-func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice,
- settings Settings, temporality pmetric.AggregationTemporality, meta Metadata,
+func (c *PrometheusConverter) addExponentialHistogramDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.ExponentialHistogramDataPointSlice,
+ settings Settings,
+ temporality pmetric.AggregationTemporality,
+ appOpts storage.AOptions,
) (annotations.Annotations, error) {
var annots annotations.Annotations
for x := 0; x < dataPoints.Len(); x++ {
@@ -55,9 +60,9 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
settings,
reservedLabelNames,
true,
- meta,
+ appOpts.Metadata,
model.MetricNameLabel,
- meta.MetricFamilyName,
+ appOpts.MetricFamilyName,
)
if err != nil {
return annots, err
@@ -68,8 +73,10 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
if err != nil {
return annots, err
}
- // OTel exponential histograms are always Int Histograms.
- if err = c.appender.AppendHistogram(lbls, meta, st, ts, hp, exemplars); err != nil {
+
+ appOpts.Exemplars = exemplars
+ // OTel exponential histograms are always integer histograms.
+ if _, err = c.appender.Append(0, lbls, st, ts, 0, hp, nil, appOpts); err != nil {
return annots, err
}
}
@@ -248,8 +255,12 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust
return spans, deltas
}
-func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
- settings Settings, temporality pmetric.AggregationTemporality, meta Metadata,
+func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.HistogramDataPointSlice,
+ settings Settings,
+ temporality pmetric.AggregationTemporality,
+ appOpts storage.AOptions,
) (annotations.Annotations, error) {
var annots annotations.Annotations
@@ -271,9 +282,9 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co
settings,
reservedLabelNames,
true,
- meta,
+ appOpts.Metadata,
model.MetricNameLabel,
- meta.MetricFamilyName,
+ appOpts.MetricFamilyName,
)
if err != nil {
return annots, err
@@ -284,7 +295,9 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co
if err != nil {
return annots, err
}
- if err = c.appender.AppendHistogram(lbls, meta, st, ts, hp, exemplars); err != nil {
+
+ appOpts.Exemplars = exemplars
+ if _, err = c.appender.Append(0, lbls, st, ts, 0, hp, nil, appOpts); err != nil {
return annots, err
}
}
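Both the exponential and custom-buckets paths now follow the same call pattern: stash the converted exemplars on the options struct, then append the integer histogram through the generic entry point. Condensed from the two hunks above, with names as they appear in this patch:

    // v is unused for histogram samples, hence 0; OTel exponential histograms
    // always convert to integer histograms, so the *FloatHistogram slot is nil.
    appOpts.Exemplars = exemplars
    if _, err := c.appender.Append(0, lbls, st, ts, 0, hp, nil, appOpts); err != nil {
        return annots, err
    }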
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
index f55aef2f36..58d7c4e835 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
@@ -32,6 +32,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
)
type expectedBucketLayout struct {
@@ -875,7 +876,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
metric.ExponentialHistogram().DataPoints(),
settings,
pmetric.AggregationTemporalityCumulative,
- Metadata{
+ storage.AOptions{
MetricFamilyName: name,
},
)
@@ -1354,7 +1355,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
metric.Histogram().DataPoints(),
settings,
pmetric.AggregationTemporalityCumulative,
- Metadata{
+ storage.AOptions{
MetricFamilyName: name,
},
)
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
index 81e99a2f50..600282af6f 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
@@ -31,6 +31,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -85,7 +86,7 @@ type PrometheusConverter struct {
everyN everyNTimes
scratchBuilder labels.ScratchBuilder
builder *labels.Builder
- appender CombinedAppender
+ appender storage.AppenderV2
// seenTargetInfo tracks target_info samples within a batch to prevent duplicates.
seenTargetInfo map[targetInfoKey]struct{}
@@ -105,7 +106,7 @@ type targetInfoKey struct {
timestamp int64
}
-func NewPrometheusConverter(appender CombinedAppender) *PrometheusConverter {
+func NewPrometheusConverter(appender storage.AppenderV2) *PrometheusConverter {
return &PrometheusConverter{
scratchBuilder: labels.NewScratchBuilder(0),
builder: labels.NewBuilder(labels.EmptyLabels()),
@@ -170,7 +171,7 @@ func newScopeFromScopeMetrics(scopeMetrics pmetric.ScopeMetrics) scope {
}
}
-// FromMetrics converts pmetric.Metrics to Prometheus remote write format.
+// FromMetrics converts pmetric.Metrics and appends the result to the underlying storage.AppenderV2.
func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) {
namer := otlptranslator.MetricNamer{
Namespace: settings.Namespace,
@@ -236,7 +237,8 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
errs = errors.Join(errs, err)
continue
}
- meta := Metadata{
+
+ appOpts := storage.AOptions{
Metadata: metadata.Metadata{
Type: otelMetricTypeToPromMetricType(metric),
Unit: unitNamer.Build(metric.Unit()),
@@ -254,7 +256,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
errs = errors.Join(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addGaugeNumberDataPoints(ctx, dataPoints, settings, meta); err != nil {
+ if err := c.addGaugeNumberDataPoints(ctx, dataPoints, settings, appOpts); err != nil {
errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
@@ -266,7 +268,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
errs = errors.Join(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addSumNumberDataPoints(ctx, dataPoints, settings, meta); err != nil {
+ if err := c.addSumNumberDataPoints(ctx, dataPoints, settings, appOpts); err != nil {
errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
@@ -280,7 +282,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
}
if settings.ConvertHistogramsToNHCB {
ws, err := c.addCustomBucketsHistogramDataPoints(
- ctx, dataPoints, settings, temporality, meta,
+ ctx, dataPoints, settings, temporality, appOpts,
)
annots.Merge(ws)
if err != nil {
@@ -290,7 +292,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
}
}
} else {
- if err := c.addHistogramDataPoints(ctx, dataPoints, settings, meta); err != nil {
+ if err := c.addHistogramDataPoints(ctx, dataPoints, settings, appOpts); err != nil {
errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
@@ -308,7 +310,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
dataPoints,
settings,
temporality,
- meta,
+ appOpts,
)
annots.Merge(ws)
if err != nil {
@@ -323,7 +325,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
errs = errors.Join(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addSummaryDataPoints(ctx, dataPoints, settings, meta); err != nil {
+ if err := c.addSummaryDataPoints(ctx, dataPoints, settings, appOpts); err != nil {
errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
index f90051e84d..8ac860a291 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
@@ -22,9 +22,7 @@ import (
"testing"
"time"
- "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
- "github.com/prometheus/common/promslog"
"github.com/prometheus/otlptranslator"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
@@ -32,7 +30,6 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"github.com/prometheus/prometheus/config"
- "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
@@ -1239,54 +1236,57 @@ func createOTelEmptyMetricForTranslator(name string) pmetric.Metric {
return m
}
+// Recommended CLI invocation(s):
+/*
+ export bench=fromMetrics && go test ./storage/remote/otlptranslator/prometheusremotewrite/... \
+ -run '^$' -bench '^BenchmarkPrometheusConverter_FromMetrics' \
+ -benchtime 1s -count 6 -cpu 2 -timeout 999m -benchmem \
+ | tee ${bench}.txt
+*/
func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
for _, resourceAttributeCount := range []int{0, 5, 50} {
b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) {
- for _, histogramCount := range []int{0, 1000} {
- b.Run(fmt.Sprintf("histogram count: %v", histogramCount), func(b *testing.B) {
- nonHistogramCounts := []int{0, 1000}
+ for _, metricCount := range []struct {
+ histogramCount int
+ nonHistogramCount int
+ }{
+ {histogramCount: 0, nonHistogramCount: 1000},
+ {histogramCount: 1000, nonHistogramCount: 0},
+ {histogramCount: 1000, nonHistogramCount: 1000},
+ } {
+ b.Run(fmt.Sprintf("histogram count: %v/non-histogram count: %v", metricCount.histogramCount, metricCount.nonHistogramCount), func(b *testing.B) {
+ for _, labelsPerMetric := range []int{2, 20} {
+ b.Run(fmt.Sprintf("labels per metric: %v", labelsPerMetric), func(b *testing.B) {
+ for _, exemplarsPerSeries := range []int{0, 5, 10} {
+ b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) {
+ settings := Settings{}
+ payload, _ := createExportRequest(
+ resourceAttributeCount,
+ metricCount.histogramCount,
+ metricCount.nonHistogramCount,
+ labelsPerMetric,
+ exemplarsPerSeries,
+ settings,
+ pmetric.AggregationTemporalityCumulative,
+ )
- if resourceAttributeCount == 0 && histogramCount == 0 {
- // Don't bother running a scenario where we'll generate no series.
- nonHistogramCounts = []int{1000}
- }
+ b.ResetTimer()
+ for b.Loop() {
+ app := &noOpAppender{}
+ converter := NewPrometheusConverter(app)
+ annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
+ require.NoError(b, err)
+ require.Empty(b, annots)
- for _, nonHistogramCount := range nonHistogramCounts {
- b.Run(fmt.Sprintf("non-histogram count: %v", nonHistogramCount), func(b *testing.B) {
- for _, labelsPerMetric := range []int{2, 20} {
- b.Run(fmt.Sprintf("labels per metric: %v", labelsPerMetric), func(b *testing.B) {
- for _, exemplarsPerSeries := range []int{0, 5, 10} {
- b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) {
- settings := Settings{}
- payload, _ := createExportRequest(
- resourceAttributeCount,
- histogramCount,
- nonHistogramCount,
- labelsPerMetric,
- exemplarsPerSeries,
- settings,
- pmetric.AggregationTemporalityCumulative,
- )
- appMetrics := NewCombinedAppenderMetrics(prometheus.NewRegistry())
- noOpLogger := promslog.NewNopLogger()
- b.ResetTimer()
-
- for b.Loop() {
- app := &noOpAppender{}
- mockAppender := NewCombinedAppender(app, noOpLogger, false, true, appMetrics)
- converter := NewPrometheusConverter(mockAppender)
- annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
- require.NoError(b, err)
- require.Empty(b, annots)
- if histogramCount+nonHistogramCount > 0 {
- require.Positive(b, app.samples+app.histograms)
- require.Positive(b, app.metadata)
- } else {
- require.Zero(b, app.samples+app.histograms)
- require.Zero(b, app.metadata)
- }
- }
- })
+ // TODO(bwplotka): This should be tested somewhere else, otherwise we benchmark
+ // the mock too.
+ if metricCount.histogramCount+metricCount.nonHistogramCount > 0 {
+ require.Positive(b, app.samples+app.histograms)
+ require.Positive(b, app.metadata)
+ } else {
+ require.Zero(b, app.samples+app.histograms)
+ require.Zero(b, app.metadata)
+ }
}
})
}
@@ -1304,35 +1304,20 @@ type noOpAppender struct {
metadata int
}
-var _ storage.Appender = &noOpAppender{}
+var _ storage.AppenderV2 = &noOpAppender{}
-func (a *noOpAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) {
+func (a *noOpAppender) Append(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ float64, h *histogram.Histogram, _ *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
+ if !opts.Metadata.IsEmpty() {
+ a.metadata++
+ }
+ if h != nil {
+ a.histograms++
+ return 1, nil
+ }
a.samples++
return 1, nil
}
-func (*noOpAppender) AppendSTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
- return 1, nil
-}
-
-func (a *noOpAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- a.histograms++
- return 1, nil
-}
-
-func (*noOpAppender) AppendHistogramSTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- return 1, nil
-}
-
-func (a *noOpAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
- a.metadata++
- return 1, nil
-}
-
-func (*noOpAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
- return 1, nil
-}
-
func (*noOpAppender) Commit() error {
return nil
}
@@ -1341,10 +1326,6 @@ func (*noOpAppender) Rollback() error {
return nil
}
-func (*noOpAppender) SetOptions(_ *storage.AppendOptions) {
- panic("not implemented")
-}
-
type wantPrometheusMetric struct {
name string
familyName string
@@ -1677,15 +1658,12 @@ func BenchmarkFromMetrics_LabelCaching_MultipleDatapointsPerResource(b *testing.
labelsPerMetric,
scopeAttributeCount,
)
- appMetrics := NewCombinedAppenderMetrics(prometheus.NewRegistry())
- noOpLogger := promslog.NewNopLogger()
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
app := &noOpAppender{}
- mockAppender := NewCombinedAppender(app, noOpLogger, false, false, appMetrics)
- converter := NewPrometheusConverter(mockAppender)
+ converter := NewPrometheusConverter(app)
_, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
require.NoError(b, err)
}
@@ -1709,15 +1687,12 @@ func BenchmarkFromMetrics_LabelCaching_RepeatedLabelNames(b *testing.B) {
datapoints,
labelsPerDatapoint,
)
- appMetrics := NewCombinedAppenderMetrics(prometheus.NewRegistry())
- noOpLogger := promslog.NewNopLogger()
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
app := &noOpAppender{}
- mockAppender := NewCombinedAppender(app, noOpLogger, false, false, appMetrics)
- converter := NewPrometheusConverter(mockAppender)
+ converter := NewPrometheusConverter(app)
_, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
require.NoError(b, err)
}
@@ -1747,15 +1722,12 @@ func BenchmarkFromMetrics_LabelCaching_ScopeMetadata(b *testing.B) {
labelsPerMetric,
scopeAttrs,
)
- appMetrics := NewCombinedAppenderMetrics(prometheus.NewRegistry())
- noOpLogger := promslog.NewNopLogger()
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
app := &noOpAppender{}
- mockAppender := NewCombinedAppender(app, noOpLogger, false, false, appMetrics)
- converter := NewPrometheusConverter(mockAppender)
+ converter := NewPrometheusConverter(app)
_, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
require.NoError(b, err)
}
@@ -1786,15 +1758,12 @@ func BenchmarkFromMetrics_LabelCaching_MultipleResources(b *testing.B) {
metricsPerResource,
labelsPerMetric,
)
- appMetrics := NewCombinedAppenderMetrics(prometheus.NewRegistry())
- noOpLogger := promslog.NewNopLogger()
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
app := &noOpAppender{}
- mockAppender := NewCombinedAppender(app, noOpLogger, false, false, appMetrics)
- converter := NewPrometheusConverter(mockAppender)
+ converter := NewPrometheusConverter(app)
_, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
require.NoError(b, err)
}
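The reworked benchmarks also swap the classic for i := 0; i < b.N; i++ idiom for testing.B.Loop (Go 1.24+), which manages the timer itself, so the payload construction stays outside the measured region while per-iteration converter setup stays inside it:

    for b.Loop() {
        app := &noOpAppender{}
        converter := NewPrometheusConverter(app)
        _, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
        require.NoError(b, err)
    }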
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
index e681bb352b..3c74ec9382 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
@@ -24,10 +24,14 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric"
"github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
)
-func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice,
- settings Settings, meta Metadata,
+func (c *PrometheusConverter) addGaugeNumberDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.NumberDataPointSlice,
+ settings Settings,
+ appOpts storage.AOptions,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -40,9 +44,9 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
settings,
reservedLabelNames,
true,
- meta,
+ appOpts.Metadata,
model.MetricNameLabel,
- meta.MetricFamilyName,
+ appOpts.MetricFamilyName,
)
if err != nil {
return err
@@ -59,7 +63,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
}
ts := convertTimeStamp(pt.Timestamp())
st := convertTimeStamp(pt.StartTimestamp())
- if err := c.appender.AppendSample(labels, meta, st, ts, val, nil); err != nil {
+ if _, err = c.appender.Append(0, labels, st, ts, val, nil, nil, appOpts); err != nil {
return err
}
}
@@ -67,8 +71,11 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
return nil
}
-func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice,
- settings Settings, meta Metadata,
+func (c *PrometheusConverter) addSumNumberDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.NumberDataPointSlice,
+ settings Settings,
+ appOpts storage.AOptions,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -81,9 +88,9 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
settings,
reservedLabelNames,
true,
- meta,
+ appOpts.Metadata,
model.MetricNameLabel,
- meta.MetricFamilyName,
+ appOpts.MetricFamilyName,
)
if err != nil {
return err
@@ -104,7 +111,9 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
if err != nil {
return err
}
- if err := c.appender.AppendSample(lbls, meta, st, ts, val, exemplars); err != nil {
+
+ appOpts.Exemplars = exemplars
+ if _, err = c.appender.Append(0, lbls, st, ts, val, nil, nil, appOpts); err != nil {
return err
}
}
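For orientation, the new call shape used throughout this file, as one schematic sketch; it assumes an AppenderV2 in scope as app plus the sample's labels, timestamps, and value (meta, familyName, exemplars, lbls, st, ts, val are placeholders), and uses only the AOptions fields this file exercises — the real struct may carry more:

opts := storage.AOptions{
	Metadata:         meta,       // type/unit/help metadata for the series
	MetricFamilyName: familyName, // metric family the sample belongs to
}
opts.Exemplars = exemplars // the sum path attaches exemplars per data point
if _, err := app.Append(0, lbls, st, ts, val, nil, nil, opts); err != nil {
	return err
}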
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
index 58a27c12e1..67961a2943 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
@@ -29,6 +29,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
)
func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
@@ -127,7 +128,7 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
context.Background(),
metric.Gauge().DataPoints(),
settings,
- Metadata{
+ storage.AOptions{
MetricFamilyName: metric.Name(),
},
)
@@ -361,7 +362,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
context.Background(),
metric.Sum().DataPoints(),
settings,
- Metadata{
+ storage.AOptions{
MetricFamilyName: metric.Name(),
},
)
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index a72712a535..3dac96f6a0 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -537,3 +537,27 @@ func (app *remoteWriteAppender) AppendExemplar(ref storage.SeriesRef, l labels.L
}
return ref, nil
}
+
+type remoteWriteAppenderV2 struct {
+ storage.AppenderV2
+
+ maxTime int64
+}
+
+func (app *remoteWriteAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ if t > app.maxTime {
+ return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
+ }
+
+ if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax {
+ if err := h.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
+ return 0, err
+ }
+ }
+ if fh != nil && histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > histogram.ExponentialSchemaMax {
+ if err := fh.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
+ return 0, err
+ }
+ }
+ return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
+}
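remoteWriteAppenderV2 mirrors the v1 remoteWriteAppender above it: samples timestamped beyond maxTime are rejected with storage.ErrOutOfBounds, and native histograms whose reserved exponential schema is finer than ExponentialSchemaMax are reduced in resolution (buckets merged to a coarser schema) before the append is delegated.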
diff --git a/storage/remote/write_otlp_handler.go b/storage/remote/write_otlp_handler.go
index 489a7b574a..b8888baeb8 100644
--- a/storage/remote/write_otlp_handler.go
+++ b/storage/remote/write_otlp_handler.go
@@ -23,6 +23,7 @@ import (
deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pmetric"
@@ -30,6 +31,8 @@ import (
"go.opentelemetry.io/otel/metric/noop"
"github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/storage"
otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
@@ -47,16 +50,11 @@ type OTLPOptions struct {
LookbackDelta time.Duration
// Add type and unit labels to the metrics.
EnableTypeAndUnitLabels bool
- // IngestSTZeroSample enables writing zero samples based on the start time
- // of metrics.
- IngestSTZeroSample bool
- // AppendMetadata enables writing metadata to WAL when metadata-wal-records feature is enabled.
- AppendMetadata bool
}
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
-func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
+func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.AppendableV2, configFunc func() config.Config, opts OTLPOptions) http.Handler {
if opts.NativeDelta && opts.ConvertDelta {
// This should be validated when iterating through feature flags, so not expected to fail here.
panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time")
@@ -64,15 +62,11 @@ func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appenda
ex := &rwExporter{
logger: logger,
- appendable: appendable,
+ appendable: newOTLPInstrumentedAppendable(reg, appendable),
config: configFunc,
allowDeltaTemporality: opts.NativeDelta,
lookbackDelta: opts.LookbackDelta,
- ingestSTZeroSample: opts.IngestSTZeroSample,
enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels,
- appendMetadata: opts.AppendMetadata,
- // Register metrics.
- metrics: otlptranslator.NewCombinedAppenderMetrics(reg),
}
wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}
@@ -107,26 +101,20 @@ func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appenda
type rwExporter struct {
logger *slog.Logger
- appendable storage.Appendable
+ appendable storage.AppendableV2
config func() config.Config
allowDeltaTemporality bool
lookbackDelta time.Duration
- ingestSTZeroSample bool
enableTypeAndUnitLabels bool
- appendMetadata bool
-
- // Metrics.
- metrics otlptranslator.CombinedAppenderMetrics
}
func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
otlpCfg := rw.config().OTLPConfig
- app := &remoteWriteAppender{
- Appender: rw.appendable.Appender(ctx),
- maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
+ app := &remoteWriteAppenderV2{
+ AppenderV2: rw.appendable.AppenderV2(ctx),
+ maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
- combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.ingestSTZeroSample, rw.appendMetadata, rw.metrics)
- converter := otlptranslator.NewPrometheusConverter(combinedAppender)
+ converter := otlptranslator.NewPrometheusConverter(app)
annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(),
@@ -225,3 +213,64 @@ func hasDelta(md pmetric.Metrics) bool {
}
return false
}
+
+type otlpInstrumentedAppendable struct {
+ storage.AppendableV2
+
+ samplesAppendedWithoutMetadata prometheus.Counter
+ outOfOrderExemplars prometheus.Counter
+}
+
+// newOTLPInstrumentedAppendable wraps an AppendableV2, recording OTLP-specific
+// metrics on every append and handling partial errors, so the caller does not need to.
+func newOTLPInstrumentedAppendable(reg prometheus.Registerer, app storage.AppendableV2) *otlpInstrumentedAppendable {
+ return &otlpInstrumentedAppendable{
+ AppendableV2: app,
+ samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "prometheus",
+ Subsystem: "api",
+ Name: "otlp_appended_samples_without_metadata_total",
+ Help: "The total number of samples ingested from OTLP without corresponding metadata.",
+ }),
+ outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "prometheus",
+ Subsystem: "api",
+ Name: "otlp_out_of_order_exemplars_total",
+ Help: "The total number of received OTLP exemplars which were rejected because they were out of order.",
+ }),
+ }
+}
+
+func (a *otlpInstrumentedAppendable) AppenderV2(ctx context.Context) storage.AppenderV2 {
+ return &otlpInstrumentedAppender{
+ AppenderV2: a.AppendableV2.AppenderV2(ctx),
+
+ samplesAppendedWithoutMetadata: a.samplesAppendedWithoutMetadata,
+ outOfOrderExemplars: a.outOfOrderExemplars,
+ }
+}
+
+type otlpInstrumentedAppender struct {
+ storage.AppenderV2
+
+ samplesAppendedWithoutMetadata prometheus.Counter
+ outOfOrderExemplars prometheus.Counter
+}
+
+func (app *otlpInstrumentedAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ ref, err := app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
+ if err != nil {
+ var partialErr *storage.AppendPartialError
+ partialErr, hErr := partialErr.Handle(err)
+ if hErr != nil {
+ // Not a partial error, return err.
+ return 0, err
+ }
+ app.outOfOrderExemplars.Add(float64(len(partialErr.ExemplarErrors)))
+ // Hide the partial error, as the OTLP converter does not handle it.
+ }
+ if opts.Metadata.IsEmpty() {
+ app.samplesAppendedWithoutMetadata.Inc()
+ }
+ return ref, nil
+}
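With the prometheus namespace and api subsystem applied, the two counters surface as prometheus_api_otlp_appended_samples_without_metadata_total and prometheus_api_otlp_out_of_order_exemplars_total; the wrapper increments them per append, so the converter code stays free of instrumentation concerns.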
diff --git a/storage/remote/write_otlp_handler_test.go b/storage/remote/write_otlp_handler_test.go
index 57c0b2ab22..be3482f440 100644
--- a/storage/remote/write_otlp_handler_test.go
+++ b/storage/remote/write_otlp_handler_test.go
@@ -15,6 +15,7 @@ package remote
import (
"bytes"
+ "errors"
"fmt"
"log/slog"
"math/rand/v2"
@@ -28,6 +29,8 @@ import (
"time"
"github.com/google/go-cmp/cmp"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/prometheus/otlptranslator"
"github.com/stretchr/testify/require"
@@ -41,6 +44,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/timestamp"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/teststorage"
)
@@ -48,6 +52,7 @@ type sample = teststorage.Sample
func TestOTLPWriteHandler(t *testing.T) {
ts := time.Now()
+ st := ts.Add(-1 * time.Millisecond)
// Expected samples passed via OTLP request without details (labels for now) that
// depend on translation or type and unit labels options.
@@ -55,7 +60,7 @@ func TestOTLPWriteHandler(t *testing.T) {
return []sample{
{
M: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- V: 10.0, T: timestamp.FromTime(ts), ES: []exemplar.Exemplar{
+ V: 10.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts), ES: []exemplar.Exemplar{
{
Labels: labels.FromStrings("span_id", "0001020304050607", "trace_id", "000102030405060708090a0b0c0d0e0f"),
Value: 10, Ts: timestamp.FromTime(ts), HasTs: true,
@@ -64,43 +69,43 @@ func TestOTLPWriteHandler(t *testing.T) {
},
{
M: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- V: 10.0, T: timestamp.FromTime(ts),
+ V: 10.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 30.0, T: timestamp.FromTime(ts),
+ V: 30.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 12.0, T: timestamp.FromTime(ts),
+ V: 12.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 2.0, T: timestamp.FromTime(ts),
+ V: 2.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 4.0, T: timestamp.FromTime(ts),
+ V: 4.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 6.0, T: timestamp.FromTime(ts),
+ V: 6.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 8.0, T: timestamp.FromTime(ts),
+ V: 8.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 10.0, T: timestamp.FromTime(ts),
+ V: 10.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 12.0, T: timestamp.FromTime(ts),
+ V: 12.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- V: 12.0, T: timestamp.FromTime(ts),
+ V: 12.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
@@ -112,7 +117,7 @@ func TestOTLPWriteHandler(t *testing.T) {
ZeroCount: 2,
PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}},
PositiveBuckets: []int64{2, 0, 0, 0, 0},
- }, T: timestamp.FromTime(ts),
+ }, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
},
{
M: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, V: 1, T: timestamp.FromTime(ts),
@@ -120,34 +125,32 @@ func TestOTLPWriteHandler(t *testing.T) {
}
}
- exportRequest := generateOTLPWriteRequest(ts, time.Time{})
+ exportRequest := generateOTLPWriteRequest(ts, st)
for _, testCase := range []struct {
- name string
- otlpCfg config.OTLPConfig
- typeAndUnitLabels bool
- // NOTE: This is a slice of samples, not []labels.Labels because metric family detail will be added once
- // OTLP handler moves to AppenderV2.
- expectedLabels []sample
+ name string
+ otlpCfg config.OTLPConfig
+ typeAndUnitLabels bool
+ expectedLabelsAndMFs []sample
}{
{
name: "NoTranslation/NoTypeAndUnitLabels",
otlpCfg: config.OTLPConfig{
TranslationStrategy: otlptranslator.NoTranslation,
},
- expectedLabels: []sample{
- {L: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
+ expectedLabelsAndMFs: []sample{
+ {MF: "test.counter", L: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.gauge", L: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test.exponential.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
{
@@ -156,20 +159,20 @@ func TestOTLPWriteHandler(t *testing.T) {
TranslationStrategy: otlptranslator.NoTranslation,
},
typeAndUnitLabels: true,
- expectedLabels: []sample{
- {L: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
+ expectedLabelsAndMFs: []sample{
+ {MF: "test.counter", L: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.gauge", L: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test.exponential.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
		// For the following cases, skip the type and unit variants; they have nothing to do with translation.
@@ -178,20 +181,20 @@ func TestOTLPWriteHandler(t *testing.T) {
otlpCfg: config.OTLPConfig{
TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
},
- expectedLabels: []sample{
- {L: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service")},
+ expectedLabelsAndMFs: []sample{
+ {MF: "test_counter_bytes_total", L: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_gauge_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test_exponential_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
{
@@ -199,20 +202,20 @@ func TestOTLPWriteHandler(t *testing.T) {
otlpCfg: config.OTLPConfig{
TranslationStrategy: otlptranslator.UnderscoreEscapingWithoutSuffixes,
},
- expectedLabels: []sample{
- {L: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
- {L: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service")},
+ expectedLabelsAndMFs: []sample{
+ {MF: "test_counter", L: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_gauge", L: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test_exponential_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
{
@@ -220,34 +223,33 @@ func TestOTLPWriteHandler(t *testing.T) {
otlpCfg: config.OTLPConfig{
TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
},
- expectedLabels: []sample{
+ expectedLabelsAndMFs: []sample{
			// TODO: Counter MF name looks like a bug; uncovered in an unrelated refactor. Fix it.
- {L: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
- {L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
- {L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.counter_bytes_total", L: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.gauge_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test.exponential.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
},
},
} {
t.Run(testCase.name, func(t *testing.T) {
otlpOpts := OTLPOptions{
EnableTypeAndUnitLabels: testCase.typeAndUnitLabels,
- AppendMetadata: true,
}
appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, otlpOpts)
// Compile final expected samples.
expectedSamples := expectedSamplesWithoutLabelsFn()
- for i, s := range testCase.expectedLabels {
+ for i, s := range testCase.expectedLabelsAndMFs {
expectedSamples[i].L = s.L
expectedSamples[i].MF = s.MF
}
@@ -256,204 +258,6 @@ func TestOTLPWriteHandler(t *testing.T) {
}
}
-// Check that start time is ingested if ingestSTZeroSample is enabled
-// and the start time is actually set (non-zero).
-// TODO(bwplotka): This test is still using old mockAppender. Keeping like this as this test
-// will be removed when OTLP handling switches to AppenderV2.
-func TestOTLPWriteHandler_StartTime(t *testing.T) {
- timestamp := time.Now()
- startTime := timestamp.Add(-1 * time.Millisecond)
- var zeroTime time.Time
-
- expectedSamples := []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 30.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- t: timestamp.UnixMilli(),
- v: 2.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- t: timestamp.UnixMilli(),
- v: 4.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- t: timestamp.UnixMilli(),
- v: 6.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- t: timestamp.UnixMilli(),
- v: 8.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1.0,
- },
- }
- expectedHistograms := []mockHistogram{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- h: &histogram.Histogram{
- Schema: 2,
- ZeroThreshold: 1e-128,
- ZeroCount: 2,
- Count: 10,
- Sum: 30,
- PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}},
- PositiveBuckets: []int64{2, 0, 0, 0, 0},
- },
- },
- }
-
- expectedSamplesWithSTZero := make([]mockSample, 0, len(expectedSamples)*2-1) // All samples will get ST zero, except target_info.
- for _, s := range expectedSamples {
- if s.l.Get(model.MetricNameLabel) != "target_info" {
- expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, mockSample{
- l: s.l.Copy(),
- t: startTime.UnixMilli(),
- v: 0,
- })
- }
- expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, s)
- }
- expectedHistogramsWithSTZero := make([]mockHistogram, 0, len(expectedHistograms)*2)
- for _, s := range expectedHistograms {
- if s.l.Get(model.MetricNameLabel) != "target_info" {
- expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, mockHistogram{
- l: s.l.Copy(),
- t: startTime.UnixMilli(),
- h: &histogram.Histogram{},
- })
- }
- expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, s)
- }
-
- for _, testCase := range []struct {
- name string
- otlpOpts OTLPOptions
- startTime time.Time
- expectSTZero bool
- expectedSamples []mockSample
- expectedHistograms []mockHistogram
- }{
- {
- name: "IngestSTZero=false/startTime=0",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: false,
- },
- startTime: zeroTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=true/startTime=0",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: true,
- },
- startTime: zeroTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=false/startTime=ts-1ms",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: false,
- },
- startTime: startTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=true/startTime=ts-1ms",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: true,
- },
- startTime: startTime,
- expectedSamples: expectedSamplesWithSTZero,
- expectedHistograms: expectedHistogramsWithSTZero,
- },
- } {
- t.Run(testCase.name, func(t *testing.T) {
- exportRequest := generateOTLPWriteRequest(timestamp, testCase.startTime)
-
- buf, err := exportRequest.MarshalProto()
- require.NoError(t, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(buf))
- require.NoError(t, err)
- req.Header.Set("Content-Type", "application/x-protobuf")
-
- log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- appendable := &mockAppendable{}
- handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config {
- return config.Config{
- OTLPConfig: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoTranslation,
- },
- }
- }, testCase.otlpOpts)
- recorder := httptest.NewRecorder()
- handler.ServeHTTP(recorder, req)
-
- resp := recorder.Result()
- require.Equal(t, http.StatusOK, resp.StatusCode)
-
- for i, expect := range testCase.expectedSamples {
- actual := appendable.samples[i]
- require.True(t, labels.Equal(expect.l, actual.l), "sample labels,pos=%v", i)
- require.Equal(t, expect.t, actual.t, "sample timestamp,pos=%v", i)
- require.Equal(t, expect.v, actual.v, "sample value,pos=%v", i)
- }
- for i, expect := range testCase.expectedHistograms {
- actual := appendable.histograms[i]
- require.True(t, labels.Equal(expect.l, actual.l), "histogram labels,pos=%v", i)
- require.Equal(t, expect.t, actual.t, "histogram timestamp,pos=%v", i)
- require.True(t, expect.h.Equals(actual.h), "histogram value,pos=%v", i)
- }
- require.Len(t, appendable.samples, len(testCase.expectedSamples))
- require.Len(t, appendable.histograms, len(testCase.expectedHistograms))
- })
- }
-}
-
func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *teststorage.Appendable {
t.Helper()
@@ -608,9 +412,9 @@ func TestOTLPDelta(t *testing.T) {
}
want := []sample{
- {T: milli(0), L: ls, V: 0}, // +0
- {T: milli(1), L: ls, V: 1}, // +1
- {T: milli(2), L: ls, V: 3}, // +2
+ {MF: "some_delta_total", M: metadata.Metadata{Type: model.MetricTypeGauge}, T: milli(0), L: ls, V: 0}, // +0
+ {MF: "some_delta_total", M: metadata.Metadata{Type: model.MetricTypeGauge}, T: milli(1), L: ls, V: 1}, // +1
+ {MF: "some_delta_total", M: metadata.Metadata{Type: model.MetricTypeGauge}, T: milli(2), L: ls, V: 3}, // +2
}
if diff := cmp.Diff(want, appendable.ResultSamples(), cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" {
t.Fatal(diff)
@@ -901,3 +705,55 @@ func sampleCount(md pmetric.Metrics) int {
}
return total
}
+
+func TestOTLPInstrumentedAppendable(t *testing.T) {
+ t.Run("no problems", func(t *testing.T) {
+ appTest := teststorage.NewAppendable()
+ oa := newOTLPInstrumentedAppendable(prometheus.NewRegistry(), appTest)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+
+ app := oa.AppenderV2(t.Context())
+ _, err := app.Append(0, labels.EmptyLabels(), -1, 1, 2, nil, nil, storage.AOptions{Metadata: metadata.Metadata{Help: "yo"}})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Len(t, appTest.ResultSamples(), 1)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+ })
+ t.Run("without metadata", func(t *testing.T) {
+ appTest := teststorage.NewAppendable()
+ oa := newOTLPInstrumentedAppendable(prometheus.NewRegistry(), appTest)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+
+ app := oa.AppenderV2(t.Context())
+ _, err := app.Append(0, labels.EmptyLabels(), -1, 1, 2, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Len(t, appTest.ResultSamples(), 1)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 1.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+ })
+ t.Run("without metadata; 2 exemplar OOO errors", func(t *testing.T) {
+ appTest := teststorage.NewAppendable().WithErrs(nil, errors.New("exemplar error"), nil)
+ oa := newOTLPInstrumentedAppendable(prometheus.NewRegistry(), appTest)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+
+ app := oa.AppenderV2(t.Context())
+ _, err := app.Append(0, labels.EmptyLabels(), -1, 1, 2, nil, nil, storage.AOptions{Exemplars: []exemplar.Exemplar{{}, {}}})
+ // Partial errors should be handled by the middleware; the OTLP converter does not handle them.
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Len(t, appTest.ResultSamples(), 1)
+
+ require.Equal(t, 2.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 1.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+ })
+}
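The third subtest is the interesting one: a single Append carries two exemplars while the test appendable injects an exemplar error, so the wrapper swallows the resulting partial error, the sample itself still lands (ResultSamples has length 1), and both exemplar failures are credited to the out-of-order counter.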
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index 225ef6911d..8f2c848710 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -265,7 +265,7 @@ type API struct {
func NewAPI(
qe promql.QueryEngine,
q storage.SampleAndChunkQueryable,
- ap storage.Appendable,
+ ap storage.Appendable, apV2 storage.AppendableV2,
eq storage.ExemplarQueryable,
spsr func(context.Context) ScrapePoolsRetriever,
tr func(context.Context) TargetRetriever,
@@ -342,7 +342,7 @@ func NewAPI(
a.statsRenderer = statsRenderer
}
- if ap == nil && (rwEnabled || otlpEnabled) {
+ if (ap == nil || apV2 == nil) && (rwEnabled || otlpEnabled) {
panic("remote write or otlp write enabled, but no appender passed in.")
}
@@ -350,13 +350,11 @@ func NewAPI(
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, stZeroIngestionEnabled, enableTypeAndUnitLabels, appendMetadata)
}
if otlpEnabled {
- a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{
+ a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, apV2, configFunc, remote.OTLPOptions{
ConvertDelta: otlpDeltaToCumulative,
NativeDelta: otlpNativeDeltaIngestion,
LookbackDelta: lookbackDelta,
- IngestSTZeroSample: stZeroIngestionEnabled,
EnableTypeAndUnitLabels: enableTypeAndUnitLabels,
- AppendMetadata: appendMetadata,
})
}
diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go
index 850bedef17..6e123ac51c 100644
--- a/web/api/v1/errors_test.go
+++ b/web/api/v1/errors_test.go
@@ -134,7 +134,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable, overri
api := NewAPI(
engine,
q,
- nil,
+ nil, nil,
nil,
func(context.Context) ScrapePoolsRetriever { return &DummyScrapePoolsRetriever{} },
func(context.Context) TargetRetriever { return &DummyTargetRetriever{} },
diff --git a/web/api/v1/test_helpers.go b/web/api/v1/test_helpers.go
index 2662b0c84b..2f84cd22d2 100644
--- a/web/api/v1/test_helpers.go
+++ b/web/api/v1/test_helpers.go
@@ -33,7 +33,7 @@ func newTestAPI(t *testing.T, cfg testhelpers.APIConfig) *testhelpers.APIWrapper
api := NewAPI(
params.QueryEngine,
params.Queryable,
- nil, // appendable
+ nil, nil, // appendables
params.ExemplarQueryable,
func(ctx context.Context) ScrapePoolsRetriever {
return adaptScrapePoolsRetriever(params.ScrapePoolsRetriever(ctx))
diff --git a/web/web.go b/web/web.go
index cb9258d87f..5d44cedd97 100644
--- a/web/web.go
+++ b/web/web.go
@@ -356,9 +356,12 @@ func New(logger *slog.Logger, o *Options) *Handler {
factoryAr := func(context.Context) api_v1.AlertmanagerRetriever { return h.notifier }
FactoryRr := func(context.Context) api_v1.RulesRetriever { return h.ruleManager }
- var app storage.Appendable
+ var (
+ app storage.Appendable
+ appV2 storage.AppendableV2
+ )
if o.EnableRemoteWriteReceiver || o.EnableOTLPWriteReceiver {
- app = h.storage
+ app, appV2 = h.storage, h.storage
}
version := ""
@@ -366,7 +369,7 @@ func New(logger *slog.Logger, o *Options) *Handler {
version = o.Version.Version
}
- h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, app, h.exemplarStorage, factorySPr, factoryTr, factoryAr,
+ h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, app, appV2, h.exemplarStorage, factorySPr, factoryTr, factoryAr,
func() config.Config {
h.mtx.RLock()
defer h.mtx.RUnlock()
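Since h.storage satisfies both storage.Appendable and storage.AppendableV2, the handler passes it twice: the remote-write path keeps the v1 appender while the OTLP path moves to AppenderV2.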
From 859abfb35d16178acef4518c0a54f84a0e30a08d Mon Sep 17 00:00:00 2001
From: Ben Kochie
Date: Wed, 4 Feb 2026 10:40:56 +0100
Subject: [PATCH 114/165] Reduce CI duplication (#18008)
Only run CI on pushes to main and release branches. This avoids duplicate
runs between the pull_request and push events.
Signed-off-by: SuperQ
---
.github/workflows/ci.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d1f3a0c988..8453110e7f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -3,6 +3,7 @@ name: CI
on:
pull_request:
push:
+ branches: [main, 'release-*']
permissions:
contents: read
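After this change the trigger block reads as follows, so pushes to branches other than main and release-* no longer start a second run next to the pull_request one:

on:
  pull_request:
  push:
    branches: [main, 'release-*']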
From 8d491cc64241d9608e5fa418275e69507dea077f Mon Sep 17 00:00:00 2001
From: Ben Kochie
Date: Wed, 4 Feb 2026 10:41:57 +0100
Subject: [PATCH 115/165] tsdb: Migrate multi-errors to errors package (#17768)
Modernize tsdb package by migrating multi-error handling
to the standard library errors package.
* Add a modernized CloseAll helper.
Signed-off-by: SuperQ
---
tsdb/block.go | 11 ++-
tsdb/chunks/chunks.go | 2 +-
tsdb/compact.go | 23 +++--
tsdb/compact_test.go | 1 -
tsdb/db.go | 67 ++++++++-------
tsdb/errors/errors.go | 109 -----------------------
tsdb/errors/errors_test.go | 172 -------------------------------------
tsdb/head.go | 13 ++-
tsdb/head_wal.go | 15 ++--
tsdb/querier.go | 7 +-
tsdb/repair.go | 24 +++---
tsdb/wlog/reader_test.go | 2 +-
12 files changed, 83 insertions(+), 363 deletions(-)
delete mode 100644 tsdb/errors/errors.go
delete mode 100644 tsdb/errors/errors_test.go
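The whole migration is one mechanical pattern: collect errors into a slice (or pass them inline) and let errors.Join do what tsdb_errors.NewMulti did. A minimal, self-contained sketch of the idiom — including why the custom Is() method is no longer needed, since values produced by errors.Join unwrap for errors.Is and errors.As:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

// closeAll mirrors the modernized helper this patch adds: close everything,
// report every failure. errors.Join drops nil entries and returns nil when
// all closes succeeded.
func closeAll(cs []io.Closer) error {
	var errs []error
	for _, c := range cs {
		errs = append(errs, c.Close())
	}
	return errors.Join(errs...)
}

func main() {
	err := errors.Join(io.ErrUnexpectedEOF, context.Canceled)
	fmt.Println(errors.Is(err, context.Canceled)) // true: Join unwraps for Is/As
}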
diff --git a/tsdb/block.go b/tsdb/block.go
index 92638df164..118dd672ef 100644
--- a/tsdb/block.go
+++ b/tsdb/block.go
@@ -33,7 +33,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -297,12 +296,12 @@ func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, err
n, err := f.Write(jsonMeta)
if err != nil {
- return 0, tsdb_errors.NewMulti(err, f.Close()).Err()
+ return 0, errors.Join(err, f.Close())
}
// Force the kernel to persist the file on disk to avoid data loss if the host crashes.
if err := f.Sync(); err != nil {
- return 0, tsdb_errors.NewMulti(err, f.Close()).Err()
+ return 0, errors.Join(err, f.Close())
}
if err := f.Close(); err != nil {
return 0, err
@@ -344,7 +343,7 @@ func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool, postingsDeco
var closers []io.Closer
defer func() {
if err != nil {
- err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(closers)).Err()
+ err = errors.Join(err, closeAll(closers))
}
}()
meta, sizeMeta, err := readMetaFile(dir)
@@ -398,11 +397,11 @@ func (pb *Block) Close() error {
pb.pendingReaders.Wait()
- return tsdb_errors.NewMulti(
+ return errors.Join(
pb.chunkr.Close(),
pb.indexr.Close(),
pb.tombstones.Close(),
- ).Err()
+ )
}
func (pb *Block) String() string {
diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go
index ce4c9d3d78..9b4e011562 100644
--- a/tsdb/chunks/chunks.go
+++ b/tsdb/chunks/chunks.go
@@ -777,7 +777,7 @@ func sequenceFiles(dir string) ([]string, error) {
return res, nil
}
-// closeAll closes all given closers while recording error in MultiError.
+// closeAll closes all given closers while recording all errors.
func closeAll(cs []io.Closer) error {
var errs []error
for _, c := range cs {
diff --git a/tsdb/compact.go b/tsdb/compact.go
index 9a32cec449..7091d34d50 100644
--- a/tsdb/compact.go
+++ b/tsdb/compact.go
@@ -32,7 +32,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -572,16 +571,16 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
return []ulid.ULID{uid}, nil
}
- errs := tsdb_errors.NewMulti(err)
+ errs := []error{err}
if !errors.Is(err, context.Canceled) {
for _, b := range bs {
if err := b.setCompactionFailed(); err != nil {
- errs.Add(fmt.Errorf("setting compaction failed for block: %s: %w", b.Dir(), err))
+ errs = append(errs, fmt.Errorf("setting compaction failed for block: %s: %w", b.Dir(), err))
}
}
}
- return nil, errs.Err()
+ return nil, errors.Join(errs...)
}
func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) {
@@ -661,7 +660,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
tmp := dir + tmpForCreationBlockDirSuffix
var closers []io.Closer
defer func(t time.Time) {
- err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(closers)).Err()
+ err = errors.Join(err, closeAll(closers))
// RemoveAll returns no error when tmp doesn't exist so it is safe to always run it.
if err := os.RemoveAll(tmp); err != nil {
@@ -718,13 +717,13 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
// though these are covered under defer. This is because in Windows,
// you cannot delete these unless they are closed and the defer is to
// make sure they are closed if the function exits due to an error above.
- errs := tsdb_errors.NewMulti()
+ var errs []error
for _, w := range closers {
- errs.Add(w.Close())
+ errs = append(errs, w.Close())
}
closers = closers[:0] // Avoid closing the writers twice in the defer.
- if errs.Err() != nil {
- return errs.Err()
+ if err := errors.Join(errs...); err != nil {
+ return err
}
// Populated block is empty, so exit early.
@@ -803,11 +802,9 @@ func (DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compact
overlapping bool
)
defer func() {
- errs := tsdb_errors.NewMulti(err)
- if cerr := tsdb_errors.CloseAll(closers); cerr != nil {
- errs.Add(fmt.Errorf("close: %w", cerr))
+ if cerr := closeAll(closers); cerr != nil {
+ err = errors.Join(err, fmt.Errorf("close: %w", cerr))
}
- err = errs.Err()
metrics.PopulatingBlocks.Set(0)
}()
metrics.PopulatingBlocks.Set(1)
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index fcb659d040..afe15a5f31 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -1421,7 +1421,6 @@ func TestCancelCompactions(t *testing.T) {
// Make sure that no blocks were marked as compaction failed.
// This checks that the `context.Canceled` error is properly checked at all levels:
- // - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks.
// - callers should check with errors.Is() instead of ==.
readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", promslog.NewNopLogger())
require.NoError(t, err)
diff --git a/tsdb/db.go b/tsdb/db.go
index c5da5b54a6..3e98b1e8d9 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -41,7 +41,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
_ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minimum Go version is met.
"github.com/prometheus/prometheus/tsdb/tsdbutil"
@@ -538,11 +537,9 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
return err
}
defer func() {
- errs := tsdb_errors.NewMulti(returnErr)
if err := head.Close(); err != nil {
- errs.Add(fmt.Errorf("closing Head: %w", err))
+ returnErr = errors.Join(returnErr, fmt.Errorf("closing Head: %w", err))
}
- returnErr = errs.Err()
}()
// Set the min valid time for the ingested wal samples
// to be no lower than the maxt of the last block.
@@ -697,13 +694,13 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
db.logger.Warn("Closing block failed", "err", err, "block", b)
}
}
- errs := tsdb_errors.NewMulti()
+ var errs []error
for ulid, err := range corrupted {
if err != nil {
- errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
+ errs = append(errs, fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
}
}
- return nil, errs.Err()
+ return nil, errors.Join(errs...)
}
if len(loadable) == 0 {
@@ -814,7 +811,7 @@ func (db *DBReadOnly) Close() error {
}
close(db.closed)
- return tsdb_errors.CloseAll(db.closers)
+ return closeAll(db.closers)
}
// Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used.
@@ -934,11 +931,9 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
}
close(db.donec) // DB is never run if it was an error, so close this channel here.
- errs := tsdb_errors.NewMulti(returnedErr)
if err := db.Close(); err != nil {
- errs.Add(fmt.Errorf("close DB after failed startup: %w", err))
+ returnedErr = errors.Join(returnedErr, fmt.Errorf("close DB after failed startup: %w", err))
}
- returnedErr = errs.Err()
}()
if db.blocksToDelete == nil {
@@ -1392,11 +1387,9 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) {
lastBlockMaxt := int64(math.MinInt64)
defer func() {
- errs := tsdb_errors.NewMulti(returnErr)
if err := db.head.truncateWAL(lastBlockMaxt); err != nil {
- errs.Add(fmt.Errorf("WAL truncation in Compact defer: %w", err))
+ returnErr = errors.Join(returnErr, fmt.Errorf("WAL truncation in Compact defer: %w", err))
}
- returnErr = errs.Err()
}()
start := time.Now()
@@ -1521,13 +1514,13 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
return fmt.Errorf("compact ooo head: %w", err)
}
if err := db.reloadBlocks(); err != nil {
- errs := tsdb_errors.NewMulti(err)
+ errs := []error{err}
for _, uid := range ulids {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
- errs.Add(errRemoveAll)
+ errs = append(errs, errRemoveAll)
}
}
- return fmt.Errorf("reloadBlocks blocks after failed compact ooo head: %w", errs.Err())
+ return fmt.Errorf("reloadBlocks blocks after failed compact ooo head: %w", errors.Join(errs...))
}
lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef()
@@ -1612,13 +1605,15 @@ func (db *DB) compactHead(head *RangeHead) error {
}
if err := db.reloadBlocks(); err != nil {
- multiErr := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
+ errs := []error{
+ fmt.Errorf("reloadBlocks blocks: %w", err),
+ }
for _, uid := range uids {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
- multiErr.Add(fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
+ errs = append(errs, fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
}
}
- return multiErr.Err()
+ return errors.Join(errs...)
}
if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil {
return fmt.Errorf("head memory truncate: %w", err)
@@ -1708,13 +1703,13 @@ func (db *DB) compactBlocks() (err error) {
}
if err := db.reloadBlocks(); err != nil {
- errs := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
+ errs := []error{fmt.Errorf("reloadBlocks blocks: %w", err)}
for _, uid := range uids {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
- errs.Add(fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
+ errs = append(errs, fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
}
}
- return errs.Err()
+ return errors.Join(errs...)
}
}
@@ -1794,13 +1789,13 @@ func (db *DB) reloadBlocks() (err error) {
}
}
db.mtx.RUnlock()
- errs := tsdb_errors.NewMulti()
+ var errs []error
for ulid, err := range corrupted {
if err != nil {
- errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
+ errs = append(errs, fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
}
}
- return errs.Err()
+ return errors.Join(errs...)
}
var (
@@ -2172,11 +2167,14 @@ func (db *DB) Close() error {
g.Go(pb.Close)
}
- errs := tsdb_errors.NewMulti(g.Wait(), db.locker.Release())
- if db.head != nil {
- errs.Add(db.head.Close())
+ errs := []error{
+ g.Wait(),
+ db.locker.Release(),
}
- return errs.Err()
+ if db.head != nil {
+ errs = append(errs, db.head.Close())
+ }
+ return errors.Join(errs...)
}
// DisableCompactions disables auto compactions.
@@ -2557,3 +2555,12 @@ func exponential(d, minD, maxD time.Duration) time.Duration {
}
return d
}
+
+// closeAll closes all given closers while recording all errors.
+func closeAll(cs []io.Closer) error {
+ var errs []error
+ for _, c := range cs {
+ errs = append(errs, c.Close())
+ }
+ return errors.Join(errs...)
+}
diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go
deleted file mode 100644
index 138b38a8d2..0000000000
--- a/tsdb/errors/errors.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright The Prometheus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package errors
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-)
-
-// multiError type allows combining multiple errors into one.
-type multiError []error
-
-// NewMulti returns multiError with provided errors added if not nil.
-func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return
- m := multiError{}
- m.Add(errs...)
- return m
-}
-
-// Add adds single or many errors to the error list. Each error is added only if not nil.
-// If the error is a nonNilMultiError type, the errors inside nonNilMultiError are added to the main multiError.
-func (es *multiError) Add(errs ...error) {
- for _, err := range errs {
- if err == nil {
- continue
- }
- var merr nonNilMultiError
- if errors.As(err, &merr) {
- *es = append(*es, merr.errs...)
- continue
- }
- *es = append(*es, err)
- }
-}
-
-// Err returns the error list as an error or nil if it is empty.
-func (es multiError) Err() error {
- if len(es) == 0 {
- return nil
- }
- return nonNilMultiError{errs: es}
-}
-
-// nonNilMultiError implements the error interface, and it represents
-// multiError with at least one error inside it.
-// This type is needed to make sure that nil is returned when no error is combined in multiError for err != nil
-// check to work.
-type nonNilMultiError struct {
- errs multiError
-}
-
-// Error returns a concatenated string of the contained errors.
-func (es nonNilMultiError) Error() string {
- var buf bytes.Buffer
-
- if len(es.errs) > 1 {
- fmt.Fprintf(&buf, "%d errors: ", len(es.errs))
- }
-
- for i, err := range es.errs {
- if i != 0 {
- buf.WriteString("; ")
- }
- buf.WriteString(err.Error())
- }
-
- return buf.String()
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored in the MultiError.
-// It returns true if any of the errors in the list match the target.
-func (es nonNilMultiError) Is(target error) bool {
- for _, err := range es.errs {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
-}
-
-// Unwrap returns the list of errors contained in the multiError.
-func (es nonNilMultiError) Unwrap() []error {
- return es.errs
-}
-
-// CloseAll closes all given closers while recording error in MultiError.
-func CloseAll(cs []io.Closer) error {
- errs := NewMulti()
- for _, c := range cs {
- errs.Add(c.Close())
- }
- return errs.Err()
-}
diff --git a/tsdb/errors/errors_test.go b/tsdb/errors/errors_test.go
deleted file mode 100644
index acffdea261..0000000000
--- a/tsdb/errors/errors_test.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package errors
-
-import (
- "context"
- "errors"
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestMultiError_Is(t *testing.T) {
- customErr1 := errors.New("test error 1")
- customErr2 := errors.New("test error 2")
-
- testCases := map[string]struct {
- sourceErrors []error
- target error
- is bool
- }{
- "adding a context cancellation doesn't lose the information": {
- sourceErrors: []error{context.Canceled},
- target: context.Canceled,
- is: true,
- },
- "adding multiple context cancellations doesn't lose the information": {
- sourceErrors: []error{context.Canceled, context.Canceled},
- target: context.Canceled,
- is: true,
- },
- "adding wrapped context cancellations doesn't lose the information": {
- sourceErrors: []error{errors.New("some error"), fmt.Errorf("some message: %w", context.Canceled)},
- target: context.Canceled,
- is: true,
- },
- "adding a nil error doesn't lose the information": {
- sourceErrors: []error{errors.New("some error"), fmt.Errorf("some message: %w", context.Canceled), nil},
- target: context.Canceled,
- is: true,
- },
- "errors with no context cancellation error are not a context canceled error": {
- sourceErrors: []error{errors.New("first error"), errors.New("second error")},
- target: context.Canceled,
- is: false,
- },
- "no errors are not a context canceled error": {
- sourceErrors: nil,
- target: context.Canceled,
- is: false,
- },
- "no errors are a nil error": {
- sourceErrors: nil,
- target: nil,
- is: true,
- },
- "nested multi-error contains customErr1": {
- sourceErrors: []error{
- customErr1,
- NewMulti(
- customErr2,
- fmt.Errorf("wrapped %w", context.Canceled),
- ).Err(),
- },
- target: customErr1,
- is: true,
- },
- "nested multi-error contains customErr2": {
- sourceErrors: []error{
- customErr1,
- NewMulti(
- customErr2,
- fmt.Errorf("wrapped %w", context.Canceled),
- ).Err(),
- },
- target: customErr2,
- is: true,
- },
- "nested multi-error contains wrapped context.Canceled": {
- sourceErrors: []error{
- customErr1,
- NewMulti(
- customErr2,
- fmt.Errorf("wrapped %w", context.Canceled),
- ).Err(),
- },
- target: context.Canceled,
- is: true,
- },
- "nested multi-error does not contain context.DeadlineExceeded": {
- sourceErrors: []error{
- customErr1,
- NewMulti(
- customErr2,
- fmt.Errorf("wrapped %w", context.Canceled),
- ).Err(),
- },
- target: context.DeadlineExceeded,
- is: false, // make sure we still return false in valid cases
- },
- }
-
- for testName, testCase := range testCases {
- t.Run(testName, func(t *testing.T) {
- mErr := NewMulti(testCase.sourceErrors...)
- require.Equal(t, testCase.is, errors.Is(mErr.Err(), testCase.target))
- })
- }
-}
-
-func TestMultiError_As(t *testing.T) {
- tE1 := testError{"error cause 1"}
- tE2 := testError{"error cause 2"}
- var target testError
- testCases := map[string]struct {
- sourceErrors []error
- target error
- as bool
- }{
- "MultiError containing only a testError can be cast to that testError": {
- sourceErrors: []error{tE1},
- target: tE1,
- as: true,
- },
- "MultiError containing multiple testErrors can be cast to the first testError added": {
- sourceErrors: []error{tE1, tE2},
- target: tE1,
- as: true,
- },
- "MultiError containing multiple errors can be cast to the first testError added": {
- sourceErrors: []error{context.Canceled, tE1, context.DeadlineExceeded, tE2},
- target: tE1,
- as: true,
- },
- "MultiError not containing a testError cannot be cast to a testError": {
- sourceErrors: []error{context.Canceled, context.DeadlineExceeded},
- as: false,
- },
- }
-
- for testName, testCase := range testCases {
- t.Run(testName, func(t *testing.T) {
- mErr := NewMulti(testCase.sourceErrors...).Err()
- if testCase.as {
- require.ErrorAs(t, mErr, &target)
- require.Equal(t, testCase.target, target)
- } else {
- require.NotErrorAs(t, mErr, &target)
- }
- })
- }
-}
-
-type testError struct {
- cause string
-}
-
-func (e testError) Error() string {
- return fmt.Sprintf("testError[cause: %s]", e.cause)
-}
diff --git a/tsdb/head.go b/tsdb/head.go
index 3d700944d9..6fe42c8cf2 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -40,7 +40,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -1812,17 +1811,17 @@ func (h *Head) Close() error {
// takes samples from most recent head chunk.
h.mmapHeadChunks()
- errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close())
+ errs := h.chunkDiskMapper.Close()
if h.wal != nil {
- errs.Add(h.wal.Close())
+ errs = errors.Join(errs, h.wal.Close())
}
if h.wbl != nil {
- errs.Add(h.wbl.Close())
+ errs = errors.Join(errs, h.wbl.Close())
}
- if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown {
- errs.Add(h.performChunkSnapshot())
+ if errs == nil && h.opts.EnableMemorySnapshotOnShutdown {
+ errs = errors.Join(errs, h.performChunkSnapshot())
}
- return errs.Err()
+ return errs
}
// String returns an human readable representation of the TSDB head. It's important to
diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go
index b323f0dbf6..0581b9306e 100644
--- a/tsdb/head_wal.go
+++ b/tsdb/head_wal.go
@@ -37,7 +37,6 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -1536,7 +1535,7 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error {
return err
}
- errs := tsdb_errors.NewMulti()
+ var errs []error
for _, fi := range files {
if !strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
continue
@@ -1559,11 +1558,11 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error {
if idx < maxIndex || (idx == maxIndex && offset < maxOffset) {
if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
- errs.Add(err)
+ errs = append(errs, err)
}
}
}
- return errs.Err()
+ return errors.Join(errs...)
}
// loadChunkSnapshot replays the chunk snapshot and restores the Head state from it. If there was any error returned,
@@ -1751,14 +1750,14 @@ Outer:
}
close(errChan)
- merr := tsdb_errors.NewMulti()
+ var errs []error
if loopErr != nil {
- merr.Add(fmt.Errorf("decode loop: %w", loopErr))
+ errs = append(errs, fmt.Errorf("decode loop: %w", loopErr))
}
for err := range errChan {
- merr.Add(fmt.Errorf("record processing: %w", err))
+ errs = append(errs, fmt.Errorf("record processing: %w", err))
}
- if err := merr.Err(); err != nil {
+ if err := errors.Join(errs...); err != nil {
return -1, -1, nil, err
}
diff --git a/tsdb/querier.go b/tsdb/querier.go
index ce0292bf24..ac7a14e1b3 100644
--- a/tsdb/querier.go
+++ b/tsdb/querier.go
@@ -27,7 +27,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
@@ -92,13 +91,13 @@ func (q *blockBaseQuerier) Close() error {
return errors.New("block querier already closed")
}
- errs := tsdb_errors.NewMulti(
+ errs := []error{
q.index.Close(),
q.chunks.Close(),
q.tombstones.Close(),
- )
+ }
q.closed = true
- return errs.Err()
+ return errors.Join(errs...)
}
type blockQuerier struct {
diff --git a/tsdb/repair.go b/tsdb/repair.go
index 0d9d449a40..4ef69c80ed 100644
--- a/tsdb/repair.go
+++ b/tsdb/repair.go
@@ -15,13 +15,13 @@ package tsdb
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
)
@@ -82,20 +82,22 @@ func repairBadIndexVersion(logger *slog.Logger, dir string) error {
// Set the 5th byte to 2 to indicate the correct file format version.
if _, err := repl.WriteAt([]byte{2}, 4); err != nil {
- errs := tsdb_errors.NewMulti(
- fmt.Errorf("rewrite of index.repaired for block dir: %v: %w", d, err))
- if err := repl.Close(); err != nil {
- errs.Add(fmt.Errorf("close: %w", err))
+ errs := []error{
+ fmt.Errorf("rewrite of index.repaired for block dir: %v: %w", d, err),
}
- return errs.Err()
+ if err := repl.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("close: %w", err))
+ }
+ return errors.Join(errs...)
}
if err := repl.Sync(); err != nil {
- errs := tsdb_errors.NewMulti(
- fmt.Errorf("sync of index.repaired for block dir: %v: %w", d, err))
- if err := repl.Close(); err != nil {
- errs.Add(fmt.Errorf("close: %w", err))
+ errs := []error{
+ fmt.Errorf("sync of index.repaired for block dir: %v: %w", d, err),
}
- return errs.Err()
+ if err := repl.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("close: %w", err))
+ }
+ return errors.Join(errs...)
}
if err := repl.Close(); err != nil {
return fmt.Errorf("close repaired index for block dir: %v: %w", d, err)
diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go
index 971423e5cc..9381fe99b5 100644
--- a/tsdb/wlog/reader_test.go
+++ b/tsdb/wlog/reader_test.go
@@ -550,7 +550,7 @@ func TestReaderData(t *testing.T) {
}
}
-// closeAll closes all given closers while recording error in MultiError.
+// closeAll closes all given closers while recording all errors.
func closeAll(cs []io.Closer) error {
var errs []error
for _, c := range cs {
From f0f40b970d49829adc688972127350348adb7ae9 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Wed, 4 Feb 2026 06:42:37 -0600
Subject: [PATCH 116/165] Document special labels that affect scraping and
targets (`__address__`, `__scheme__`, `__metrics_path__`,
`__scrape_interval__`, `__scrape_timeout__`) (#17765)
Signed-off-by: Eric Eastwood
---
docs/configuration/configuration.md | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 1f2f9931e8..edaf6adbb2 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -2973,6 +2973,11 @@ labels:
[ : ... ]
```
+The special labels mentioned in the [relabeling](#relabel_config) section can also be
+used here to override the respective settings in the scrape configuration. This is
+especially useful when combined with any of the service discovery mechanisms that do not
+support these settings directly.
+
### ``
Relabeling is a powerful tool to dynamically rewrite the label set of a target before
@@ -2982,6 +2987,11 @@ in the configuration file.
Initially, aside from the configured per-target labels, a target's `job`
label is set to the `job_name` value of the respective scrape configuration.
+
+You can also set the special labels `__address__`, `__scheme__`, `__metrics_path__`,
+`__scrape_interval__`, and `__scrape_timeout__` to customize the defined targets. They
+override the respective settings in the scrape configuration.
+
The `__address__` label is set to the `:` address of the target.
After relabeling, the `instance` label is set to the value of `__address__` by default if
it was not set during relabeling.
From cf9d093f8ff2e930ef979c1800ff58c6da52c0ad Mon Sep 17 00:00:00 2001
From: Matt
Date: Thu, 5 Feb 2026 09:40:45 +0000
Subject: [PATCH 117/165] discovery/aws: Add MSK service discovery (#17411) (#17600)
Relates to
https://github.com/prometheus/prometheus/issues/16944#issuecomment-3164760343
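For reviewers, a minimal sketch of wiring up the new role; the region,
port, and cluster ARN below are placeholder values, and the YAML shape
mirrors what the new cases in aws_test.go exercise:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"

        "github.com/prometheus/prometheus/discovery/aws"
    )

    func main() {
        // Hypothetical msk role config. SDConfig.UnmarshalYAML copies the
        // shared fields (region, port, clusters) into the embedded
        // MSKSDConfig, starting from DefaultMSKSDConfig.
        raw := `{role: msk, region: us-west-2, port: 11001,
          clusters: ["arn:aws:kafka:us-west-2:123456789012:cluster/demo/abc-123"]}`

        var cfg aws.SDConfig
        if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
            panic(err)
        }
        fmt.Println(cfg.Role, cfg.MSKSDConfig.Port, cfg.MSKSDConfig.Clusters)
    }

Since the region is set explicitly here, no credential or IMDS lookup
happens at unmarshal time; the AWS client is only built lazily on the
first refresh.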
Signed-off-by: bwplotka
Signed-off-by: matt-gp
Co-authored-by: Bartlomiej Plotka
---
cmd/prometheus/testdata/features.json | 1 +
discovery/aws/aws.go | 42 +-
discovery/aws/aws_test.go | 26 +
discovery/aws/metrics_msk.go | 32 +
discovery/aws/msk.go | 463 +++++++++++
discovery/aws/msk_test.go | 1057 +++++++++++++++++++++++++
docs/configuration/configuration.md | 49 +-
go.mod | 7 +-
go.sum | 14 +-
9 files changed, 1678 insertions(+), 13 deletions(-)
create mode 100644 discovery/aws/metrics_msk.go
create mode 100644 discovery/aws/msk.go
create mode 100644 discovery/aws/msk_test.go
diff --git a/cmd/prometheus/testdata/features.json b/cmd/prometheus/testdata/features.json
index 4f74b7e810..c39f60ab33 100644
--- a/cmd/prometheus/testdata/features.json
+++ b/cmd/prometheus/testdata/features.json
@@ -196,6 +196,7 @@
"lightsail": true,
"linode": true,
"marathon": true,
+ "msk": true,
"nerve": true,
"nomad": true,
"openstack": true,
diff --git a/discovery/aws/aws.go b/discovery/aws/aws.go
index be6b4dabbe..9db87965bb 100644
--- a/discovery/aws/aws.go
+++ b/discovery/aws/aws.go
@@ -43,6 +43,7 @@ const (
RoleEC2 Role = "ec2"
RoleECS Role = "ecs"
RoleLightsail Role = "lightsail"
+ RoleMSK Role = "msk"
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -51,7 +52,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
switch *c {
- case RoleEC2, RoleECS, RoleLightsail:
+ case RoleEC2, RoleECS, RoleLightsail, RoleMSK:
return nil
default:
return fmt.Errorf("unknown AWS SD role %q", *c)
@@ -78,13 +79,14 @@ type SDConfig struct {
// ec2 specific
Filters []*EC2Filter `yaml:"filters,omitempty"`
- // ecs specific
+ // ecs, msk specific
Clusters []string `yaml:"clusters,omitempty"`
// Embedded sub-configs (internal use only, not serialized)
*EC2SDConfig `yaml:"-"`
*ECSSDConfig `yaml:"-"`
*LightsailSDConfig `yaml:"-"`
+ *MSKSDConfig `yaml:"-"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface for SDConfig.
@@ -195,6 +197,39 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
if c.RefreshInterval != 0 {
c.LightsailSDConfig.RefreshInterval = c.RefreshInterval
}
+ case RoleMSK:
+ if c.MSKSDConfig == nil {
+ mskConfig := DefaultMSKSDConfig
+ c.MSKSDConfig = &mskConfig
+ }
+ c.MSKSDConfig.HTTPClientConfig = c.HTTPClientConfig
+ if c.Region != "" {
+ c.MSKSDConfig.Region = c.Region
+ }
+ if c.Endpoint != "" {
+ c.MSKSDConfig.Endpoint = c.Endpoint
+ }
+ if c.AccessKey != "" {
+ c.MSKSDConfig.AccessKey = c.AccessKey
+ }
+ if c.SecretKey != "" {
+ c.MSKSDConfig.SecretKey = c.SecretKey
+ }
+ if c.Profile != "" {
+ c.MSKSDConfig.Profile = c.Profile
+ }
+ if c.RoleARN != "" {
+ c.MSKSDConfig.RoleARN = c.RoleARN
+ }
+ if c.Port != 0 {
+ c.MSKSDConfig.Port = c.Port
+ }
+ if c.RefreshInterval != 0 {
+ c.MSKSDConfig.RefreshInterval = c.RefreshInterval
+ }
+ if c.Clusters != nil {
+ c.MSKSDConfig.Clusters = c.Clusters
+ }
default:
return fmt.Errorf("unknown AWS SD role %q", c.Role)
}
@@ -226,6 +261,9 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
case RoleLightsail:
opts.Metrics = &lightsailMetrics{refreshMetrics: awsMetrics.refreshMetrics}
return NewLightsailDiscovery(c.LightsailSDConfig, opts)
+ case RoleMSK:
+ opts.Metrics = &mskMetrics{refreshMetrics: awsMetrics.refreshMetrics}
+ return NewMSKDiscovery(c.MSKSDConfig, opts)
default:
return nil, fmt.Errorf("unknown AWS SD role %q", c.Role)
}
diff --git a/discovery/aws/aws_test.go b/discovery/aws/aws_test.go
index dc1f2044ec..b47a6cd92c 100644
--- a/discovery/aws/aws_test.go
+++ b/discovery/aws/aws_test.go
@@ -272,6 +272,32 @@ func TestMultipleSDConfigsDoNotShareState(t *testing.T) {
"LightsailSDConfig objects should not share the same memory address")
},
},
+ {
+ name: "MSKMultipleJobsDifferentPorts",
+ yaml: `
+- role: msk
+ region: ap-south-1
+ port: 6060
+ clusters: ["cluster-1"]
+- role: msk
+ region: ap-south-1
+ port: 6061
+ clusters: ["cluster-2"]`,
+ validateFunc: func(t *testing.T, cfg1, cfg2 *SDConfig) {
+ require.Equal(t, RoleMSK, cfg1.Role)
+ require.Equal(t, RoleMSK, cfg2.Role)
+ require.NotNil(t, cfg1.MSKSDConfig)
+ require.NotNil(t, cfg2.MSKSDConfig)
+
+ require.Equal(t, 6060, cfg1.MSKSDConfig.Port)
+ require.Equal(t, []string{"cluster-1"}, cfg1.MSKSDConfig.Clusters)
+ require.Equal(t, 6061, cfg2.MSKSDConfig.Port)
+ require.Equal(t, []string{"cluster-2"}, cfg2.MSKSDConfig.Clusters)
+
+ require.NotSame(t, cfg1.MSKSDConfig, cfg2.MSKSDConfig,
+ "MSKSDConfig objects should not share the same memory address")
+ },
+ },
}
for _, tt := range tests {
diff --git a/discovery/aws/metrics_msk.go b/discovery/aws/metrics_msk.go
new file mode 100644
index 0000000000..fc69f57aa1
--- /dev/null
+++ b/discovery/aws/metrics_msk.go
@@ -0,0 +1,32 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "github.com/prometheus/prometheus/discovery"
+)
+
+type mskMetrics struct {
+ refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+var _ discovery.DiscovererMetrics = (*mskMetrics)(nil)
+
+// Register implements discovery.DiscovererMetrics.
+func (*mskMetrics) Register() error {
+ return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (*mskMetrics) Unregister() {}
diff --git a/discovery/aws/msk.go b/discovery/aws/msk.go
new file mode 100644
index 0000000000..2a2b240d49
--- /dev/null
+++ b/discovery/aws/msk.go
@@ -0,0 +1,463 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsConfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ "github.com/aws/aws-sdk-go-v2/service/kafka"
+ "github.com/aws/aws-sdk-go-v2/service/kafka/types"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/util/strutil"
+)
+
+type NodeType string
+
+const (
+ NodeTypeBroker NodeType = "BROKER"
+ NodeTypeController NodeType = "CONTROLLER"
+)
+
+const (
+ mskLabel = model.MetaLabelPrefix + "msk_"
+
+ // Cluster labels.
+ mskLabelCluster = mskLabel + "cluster_"
+ mskLabelClusterName = mskLabelCluster + "name"
+ mskLabelClusterARN = mskLabelCluster + "arn"
+ mskLabelClusterState = mskLabelCluster + "state"
+ mskLabelClusterType = mskLabelCluster + "type"
+ mskLabelClusterVersion = mskLabelCluster + "version"
+ mskLabelClusterJmxExporterEnabled = mskLabelCluster + "jmx_exporter_enabled"
+ mskLabelClusterConfigurationARN = mskLabelCluster + "configuration_arn"
+ mskLabelClusterConfigurationRevision = mskLabelCluster + "configuration_revision"
+ mskLabelClusterKafkaVersion = mskLabelCluster + "kafka_version"
+ mskLabelClusterTags = mskLabelCluster + "tag_"
+
+ // Node labels.
+ mskLabelNode = mskLabel + "node_"
+ mskLabelNodeType = mskLabelNode + "type"
+ mskLabelNodeARN = mskLabelNode + "arn"
+ mskLabelNodeAddedTime = mskLabelNode + "added_time"
+ mskLabelNodeInstanceType = mskLabelNode + "instance_type"
+ mskLabelNodeAttachedENI = mskLabelNode + "attached_eni"
+
+ // Broker labels.
+ mskLabelBroker = mskLabel + "broker_"
+ mskLabelBrokerEndpointIndex = mskLabelBroker + "endpoint_index"
+ mskLabelBrokerID = mskLabelBroker + "id"
+ mskLabelBrokerClientSubnet = mskLabelBroker + "client_subnet"
+ mskLabelBrokerClientVPCIP = mskLabelBroker + "client_vpc_ip"
+ mskLabelBrokerNodeExporterEnabled = mskLabelBroker + "node_exporter_enabled"
+
+ // Controller labels.
+ mskLabelController = mskLabel + "controller_"
+ mskLabelControllerEndpointIndex = mskLabelController + "endpoint_index"
+)
+
+// DefaultMSKSDConfig is the default MSK SD configuration.
+var DefaultMSKSDConfig = MSKSDConfig{
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+}
+
+func init() {
+ discovery.RegisterConfig(&MSKSDConfig{})
+}
+
+// MSKSDConfig is the configuration for MSK based service discovery.
+type MSKSDConfig struct {
+ Region string `yaml:"region"`
+ Endpoint string `yaml:"endpoint"`
+ AccessKey string `yaml:"access_key,omitempty"`
+ SecretKey config.Secret `yaml:"secret_key,omitempty"`
+ Profile string `yaml:"profile,omitempty"`
+ RoleARN string `yaml:"role_arn,omitempty"`
+ Clusters []string `yaml:"clusters,omitempty"`
+ Port int `yaml:"port"`
+ RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
+
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+}
+
+// NewDiscovererMetrics implements discovery.Config.
+func (*MSKSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+ return &mskMetrics{
+ refreshMetrics: rmi,
+ }
+}
+
+// Name returns the name of the MSK Config.
+func (*MSKSDConfig) Name() string { return "msk" }
+
+// NewDiscoverer returns a Discoverer for the MSK Config.
+func (c *MSKSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ return NewMSKDiscovery(c, opts)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface for the MSK Config.
+func (c *MSKSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
+ *c = DefaultMSKSDConfig
+ type plain MSKSDConfig
+ err := unmarshal((*plain)(c))
+ if err != nil {
+ return err
+ }
+
+ if c.Region == "" {
+ cfg, err := awsConfig.LoadDefaultConfig(context.Background())
+ if err != nil {
+ return err
+ }
+ if cfg.Region != "" {
+ // If the region is already set in the config, use it (env vars).
+ c.Region = cfg.Region
+ }
+
+ if c.Region == "" {
+ // Try to get the region from IMDS.
+ imdsClient := imds.NewFromConfig(cfg)
+ region, err := imdsClient.GetRegion(context.Background(), &imds.GetRegionInput{})
+ if err != nil {
+ return err
+ }
+ c.Region = region.Region
+ }
+ }
+
+ if c.Region == "" {
+ return errors.New("MSK SD configuration requires a region")
+ }
+
+ return c.HTTPClientConfig.Validate()
+}
+
+type mskClient interface {
+ DescribeClusterV2(context.Context, *kafka.DescribeClusterV2Input, ...func(*kafka.Options)) (*kafka.DescribeClusterV2Output, error)
+ ListClustersV2(context.Context, *kafka.ListClustersV2Input, ...func(*kafka.Options)) (*kafka.ListClustersV2Output, error)
+ ListNodes(context.Context, *kafka.ListNodesInput, ...func(*kafka.Options)) (*kafka.ListNodesOutput, error)
+}
+
+// MSKDiscovery periodically performs MSK-SD requests. It implements
+// the Discoverer interface.
+type MSKDiscovery struct {
+ *refresh.Discovery
+ logger *slog.Logger
+ cfg *MSKSDConfig
+ msk mskClient
+}
+
+// NewMSKDiscovery returns a new MSKDiscovery which periodically refreshes its targets.
+func NewMSKDiscovery(conf *MSKSDConfig, opts discovery.DiscovererOptions) (*MSKDiscovery, error) {
+ m, ok := opts.Metrics.(*mskMetrics)
+ if !ok {
+ return nil, errors.New("invalid discovery metrics type")
+ }
+
+ if opts.Logger == nil {
+ opts.Logger = promslog.NewNopLogger()
+ }
+ d := &MSKDiscovery{
+ logger: opts.Logger,
+ cfg: conf,
+ }
+ d.Discovery = refresh.NewDiscovery(
+ refresh.Options{
+ Logger: opts.Logger,
+ Mech: "msk",
+ Interval: time.Duration(d.cfg.RefreshInterval),
+ RefreshF: d.refresh,
+ MetricsInstantiator: m.refreshMetrics,
+ },
+ )
+ return d, nil
+}
+
+func (d *MSKDiscovery) initMskClient(ctx context.Context) error {
+ if d.msk != nil {
+ return nil
+ }
+
+ if d.cfg.Region == "" {
+ return errors.New("region must be set for MSK service discovery")
+ }
+
+ // Build the HTTP client from the provided HTTPClientConfig.
+ client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "msk_sd")
+ if err != nil {
+ return err
+ }
+
+ // Build the AWS config with the provided region.
+ var configOptions []func(*awsConfig.LoadOptions) error
+ configOptions = append(configOptions, awsConfig.WithRegion(d.cfg.Region))
+ configOptions = append(configOptions, awsConfig.WithHTTPClient(client))
+
+ // Only set static credentials if both access key and secret key are provided
+ // Otherwise, let AWS SDK use its default credential chain
+ if d.cfg.AccessKey != "" && d.cfg.SecretKey != "" {
+ credProvider := credentials.NewStaticCredentialsProvider(d.cfg.AccessKey, string(d.cfg.SecretKey), "")
+ configOptions = append(configOptions, awsConfig.WithCredentialsProvider(credProvider))
+ }
+
+ if d.cfg.Profile != "" {
+ configOptions = append(configOptions, awsConfig.WithSharedConfigProfile(d.cfg.Profile))
+ }
+
+ cfg, err := awsConfig.LoadDefaultConfig(ctx, configOptions...)
+ if err != nil {
+ d.logger.Error("Failed to create AWS config", "error", err)
+ return fmt.Errorf("could not create aws config: %w", err)
+ }
+
+ // If the role ARN is set, assume the role to get credentials and set the credentials provider in the config.
+ if d.cfg.RoleARN != "" {
+ assumeProvider := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), d.cfg.RoleARN)
+ cfg.Credentials = aws.NewCredentialsCache(assumeProvider)
+ }
+
+ d.msk = kafka.NewFromConfig(cfg, func(options *kafka.Options) {
+ if d.cfg.Endpoint != "" {
+ options.BaseEndpoint = &d.cfg.Endpoint
+ }
+ options.HTTPClient = client
+ })
+
+ // Test credentials by making a simple API call
+ testCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ _, err = d.msk.ListClustersV2(testCtx, &kafka.ListClustersV2Input{})
+ if err != nil {
+ d.logger.Error("Failed to test MSK credentials", "error", err)
+ return fmt.Errorf("MSK credential test failed: %w", err)
+ }
+
+ return nil
+}
+
+func (d *MSKDiscovery) describeClusters(ctx context.Context, clusterARNs []string) ([]types.Cluster, error) {
+ var (
+ clusters []types.Cluster
+ wg sync.WaitGroup
+ mu sync.Mutex
+ errs []error
+ )
+ for _, clusterARN := range clusterARNs {
+ wg.Add(1)
+ go func(clusterARN string) {
+ defer wg.Done()
+ cluster, err := d.msk.DescribeClusterV2(ctx, &kafka.DescribeClusterV2Input{
+ ClusterArn: aws.String(clusterARN),
+ })
+ if err != nil {
+ mu.Lock()
+ errs = append(errs, fmt.Errorf("could not describe cluster %v: %w", clusterARN, err))
+ mu.Unlock()
+ return
+ }
+ mu.Lock()
+ clusters = append(clusters, *cluster.ClusterInfo)
+ mu.Unlock()
+ }(clusterARN)
+ }
+ wg.Wait()
+ if len(errs) > 0 {
+ return nil, fmt.Errorf("errors occurred while describing clusters: %v", errs)
+ }
+
+ return clusters, nil
+}
+
+func (d *MSKDiscovery) listClusters(ctx context.Context) ([]types.Cluster, error) {
+ var (
+ clusters []types.Cluster
+ nextToken *string
+ )
+ for {
+ listClustersInput := kafka.ListClustersV2Input{
+ ClusterTypeFilter: aws.String("PROVISIONED"),
+ MaxResults: aws.Int32(100),
+ NextToken: nextToken,
+ }
+
+ resp, err := d.msk.ListClustersV2(ctx, &listClustersInput)
+ if err != nil {
+ return nil, fmt.Errorf("could not list clusters: %w", err)
+ }
+
+ clusters = append(clusters, resp.ClusterInfoList...)
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
+ }
+
+ return clusters, nil
+}
+
+func (d *MSKDiscovery) listNodes(ctx context.Context, clusterARN string) ([]types.NodeInfo, error) {
+ var (
+ nodes []types.NodeInfo
+ nextToken *string
+ )
+ for {
+ resp, err := d.msk.ListNodes(ctx, &kafka.ListNodesInput{
+ ClusterArn: aws.String(clusterARN),
+ MaxResults: aws.Int32(100),
+ NextToken: nextToken,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not list nodes for cluster %v: %w", clusterARN, err)
+ }
+
+ nodes = append(nodes, resp.NodeInfoList...)
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
+ }
+
+ return nodes, nil
+}
+
+func (d *MSKDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ err := d.initMskClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ tg := &targetgroup.Group{
+ Source: d.cfg.Region,
+ }
+
+ var clusters []types.Cluster
+ if len(d.cfg.Clusters) > 0 {
+ clusters, err = d.describeClusters(ctx, d.cfg.Clusters)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ clusters, err = d.listClusters(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var (
+ targetsMu sync.Mutex
+ wg sync.WaitGroup
+ )
+ for _, cluster := range clusters {
+ wg.Add(1)
+ go func(cluster types.Cluster) {
+ defer wg.Done()
+
+ nodes, err := d.listNodes(ctx, aws.ToString(cluster.ClusterArn))
+ if err != nil {
+ d.logger.Error("Failed to list nodes", "cluster", aws.ToString(cluster.ClusterName), "error", err)
+ return
+ }
+
+ for _, node := range nodes {
+ labels := model.LabelSet{
+ mskLabelClusterName: model.LabelValue(aws.ToString(cluster.ClusterName)),
+ mskLabelClusterARN: model.LabelValue(aws.ToString(cluster.ClusterArn)),
+ mskLabelClusterState: model.LabelValue(string(cluster.State)),
+ mskLabelClusterType: model.LabelValue(string(cluster.ClusterType)),
+ mskLabelClusterVersion: model.LabelValue(aws.ToString(cluster.CurrentVersion)),
+ mskLabelNodeARN: model.LabelValue(aws.ToString(node.NodeARN)),
+ mskLabelNodeAddedTime: model.LabelValue(aws.ToString(node.AddedToClusterTime)),
+ mskLabelNodeInstanceType: model.LabelValue(aws.ToString(node.InstanceType)),
+ mskLabelClusterJmxExporterEnabled: model.LabelValue(strconv.FormatBool(*cluster.Provisioned.OpenMonitoring.Prometheus.JmxExporter.EnabledInBroker)),
+ mskLabelClusterConfigurationARN: model.LabelValue(aws.ToString(cluster.Provisioned.CurrentBrokerSoftwareInfo.ConfigurationArn)),
+ mskLabelClusterConfigurationRevision: model.LabelValue(strconv.FormatInt(*cluster.Provisioned.CurrentBrokerSoftwareInfo.ConfigurationRevision, 10)),
+ mskLabelClusterKafkaVersion: model.LabelValue(aws.ToString(cluster.Provisioned.CurrentBrokerSoftwareInfo.KafkaVersion)),
+ }
+
+ for key, value := range cluster.Tags {
+ labels[model.LabelName(mskLabelClusterTags+strutil.SanitizeLabelName(key))] = model.LabelValue(value)
+ }
+
+ switch nodeType(node) {
+ case NodeTypeBroker:
+ labels[mskLabelNodeType] = model.LabelValue(NodeTypeBroker)
+ labels[mskLabelNodeAttachedENI] = model.LabelValue(aws.ToString(node.BrokerNodeInfo.AttachedENIId))
+ labels[mskLabelBrokerID] = model.LabelValue(fmt.Sprintf("%.0f", aws.ToFloat64(node.BrokerNodeInfo.BrokerId)))
+ labels[mskLabelBrokerClientSubnet] = model.LabelValue(aws.ToString(node.BrokerNodeInfo.ClientSubnet))
+ labels[mskLabelBrokerClientVPCIP] = model.LabelValue(aws.ToString(node.BrokerNodeInfo.ClientVpcIpAddress))
+ labels[mskLabelBrokerNodeExporterEnabled] = model.LabelValue(strconv.FormatBool(*cluster.Provisioned.OpenMonitoring.Prometheus.NodeExporter.EnabledInBroker))
+
+ for idx, endpoint := range node.BrokerNodeInfo.Endpoints {
+ endpointLabels := labels.Clone()
+ endpointLabels[mskLabelBrokerEndpointIndex] = model.LabelValue(strconv.Itoa(idx))
+ endpointLabels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(endpoint, strconv.Itoa(d.cfg.Port)))
+
+ targetsMu.Lock()
+ tg.Targets = append(tg.Targets, endpointLabels)
+ targetsMu.Unlock()
+ }
+
+ case NodeTypeController:
+ labels[mskLabelNodeType] = model.LabelValue(NodeTypeController)
+
+ for idx, endpoint := range node.ControllerNodeInfo.Endpoints {
+ endpointLabels := labels.Clone()
+ endpointLabels[mskLabelControllerEndpointIndex] = model.LabelValue(strconv.Itoa(idx))
+ endpointLabels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(endpoint, strconv.Itoa(d.cfg.Port)))
+
+ targetsMu.Lock()
+ tg.Targets = append(tg.Targets, endpointLabels)
+ targetsMu.Unlock()
+ }
+ default:
+ continue
+ }
+ }
+ }(cluster)
+ }
+ wg.Wait()
+
+ return []*targetgroup.Group{tg}, nil
+}
+
+func nodeType(node types.NodeInfo) NodeType {
+ if node.BrokerNodeInfo != nil {
+ return NodeTypeBroker
+ } else if node.ControllerNodeInfo != nil {
+ return NodeTypeController
+ }
+ return ""
+}
diff --git a/discovery/aws/msk_test.go b/discovery/aws/msk_test.go
new file mode 100644
index 0000000000..31744221ef
--- /dev/null
+++ b/discovery/aws/msk_test.go
@@ -0,0 +1,1057 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/kafka"
+ "github.com/aws/aws-sdk-go-v2/service/kafka/types"
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+// Struct for test data.
+type mskDataStore struct {
+ region string
+ clusters []types.Cluster
+ nodes map[string][]types.NodeInfo // keyed by cluster ARN
+}
+
+func TestMSKDiscoveryListClusters(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ mskData *mskDataStore
+ expected []types.Cluster
+ }{
+ {
+ name: "MultipleClusters",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ {
+ ClusterName: strptr("prod-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/prod-cluster/def-456"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ },
+ },
+ expected: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ {
+ ClusterName: strptr("prod-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/prod-cluster/def-456"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ },
+ },
+ {
+ name: "SingleCluster",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("single-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/single-cluster/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ },
+ },
+ expected: []types.Cluster{
+ {
+ ClusterName: strptr("single-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/single-cluster/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ },
+ },
+ {
+ name: "NoClusters",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{},
+ },
+ expected: nil,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockMSKClient(tt.mskData)
+
+ d := &MSKDiscovery{
+ msk: client,
+ cfg: &MSKSDConfig{
+ Region: tt.mskData.region,
+ },
+ }
+
+ clusters, err := d.listClusters(ctx)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, clusters)
+ })
+ }
+}
+
+func TestMSKDiscoveryDescribeClusters(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ mskData *mskDataStore
+ clusterARNs []string
+ expected []types.Cluster
+ }{
+ {
+ name: "SingleCluster",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.2.3"),
+ Tags: map[string]string{
+ "Environment": "production",
+ "Team": "platform",
+ },
+ },
+ },
+ },
+ clusterARNs: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"},
+ expected: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.2.3"),
+ Tags: map[string]string{
+ "Environment": "production",
+ "Team": "platform",
+ },
+ },
+ },
+ },
+ {
+ name: "MultipleClusters",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("cluster-1"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/cluster-1/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ {
+ ClusterName: strptr("cluster-2"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/cluster-2/def-456"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ Tags: map[string]string{
+ "Stage": "prod",
+ },
+ },
+ },
+ },
+ clusterARNs: []string{
+ "arn:aws:kafka:us-east-1:123456789012:cluster/cluster-1/xyz-789",
+ "arn:aws:kafka:us-east-1:123456789012:cluster/cluster-2/def-456",
+ },
+ expected: []types.Cluster{
+ {
+ ClusterName: strptr("cluster-1"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/cluster-1/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ {
+ ClusterName: strptr("cluster-2"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/cluster-2/def-456"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ Tags: map[string]string{
+ "Stage": "prod",
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockMSKClient(tt.mskData)
+
+ d := &MSKDiscovery{
+ msk: client,
+ cfg: &MSKSDConfig{
+ Region: tt.mskData.region,
+ },
+ }
+
+ clusters, err := d.describeClusters(ctx, tt.clusterARNs)
+ require.NoError(t, err)
+
+ // Sort clusters by ARN to handle non-deterministic ordering from goroutines
+ sort.Slice(clusters, func(i, j int) bool {
+ return aws.ToString(clusters[i].ClusterArn) < aws.ToString(clusters[j].ClusterArn)
+ })
+ sort.Slice(tt.expected, func(i, j int) bool {
+ return aws.ToString(tt.expected[i].ClusterArn) < aws.ToString(tt.expected[j].ClusterArn)
+ })
+
+ require.Equal(t, tt.expected, clusters)
+ })
+ }
+}
+
+func TestMSKDiscoveryListNodes(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ mskData *mskDataStore
+ clusterARN string
+ expected []types.NodeInfo
+ }{
+ {
+ name: "ClusterWithBrokers",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-12345"),
+ ClientVpcIpAddress: strptr("10.0.1.100"),
+ Endpoints: []string{"b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-12345"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ ClientSubnet: strptr("subnet-67890"),
+ ClientVpcIpAddress: strptr("10.0.1.101"),
+ Endpoints: []string{"b-2.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-67890"),
+ },
+ },
+ },
+ },
+ },
+ clusterARN: "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123",
+ expected: []types.NodeInfo{
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-12345"),
+ ClientVpcIpAddress: strptr("10.0.1.100"),
+ Endpoints: []string{"b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-12345"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ ClientSubnet: strptr("subnet-67890"),
+ ClientVpcIpAddress: strptr("10.0.1.101"),
+ Endpoints: []string{"b-2.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-67890"),
+ },
+ },
+ },
+ },
+ {
+ name: "ClusterWithNoNodes",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789": {},
+ },
+ },
+ clusterARN: "arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789",
+ expected: nil,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockMSKClient(tt.mskData)
+
+ d := &MSKDiscovery{
+ msk: client,
+ cfg: &MSKSDConfig{
+ Region: tt.mskData.region,
+ },
+ }
+
+ nodes, err := d.listNodes(ctx, tt.clusterARN)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, nodes)
+ })
+ }
+}
+
+func TestMSKDiscoveryRefresh(t *testing.T) {
+ ctx := context.Background()
+
+ tests := []struct {
+ name string
+ mskData *mskDataStore
+ config *MSKSDConfig
+ expected []*targetgroup.Group
+ }{
+ {
+ name: "ClusterWithBrokersUsingClustersConfig",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.2.3"),
+ Tags: map[string]string{
+ "Environment": "production",
+ "Team": "platform",
+ },
+ Provisioned: &types.Provisioned{
+ CurrentBrokerSoftwareInfo: &types.BrokerSoftwareInfo{
+ ConfigurationArn: strptr("arn:aws:kafka:us-west-2:123456789012:configuration/my-config/abc-123"),
+ ConfigurationRevision: aws.Int64(1),
+ KafkaVersion: strptr("2.8.1"),
+ },
+ OpenMonitoring: &types.OpenMonitoringInfo{
+ Prometheus: &types.PrometheusInfo{
+ JmxExporter: &types.JmxExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ NodeExporter: &types.NodeExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ },
+ },
+ },
+ },
+ },
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-12345"),
+ ClientVpcIpAddress: strptr("10.0.1.100"),
+ Endpoints: []string{"b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-12345"),
+ },
+ },
+ },
+ },
+ },
+ config: &MSKSDConfig{
+ Region: "us-west-2",
+ Port: 80,
+ Clusters: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"},
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("test-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.2.3"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/my-config/abc-123"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("2.8.1"),
+ "__meta_msk_cluster_tag_Environment": model.LabelValue("production"),
+ "__meta_msk_cluster_tag_Team": model.LabelValue("platform"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-01-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-12345"),
+ "__meta_msk_broker_id": model.LabelValue("1"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-12345"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.1.100"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "NoClustersWithEmptyClustersConfig",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{},
+ },
+ config: &MSKSDConfig{
+ Region: "us-east-1",
+ Port: 80,
+ Clusters: []string{}, // Empty clusters list uses listClusters
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-east-1",
+ },
+ },
+ },
+ {
+ name: "ClusterWithBrokersUsingListClusters",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("auto-discovered-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/auto-discovered-cluster/xyz-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.0.0"),
+ Provisioned: &types.Provisioned{
+ CurrentBrokerSoftwareInfo: &types.BrokerSoftwareInfo{
+ ConfigurationArn: strptr("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ ConfigurationRevision: aws.Int64(1),
+ KafkaVersion: strptr("3.3.1"),
+ },
+ OpenMonitoring: &types.OpenMonitoringInfo{
+ Prometheus: &types.PrometheusInfo{
+ JmxExporter: &types.JmxExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ NodeExporter: &types.NodeExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ },
+ },
+ },
+ },
+ },
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/auto-discovered-cluster/xyz-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-auto"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-auto"),
+ ClientVpcIpAddress: strptr("10.0.1.200"),
+ Endpoints: []string{"b-auto.cluster.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-auto"),
+ },
+ },
+ },
+ },
+ },
+ config: &MSKSDConfig{
+ Region: "us-west-2",
+ Port: 80,
+ Clusters: nil, // nil clusters list uses listClusters (backward compatibility)
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("b-auto.cluster.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("auto-discovered-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/auto-discovered-cluster/xyz-123"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/broker-auto"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-01-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-auto"),
+ "__meta_msk_broker_id": model.LabelValue("1"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-auto"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.1.200"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ClusterWithBrokersAndControllersUsingClustersConfig",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("kraft-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.0.0"),
+ Tags: map[string]string{
+ "Type": "kraft",
+ },
+ Provisioned: &types.Provisioned{
+ CurrentBrokerSoftwareInfo: &types.BrokerSoftwareInfo{
+ ConfigurationArn: strptr("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ ConfigurationRevision: aws.Int64(2),
+ KafkaVersion: strptr("3.3.1"),
+ },
+ OpenMonitoring: &types.OpenMonitoringInfo{
+ Prometheus: &types.PrometheusInfo{
+ JmxExporter: &types.JmxExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ NodeExporter: &types.NodeExporterInfo{
+ EnabledInBroker: aws.Bool(false),
+ },
+ },
+ },
+ },
+ },
+ },
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-06-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-abc123"),
+ ClientVpcIpAddress: strptr("10.0.2.100"),
+ Endpoints: []string{"b-1.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-broker-1"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ AddedToClusterTime: strptr("2023-06-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ ClientSubnet: strptr("subnet-abc124"),
+ ClientVpcIpAddress: strptr("10.0.2.101"),
+ Endpoints: []string{"b-2.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-broker-2"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/controller-1"),
+ AddedToClusterTime: strptr("2023-06-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ ControllerNodeInfo: &types.ControllerNodeInfo{
+ Endpoints: []string{"c-1.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com"},
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/controller-2"),
+ AddedToClusterTime: strptr("2023-06-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ ControllerNodeInfo: &types.ControllerNodeInfo{
+ Endpoints: []string{"c-2.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com"},
+ },
+ },
+ },
+ },
+ },
+ config: &MSKSDConfig{
+ Region: "us-west-2",
+ Port: 80,
+ Clusters: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"},
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("b-1.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("kraft-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("2"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_cluster_tag_Type": model.LabelValue("kraft"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-06-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-broker-1"),
+ "__meta_msk_broker_id": model.LabelValue("1"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-abc123"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.2.100"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("false"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("b-2.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("kraft-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("2"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_cluster_tag_Type": model.LabelValue("kraft"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-06-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-broker-2"),
+ "__meta_msk_broker_id": model.LabelValue("2"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-abc124"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.2.101"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("false"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-1.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("kraft-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("2"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_cluster_tag_Type": model.LabelValue("kraft"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/controller-1"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-06-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-2.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("kraft-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("2"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_cluster_tag_Type": model.LabelValue("kraft"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/controller-2"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-06-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("0"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "NodesWithMultipleEndpointsUsingClustersConfig",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("multi-endpoint-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("2.0.0"),
+ Provisioned: &types.Provisioned{
+ CurrentBrokerSoftwareInfo: &types.BrokerSoftwareInfo{
+ ConfigurationArn: strptr("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ ConfigurationRevision: aws.Int64(1),
+ KafkaVersion: strptr("3.4.0"),
+ },
+ OpenMonitoring: &types.OpenMonitoringInfo{
+ Prometheus: &types.PrometheusInfo{
+ JmxExporter: &types.JmxExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ NodeExporter: &types.NodeExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ },
+ },
+ },
+ },
+ },
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-east-1:123456789012:node/broker-multi"),
+ AddedToClusterTime: strptr("2023-08-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.xlarge"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(3),
+ ClientSubnet: strptr("subnet-multi-1"),
+ ClientVpcIpAddress: strptr("10.0.3.50"),
+ // Multiple endpoints for this broker
+ Endpoints: []string{"b-3-1.cluster.kafka.us-east-1.amazonaws.com", "b-3-2.cluster.kafka.us-east-1.amazonaws.com", "b-3-3.cluster.kafka.us-east-1.amazonaws.com"},
+ AttachedENIId: strptr("eni-multi-broker"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ AddedToClusterTime: strptr("2023-08-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ ControllerNodeInfo: &types.ControllerNodeInfo{
+ // Multiple endpoints for this controller
+ Endpoints: []string{"c-1-1.cluster.kafka.us-east-1.amazonaws.com", "c-1-2.cluster.kafka.us-east-1.amazonaws.com", "c-1-3.cluster.kafka.us-east-1.amazonaws.com", "c-1-4.cluster.kafka.us-east-1.amazonaws.com"},
+ },
+ },
+ },
+ },
+ },
+ config: &MSKSDConfig{
+ Region: "us-east-1",
+ Port: 80,
+ Clusters: []string{"arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"},
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-east-1",
+ Targets: []model.LabelSet{
+ // Broker with 3 endpoints - creates 3 targets with different endpoint indices
+ {
+ model.AddressLabel: model.LabelValue("b-3-1.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/broker-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.xlarge"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-multi-broker"),
+ "__meta_msk_broker_id": model.LabelValue("3"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-multi-1"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.3.50"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("b-3-2.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/broker-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.xlarge"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-multi-broker"),
+ "__meta_msk_broker_id": model.LabelValue("3"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-multi-1"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.3.50"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("1"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("b-3-3.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/broker-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.xlarge"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-multi-broker"),
+ "__meta_msk_broker_id": model.LabelValue("3"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-multi-1"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.3.50"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("2"),
+ },
+ // Controller with 4 endpoints - creates 4 targets with different endpoint indices
+ {
+ model.AddressLabel: model.LabelValue("c-1-1.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-1-2.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("1"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-1-3.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("2"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-1-4.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("3"),
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockMSKClient(tt.mskData)
+
+ config := tt.config
+ if config == nil {
+ // Default config for backward compatibility
+ config = &MSKSDConfig{
+ Region: tt.mskData.region,
+ Port: 80,
+ }
+ }
+
+ d := &MSKDiscovery{
+ msk: client,
+ cfg: config,
+ }
+
+ groups, err := d.refresh(ctx)
+ require.NoError(t, err)
+
+ // Sort targets within each group by address to handle non-deterministic ordering from goroutines
+ for _, group := range groups {
+ if len(group.Targets) > 0 {
+ sort.Slice(group.Targets, func(i, j int) bool {
+ return string(group.Targets[i][model.AddressLabel]) < string(group.Targets[j][model.AddressLabel])
+ })
+ }
+ }
+ for _, group := range tt.expected {
+ if len(group.Targets) > 0 {
+ sort.Slice(group.Targets, func(i, j int) bool {
+ return string(group.Targets[i][model.AddressLabel]) < string(group.Targets[j][model.AddressLabel])
+ })
+ }
+ }
+
+ require.Equal(t, tt.expected, groups)
+ })
+ }
+}
+
+func TestNodeType(t *testing.T) {
+ tests := []struct {
+ name string
+ node types.NodeInfo
+ expected NodeType
+ }{
+ {
+ name: "BrokerNode",
+ node: types.NodeInfo{
+ BrokerNodeInfo: &types.BrokerNodeInfo{},
+ },
+ expected: NodeTypeBroker,
+ },
+ {
+ name: "ControllerNode",
+ node: types.NodeInfo{
+ ControllerNodeInfo: &types.ControllerNodeInfo{},
+ },
+ expected: NodeTypeController,
+ },
+ {
+ name: "UnknownNode",
+ node: types.NodeInfo{},
+ expected: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := nodeType(tt.node)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
+
+// MSK client mock.
+type mockMSKClient struct {
+ mskData mskDataStore
+}
+
+func newMockMSKClient(mskData *mskDataStore) *mockMSKClient {
+ return &mockMSKClient{
+ mskData: *mskData,
+ }
+}
+
+func (m *mockMSKClient) DescribeClusterV2(_ context.Context, input *kafka.DescribeClusterV2Input, _ ...func(*kafka.Options)) (*kafka.DescribeClusterV2Output, error) {
+ inputARN := aws.ToString(input.ClusterArn)
+ for i := range m.mskData.clusters {
+ cluster := &m.mskData.clusters[i]
+ if aws.ToString(cluster.ClusterArn) == inputARN {
+ return &kafka.DescribeClusterV2Output{
+ ClusterInfo: cluster,
+ }, nil
+ }
+ }
+
+ return nil, fmt.Errorf("cluster not found: %s", inputARN)
+}
+
+func (m *mockMSKClient) ListClustersV2(_ context.Context, input *kafka.ListClustersV2Input, _ ...func(*kafka.Options)) (*kafka.ListClustersV2Output, error) {
+ var clusters []types.Cluster
+
+ for _, cluster := range m.mskData.clusters {
+ // Apply cluster name filter if specified
+ if input.ClusterNameFilter != nil && *input.ClusterNameFilter != "" {
+ if cluster.ClusterName != nil && *cluster.ClusterName != *input.ClusterNameFilter {
+ continue
+ }
+ }
+
+ // Apply cluster type filter if specified
+ if input.ClusterTypeFilter != nil && *input.ClusterTypeFilter != "" {
+ if string(cluster.ClusterType) != *input.ClusterTypeFilter {
+ continue
+ }
+ }
+
+ clusters = append(clusters, cluster)
+ }
+
+ return &kafka.ListClustersV2Output{
+ ClusterInfoList: clusters,
+ }, nil
+}
+
+func (m *mockMSKClient) ListNodes(_ context.Context, input *kafka.ListNodesInput, _ ...func(*kafka.Options)) (*kafka.ListNodesOutput, error) {
+ clusterARN := aws.ToString(input.ClusterArn)
+ nodes, exists := m.mskData.nodes[clusterARN]
+ if !exists {
+ return &kafka.ListNodesOutput{
+ NodeInfoList: nil,
+ }, nil
+ }
+
+ return &kafka.ListNodesOutput{
+ NodeInfoList: nodes,
+ }, nil
+}
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index edaf6adbb2..a3193c74c7 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -984,11 +984,56 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_ecs_tag_task_`: each task tag value, keyed by tag name
* `__meta_ecs_tag_ec2_`: each EC2 instance tag value, keyed by tag name (EC2 launch type only)
+#### `msk`
+
+The `msk` role discovers targets from AWS MSK (Managed Streaming for Apache Kafka) provisioned clusters.
+
+**Important**: This service discovery only works with **provisioned clusters**. Serverless clusters are not supported as they do not expose individual broker nodes.
+
+Discovery includes:
+- **Broker nodes**: Kafka broker instances (supports both ZooKeeper-based and KRaft-based clusters)
+- **KRaft Controller nodes**: Controller instances (KRaft-based clusters only)
+
+Note: ZooKeeper nodes are not discoverable via the MSK API. For monitoring, MSK provides:
+- **JMX Exporter**: Available on both broker and KRaft controller nodes (when enabled)
+- **Node Exporter**: Available on broker nodes only (when enabled)
+
+The IAM credentials used must have the following permissions to discover
+scrape targets:
+
+- `kafka:DescribeClusterV2`
+- `kafka:ListClustersV2`
+- `kafka:ListNodes`
+
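+For reference, a minimal IAM policy granting these permissions might look like the
+following sketch (the wildcard `Resource` is only illustrative; scope it down to
+your cluster ARNs where possible):
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "kafka:DescribeClusterV2",
+        "kafka:ListClustersV2",
+        "kafka:ListNodes"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+```
+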
+The following meta labels are available on targets during [relabeling](#relabel_config):
+
+* `__meta_msk_cluster_name`: the name of the MSK cluster
+* `__meta_msk_cluster_arn`: the ARN of the MSK cluster
+* `__meta_msk_cluster_state`: the state of the MSK cluster (e.g., ACTIVE, CREATING, DELETING)
+* `__meta_msk_cluster_type`: the type of the MSK cluster (e.g., PROVISIONED, SERVERLESS)
+* `__meta_msk_cluster_version`: the current version of the MSK cluster
+* `__meta_msk_cluster_kafka_version`: the Kafka version running on the cluster
+* `__meta_msk_cluster_jmx_exporter_enabled`: whether JMX exporter is enabled on the cluster
+* `__meta_msk_cluster_configuration_arn`: the ARN of the MSK configuration
+* `__meta_msk_cluster_configuration_revision`: the revision of the MSK configuration
+* `__meta_msk_cluster_tag_`: each cluster tag value, keyed by tag name
+* `__meta_msk_node_type`: the type of the node (BROKER or CONTROLLER)
+* `__meta_msk_node_arn`: the ARN of the node
+* `__meta_msk_node_added_time`: the time the node was added to the cluster
+* `__meta_msk_node_instance_type`: the instance type of the node
+* `__meta_msk_node_attached_eni`: the ID of the attached ENI
+* `__meta_msk_broker_id`: the broker ID (broker nodes only)
+* `__meta_msk_broker_endpoint_index`: the index of the broker endpoint (broker nodes only)
+* `__meta_msk_broker_client_subnet`: the client subnet of the broker (broker nodes only)
+* `__meta_msk_broker_client_vpc_ip`: the VPC IP address of the broker (broker nodes only)
+* `__meta_msk_broker_node_exporter_enabled`: whether node exporter is enabled on brokers (broker nodes only)
+* `__meta_msk_controller_endpoint_index`: the index of the controller endpoint (controller nodes only)
+
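+For example, a minimal scrape configuration using the `msk` role might look like the
+following sketch (all values are placeholders; this assumes the unified AWS SD section
+documented below is used as `aws_sd_configs`, and that MSK's Open Monitoring exposes
+the JMX exporter on its usual port 11001):
+
+```yaml
+scrape_configs:
+  - job_name: "msk-jmx"
+    aws_sd_configs:
+      - role: msk
+        region: us-west-2
+        port: 11001
+        clusters:
+          - "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"
+    relabel_configs:
+      # Keep only targets where the JMX exporter is actually enabled.
+      - source_labels: [__meta_msk_cluster_jmx_exporter_enabled]
+        regex: "true"
+        action: keep
+```
+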
See below for the configuration options for AWS discovery:
```yaml
# The AWS role to use for service discovery.
-# Must be one of: ec2, lightsail, or ecs.
+# Must be one of: ec2, lightsail, ecs, or msk.
role: <string>
# The AWS region. If blank, the region from the instance metadata is used.
@@ -1024,7 +1069,7 @@ filters:
[ - name: <string>
    values: <string>, [...] ]
-# List of ECS cluster ARNs to discover (ecs role only). If empty, all clusters in the region are discovered.
+# List of ECS or MSK cluster ARNs to discover (ecs and msk roles only). If empty, all clusters in the region are discovered.
# This can significantly improve performance when you only need to monitor specific clusters.
[ clusters: [<string>, ...] ]
diff --git a/go.mod b/go.mod
index 7ceb746720..668029856e 100644
--- a/go.mod
+++ b/go.mod
@@ -11,11 +11,12 @@ require (
github.com/KimMachineGun/automemlimit v0.7.5
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
- github.com/aws/aws-sdk-go-v2 v1.41.0
+ github.com/aws/aws-sdk-go-v2 v1.41.1
github.com/aws/aws-sdk-go-v2/config v1.32.6
github.com/aws/aws-sdk-go-v2/credentials v1.19.6
github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.0
github.com/aws/aws-sdk-go-v2/service/ecs v1.70.0
+ github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7
github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5
github.com/aws/smithy-go v1.24.0
@@ -137,8 +138,8 @@ require (
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
diff --git a/go.sum b/go.sum
index 280724445a..9d2022c63d 100644
--- a/go.sum
+++ b/go.sum
@@ -47,18 +47,18 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
-github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
+github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE=
github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.0 h1:o7eJKe6VYAnqERPlLAvDW5VKXV6eTKv1oxTpMoDP378=
@@ -69,6 +69,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEd
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
+github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7 h1:0jDb9b505gbCmtjH1RT7kx8hDbVDzOhnTeZm7dzskpQ=
+github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7/go.mod h1:tWnHS64fg5ydLHivFlCAtEh/1iMNzr56QsH3F+UTwD4=
github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10 h1:MQuZZ6Tq1qQabPlkVxrCMdyVl70Ogl4AERZKo+y9Wzo=
github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10/go.mod h1:U5C3JME1ibKESmpzBAqlRpTYZfVbTqrb5ICJm+sVVd8=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
From c8e7f4e2a6ba1033ce6c3c73814908a3d824842e Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Thu, 5 Feb 2026 16:11:35 +0000
Subject: [PATCH 118/165] tests: Unify TestDiskFillingUpAfterDisablingOOO and
avoid hiding errors (#18017)
* tests: Unify TestDiskFillingUpAfterDisablingOOO and avoid hiding errors
Signed-off-by: bwplotka
* addressed comments
Signed-off-by: bwplotka
---------
Signed-off-by: bwplotka
---
storage/interface_append.go | 2 +
tsdb/db_append_v2_test.go | 91 -------------------------------------
tsdb/db_test.go | 50 +++++++++++++++-----
tsdb/head_append.go | 3 ++
4 files changed, 43 insertions(+), 103 deletions(-)
diff --git a/storage/interface_append.go b/storage/interface_append.go
index beb17f9e16..3753544eb0 100644
--- a/storage/interface_append.go
+++ b/storage/interface_append.go
@@ -206,6 +206,8 @@ type AppenderTransaction interface {
// This is to support migration to AppenderV2.
// TODO(bwplotka): Remove once migration to AppenderV2 is fully complete.
type LimitedAppenderV1 interface {
+ AppenderTransaction
+
Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error)
AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
}
diff --git a/tsdb/db_append_v2_test.go b/tsdb/db_append_v2_test.go
index 16134e8c93..e6bcfb696d 100644
--- a/tsdb/db_append_v2_test.go
+++ b/tsdb/db_append_v2_test.go
@@ -7049,97 +7049,6 @@ func testPanicOnApplyConfigAppendV2(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, err)
}
-func TestDiskFillingUpAfterDisablingOOO_AppendV2(t *testing.T) {
- t.Parallel()
- for name, scenario := range sampleTypeScenarios {
- t.Run(name, func(t *testing.T) {
- testDiskFillingUpAfterDisablingOOOAppenderV2(t, scenario)
- })
- }
-}
-
-func testDiskFillingUpAfterDisablingOOOAppenderV2(t *testing.T, scenario sampleTypeScenario) {
- t.Parallel()
- ctx := context.Background()
-
- opts := DefaultOptions()
- opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
-
- db := newTestDB(t, withOpts(opts))
- db.DisableCompactions()
-
- series1 := labels.FromStrings("foo", "bar1")
- var allSamples []chunks.Sample
- addSamples := func(fromMins, toMins int64) {
- app := db.AppenderV2(context.Background())
- for m := fromMins; m <= toMins; m++ {
- ts := m * time.Minute.Milliseconds()
- _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
- require.NoError(t, err)
- allSamples = append(allSamples, s)
- }
- require.NoError(t, app.Commit())
- }
-
- // In-order samples.
- addSamples(290, 300)
- // OOO samples.
- addSamples(250, 299)
-
- // Restart DB with OOO disabled.
- require.NoError(t, db.Close())
-
- opts.OutOfOrderTimeWindow = 0
- db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
- db.DisableCompactions()
-
- ms := db.head.series.getByHash(series1.Hash(), series1)
- require.NotEmpty(t, ms.ooo.oooMmappedChunks, "OOO mmap chunk was not replayed")
-
- checkMmapFileContents := func(contains, notContains []string) {
- mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
- files, err := os.ReadDir(mmapDir)
- require.NoError(t, err)
-
- fnames := make([]string, 0, len(files))
- for _, f := range files {
- fnames = append(fnames, f.Name())
- }
-
- for _, f := range contains {
- require.Contains(t, fnames, f)
- }
- for _, f := range notContains {
- require.NotContains(t, fnames, f)
- }
- }
-
- // Add in-order samples until ready for compaction..
- addSamples(301, 500)
-
- // Check that m-map files gets deleted properly after compactions.
-
- db.head.mmapHeadChunks()
- checkMmapFileContents([]string{"000001", "000002"}, nil)
- require.NoError(t, db.Compact(ctx))
- checkMmapFileContents([]string{"000002"}, []string{"000001"})
- require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted")
-
- addSamples(501, 650)
- db.head.mmapHeadChunks()
- checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"})
- require.NoError(t, db.Compact(ctx))
- checkMmapFileContents(nil, []string{"000001", "000002", "000003"})
-
- // Verify that WBL is empty.
- files, err := os.ReadDir(db.head.wbl.Dir())
- require.NoError(t, err)
- require.Len(t, files, 1) // Last empty file after compaction.
- finfo, err := files[0].Info()
- require.NoError(t, err)
- require.Equal(t, int64(0), finfo.Size())
-}
-
func TestHistogramAppendAndQuery_AppendV2(t *testing.T) {
t.Run("integer histograms", func(t *testing.T) {
testHistogramAppendAndQueryHelperAppendV2(t, false)
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 403ce3636a..18e969f952 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -126,6 +126,7 @@ func newTestDB(t testing.TB, opts ...testDBOpt) (db *DB) {
db, err = open(o.dir, nil, nil, o.opts, o.rngs, nil)
}
require.NoError(t, err)
+
t.Cleanup(func() {
// Always close. DB is safe for close-after-close.
require.NoError(t, db.Close())
@@ -8304,16 +8305,22 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {
func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
t.Parallel()
- for name, scenario := range sampleTypeScenarios {
- t.Run(name, func(t *testing.T) {
- testDiskFillingUpAfterDisablingOOO(t, scenario)
- })
+ for _, appV2 := range []bool{true, false} {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(fmt.Sprintf("sample=%v/appV2=%v", name, appV2), func(t *testing.T) {
+ testDiskFillingUpAfterDisablingOOO(t, scenario, func(db *DB, ctx context.Context) storage.LimitedAppenderV1 {
+ if appV2 {
+ return storage.AppenderV2AsLimitedV1(db.AppenderV2(ctx))
+ }
+ return db.Appender(ctx)
+ })
+ })
+ }
}
}
-func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenario) {
+func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenario, appenderFn func(db *DB, ctx context.Context) storage.LimitedAppenderV1) {
t.Parallel()
- ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
@@ -8321,10 +8328,14 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- series1 := labels.FromStrings("foo", "bar1")
- var allSamples []chunks.Sample
+ var (
+ ctx = t.Context()
+ series1 = labels.FromStrings("foo", "bar1")
+ allSamples []chunks.Sample
+ )
+
addSamples := func(fromMins, toMins int64) {
- app := db.Appender(context.Background())
+ app := appenderFn(db, ctx)
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, s, err := scenario.appendFunc(app, series1, ts, ts)
@@ -8367,21 +8378,36 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
}
}
- // Add in-order samples until ready for compaction..
+ // Add in-order samples until ready for compaction.
addSamples(301, 500)
// Check that m-map files get deleted properly after compactions.
db.head.mmapHeadChunks()
checkMmapFileContents([]string{"000001", "000002"}, nil)
- require.NoError(t, db.Compact(ctx))
+
+ // NOTE: We are investigating flaky errors from this compaction on the i386 architecture. Compaction panics due to a
+ // chunk mapper fatal error. Recover here to understand the error cause; leaving panic recovery to the test framework
+ // causes a deadlock, as t.Cleanup tries to close the DB while locks are still held.
+ // See https://github.com/prometheus/prometheus/issues/17941#issuecomment-3846381263
+ require.NotPanics(t, func() {
+ require.NoError(t, db.Compact(ctx))
+ })
+
checkMmapFileContents([]string{"000002"}, []string{"000001"})
require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted")
addSamples(501, 650)
db.head.mmapHeadChunks()
checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"})
- require.NoError(t, db.Compact(ctx))
+
+ // NOTE: We are investigating flaky errors from this compaction on the i386 architecture. Compaction panics due to a
+ // chunk mapper fatal error. Recover here to understand the error cause; leaving panic recovery to the test framework
+ // causes a deadlock, as t.Cleanup tries to close the DB while locks are still held.
+ // See https://github.com/prometheus/prometheus/issues/17941#issuecomment-3846381263
+ require.NotPanics(t, func() {
+ require.NoError(t, db.Compact(ctx))
+ })
checkMmapFileContents(nil, []string{"000001", "000002", "000003"})
// Verify that WBL is empty.
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index 005d20b720..e6c9f2828a 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -2231,6 +2231,9 @@ func (s *memSeries) mmapChunks(chunkDiskMapper *chunks.ChunkDiskMapper) (count i
return count
}
+// TODO(bwplotka): Propagate errors correctly, even when they are async. Panicking here does occur from time to time
+// and causes flaky tests with a hidden root cause (mutexes left locked when closing is deferred).
+// We have not seen evidence of production impact yet, though.
func handleChunkWriteError(err error) {
if err != nil && !errors.Is(err, chunks.ErrChunkDiskMapperClosed) {
panic(err)
From 4321a5573c4400caa7a767aac66fb7a95cf7c0f6 Mon Sep 17 00:00:00 2001
From: zenador
Date: Fri, 6 Feb 2026 01:35:48 +0800
Subject: [PATCH 119/165] Use custom annotation for histogram quantile
monotonicity info to provide more details (#15578)
Signed-off-by: Jeanette Tan
---
promql/functions.go | 10 +--
promql/quantile.go | 103 +++++++++++++++++----------
promql/quantile_test.go | 4 +-
util/annotations/annotations.go | 64 ++++++++++++++++-
util/annotations/annotations_test.go | 5 ++
5 files changed, 139 insertions(+), 47 deletions(-)
diff --git a/promql/functions.go b/promql/functions.go
index aad02370f8..3a6bc3348d 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -1666,13 +1666,13 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
// Deal with classic histograms that have already been filtered for conflicting native histograms.
for _, mb := range enh.signatureToMetricWithBuckets {
if len(mb.buckets) > 0 {
- res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets)
+ quantile, forcedMonotonicity, _, minBucket, maxBucket, maxDiff := BucketQuantile(q, mb.buckets)
if forcedMonotonicity {
+ metricName := ""
if enh.enableDelayedNameRemoval {
- annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(getMetricName(mb.metric), args[1].PositionRange()))
- } else {
- annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo("", args[1].PositionRange()))
+ metricName = getMetricName(mb.metric)
}
+ annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(metricName, args[1].PositionRange(), enh.Ts, minBucket, maxBucket, maxDiff))
}
if !enh.enableDelayedNameRemoval {
@@ -1681,7 +1681,7 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
enh.Out = append(enh.Out, Sample{
Metric: mb.metric,
- F: res,
+ F: quantile,
DropName: true,
})
}
diff --git a/promql/quantile.go b/promql/quantile.go
index c44eb89e68..f3657e1621 100644
--- a/promql/quantile.go
+++ b/promql/quantile.go
@@ -94,10 +94,7 @@ type metricWithBuckets struct {
//
// If q>1, +Inf is returned.
//
-// We also return a bool to indicate if monotonicity needed to be forced,
-// and another bool to indicate if small differences between buckets (that
-// are likely artifacts of floating point precision issues) have been
-// ignored.
+// We also return extra info, see doc for ensureMonotonicAndIgnoreSmallDeltas.
//
// Generically speaking, BucketQuantile is for calculating the
// histogram_quantile() of classic histograms. See also: HistogramQuantile
@@ -105,15 +102,21 @@ type metricWithBuckets struct {
//
// BucketQuantile is exported as a useful quantile function over a set of
// given buckets. It may be used by other PromQL engine implementations.
-func BucketQuantile(q float64, buckets Buckets) (float64, bool, bool) {
- if math.IsNaN(q) {
- return math.NaN(), false, false
- }
- if q < 0 {
- return math.Inf(-1), false, false
- }
- if q > 1 {
- return math.Inf(+1), false, false
+func BucketQuantile(q float64, buckets Buckets) (
+ quantile float64,
+ forcedMonotonic, fixedPrecision bool,
+ minBucket, maxBucket, maxDiff float64,
+) {
+ switch {
+ case math.IsNaN(q):
+ quantile = math.NaN()
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
+ case q < 0:
+ quantile = math.Inf(-1)
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
+ case q > 1:
+ quantile = math.Inf(+1)
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
	slices.SortFunc(buckets, func(a, b Bucket) int {
		// We don't expect the bucket boundary to be a NaN.
		if a.UpperBound < b.UpperBound {
			return -1
		}
		if a.UpperBound > b.UpperBound {
			return +1
		}
		return 0
	})
})
if !math.IsInf(buckets[len(buckets)-1].UpperBound, +1) {
- return math.NaN(), false, false
+ quantile = math.NaN()
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
buckets = coalesceBuckets(buckets)
- forcedMonotonic, fixedPrecision := ensureMonotonicAndIgnoreSmallDeltas(buckets, smallDeltaTolerance)
+ forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff = ensureMonotonicAndIgnoreSmallDeltas(buckets, smallDeltaTolerance)
if len(buckets) < 2 {
- return math.NaN(), false, false
+ quantile = math.NaN()
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
observations := buckets[len(buckets)-1].Count
if observations == 0 {
- return math.NaN(), false, false
+ quantile = math.NaN()
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
rank := q * observations
b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].Count >= rank })
- if b == len(buckets)-1 {
- return buckets[len(buckets)-2].UpperBound, forcedMonotonic, fixedPrecision
+ switch {
+ case b == len(buckets)-1:
+ quantile = buckets[len(buckets)-2].UpperBound
+ case b == 0 && buckets[0].UpperBound <= 0:
+ quantile = buckets[0].UpperBound
+ default:
+ var (
+ bucketStart float64
+ bucketEnd = buckets[b].UpperBound
+ count = buckets[b].Count
+ )
+ if b > 0 {
+ bucketStart = buckets[b-1].UpperBound
+ count -= buckets[b-1].Count
+ rank -= buckets[b-1].Count
+ }
+ quantile = bucketStart + (bucketEnd-bucketStart)*(rank/count)
}
- if b == 0 && buckets[0].UpperBound <= 0 {
- return buckets[0].UpperBound, forcedMonotonic, fixedPrecision
- }
- var (
- bucketStart float64
- bucketEnd = buckets[b].UpperBound
- count = buckets[b].Count
- )
- if b > 0 {
- bucketStart = buckets[b-1].UpperBound
- count -= buckets[b-1].Count
- rank -= buckets[b-1].Count
- }
- return bucketStart + (bucketEnd-bucketStart)*(rank/count), forcedMonotonic, fixedPrecision
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
// HistogramQuantile calculates the quantile 'q' based on the given histogram.
@@ -655,10 +663,20 @@ func coalesceBuckets(buckets Buckets) Buckets {
// the histogram buckets, essentially removing any decreases in the count
// between successive buckets.
//
-// We return a bool to indicate if this monotonicity was forced or not, and
-// another bool to indicate if small deltas were ignored or not.
-func ensureMonotonicAndIgnoreSmallDeltas(buckets Buckets, tolerance float64) (bool, bool) {
- var forcedMonotonic, fixedPrecision bool
+// We return:
+// - a bool to indicate if monotonicity needed to be forced
+// - a bool to indicate if small differences between buckets (that are likely
+// artifacts of floating point precision issues) have been ignored
+// - a float to indicate the minimum bucket upper bound where monotonicity was forced, if applicable
+// - a float to indicate the maximum bucket upper bound where monotonicity was forced, if applicable
+// - a float to indicate the maximum difference between the count of two consecutive buckets
+// where monotonicity was forced, if applicable
+func ensureMonotonicAndIgnoreSmallDeltas(buckets Buckets, tolerance float64) (
+ forcedMonotonic, fixedPrecision bool,
+ minBucket, maxBucket, maxDiff float64,
+) {
+ minBucket = math.Inf(+1)
+ maxBucket = math.Inf(-1)
prev := buckets[0].Count
for i := 1; i < len(buckets); i++ {
curr := buckets[i].Count // Assumed always positive.
@@ -679,11 +697,20 @@ func ensureMonotonicAndIgnoreSmallDeltas(buckets Buckets, tolerance float64) (bo
// Do not update the 'prev' value as we are ignoring the decrease.
buckets[i].Count = prev
forcedMonotonic = true
+ if buckets[i].UpperBound < minBucket {
+ minBucket = buckets[i].UpperBound
+ }
+ if buckets[i].UpperBound > maxBucket {
+ maxBucket = buckets[i].UpperBound
+ }
+ if diff := prev - curr; diff > maxDiff {
+ maxDiff = diff
+ }
continue
}
prev = curr
}
- return forcedMonotonic, fixedPrecision
+ return forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
// quantile calculates the given quantile of a vector of samples.
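As an aside (not part of the patch), a caller migrating to the widened BucketQuantile
signature might consume the new monotonicity details roughly like this sketch; the
bucket values below are invented for illustration:

```go
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/promql"
)

func main() {
	// Invented cumulative classic-histogram buckets. The counts deliberately
	// dip (5 -> 4) so that BucketQuantile has to force monotonicity; the last
	// bucket must be +Inf or the function returns NaN.
	buckets := promql.Buckets{
		{UpperBound: 0.1, Count: 5},
		{UpperBound: 0.5, Count: 4},
		{UpperBound: math.Inf(+1), Count: 10},
	}

	q, forced, fixed, minBucket, maxBucket, maxDiff := promql.BucketQuantile(0.9, buckets)
	fmt.Printf("quantile: %g\n", q)
	if forced {
		// These extra values feed the richer annotation introduced by this patch.
		fmt.Printf("monotonicity forced between le=%g and le=%g (max count diff %g)\n",
			minBucket, maxBucket, maxDiff)
	}
	if fixed {
		fmt.Println("small floating point deltas between bucket counts were ignored")
	}
}
```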
diff --git a/promql/quantile_test.go b/promql/quantile_test.go
index c97ff7c3c4..e2042dc3c4 100644
--- a/promql/quantile_test.go
+++ b/promql/quantile_test.go
@@ -308,10 +308,10 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) {
} {
t.Run(name, func(t *testing.T) {
for q, v := range tc.expectedValues {
- res, forced, fixed := BucketQuantile(q, tc.getInput())
+ quantile, forced, fixed, _, _, _ := BucketQuantile(q, tc.getInput())
require.Equal(t, tc.expectedForced, forced)
require.Equal(t, tc.expectedFixed, fixed)
- require.InEpsilon(t, v, res, eps)
+ require.InEpsilon(t, v, quantile, eps)
}
})
}
diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go
index 581e4987d1..550b9fcdc5 100644
--- a/util/annotations/annotations.go
+++ b/util/annotations/annotations.go
@@ -16,6 +16,7 @@ package annotations
import (
"errors"
"fmt"
+ "time"
"github.com/prometheus/common/model"
@@ -319,12 +320,71 @@ func NewPossibleNonCounterLabelInfo(metricName, typeLabel string, pos posrange.P
}
}
+type histogramQuantileForcedMonotonicityErr struct {
+ PositionRange posrange.PositionRange
+ Err error
+ Query string
+ minTs, maxTs int64
+ minBucket, maxBucket, maxDiff float64
+ count int
+}
+
+func (e *histogramQuantileForcedMonotonicityErr) Error() string {
+ if e.Query == "" {
+ return e.Err.Error()
+ }
+ startTime := time.Unix(e.minTs/1000, 0).UTC().Format(time.RFC3339)
+ endTime := time.Unix(e.maxTs/1000, 0).UTC().Format(time.RFC3339)
+ return fmt.Sprintf("%s, from buckets %g to %g, with a max diff of %.2g, over %d samples from %s to %s (%s)", e.Err, e.minBucket, e.maxBucket, e.maxDiff, e.count+1, startTime, endTime, e.PositionRange.StartPosInput(e.Query, 0))
+}
+
+func (e *histogramQuantileForcedMonotonicityErr) Unwrap() error {
+ return e.Err
+}
+
+func (e *histogramQuantileForcedMonotonicityErr) SetQuery(query string) {
+ e.Query = query
+}
+
+func (e *histogramQuantileForcedMonotonicityErr) Merge(other error) error {
+ o := &histogramQuantileForcedMonotonicityErr{}
+ ok := errors.As(other, &o)
+ if !ok {
+ return e
+ }
+ if e.Err.Error() != o.Err.Error() {
+ return e
+ }
+ if e.minTs < o.minTs {
+ o.minTs = e.minTs
+ }
+ if e.maxTs > o.maxTs {
+ o.maxTs = e.maxTs
+ }
+ if e.minBucket < o.minBucket {
+ o.minBucket = e.minBucket
+ }
+ if e.maxBucket > o.maxBucket {
+ o.maxBucket = e.maxBucket
+ }
+ if e.maxDiff > o.maxDiff {
+ o.maxDiff = e.maxDiff
+ }
+ o.count += e.count + 1
+ return o
+}
+
// NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to
// histogram_quantile needs to be forced to be monotonic.
-func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) error {
- return &annoErr{
+func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange, ts int64, minBucket, maxBucket, maxDiff float64) error {
+ return &histogramQuantileForcedMonotonicityErr{
PositionRange: pos,
Err: maybeAddMetricName(HistogramQuantileForcedMonotonicityInfo, metricName),
+ minTs: ts,
+ maxTs: ts,
+ minBucket: minBucket,
+ maxBucket: maxBucket,
+ maxDiff: maxDiff,
}
}
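As a sanity check on the Merge semantics above, here is a standalone Go sketch that folds several occurrences with the same widening rules: minimum/maximum timestamp, minimum/maximum affected bucket bound, largest clamped diff, and an occurrence count. The annoStats type and merge helper are hypothetical stand-ins, not the annotations package API (the real Merge method folds the receiver into the other error and returns it). Fed the three samples used in the test below, it reproduces the values in the expected info string: buckets 2.5 to 100, max diff 10, 3 samples from 2023-12-25 to 2024-12-25.

package main

import (
	"fmt"
	"time"
)

// annoStats holds the aggregated fields, loosely mirroring
// histogramQuantileForcedMonotonicityErr for illustration only.
type annoStats struct {
	minTs, maxTs                  int64 // milliseconds since epoch
	minBucket, maxBucket, maxDiff float64
	count                         int
}

// merge folds one occurrence into the running aggregate: widen the time
// range and the bucket range, keep the largest diff, count occurrences.
func merge(agg, next annoStats) annoStats {
	if next.minTs < agg.minTs {
		agg.minTs = next.minTs
	}
	if next.maxTs > agg.maxTs {
		agg.maxTs = next.maxTs
	}
	if next.minBucket < agg.minBucket {
		agg.minBucket = next.minBucket
	}
	if next.maxBucket > agg.maxBucket {
		agg.maxBucket = next.maxBucket
	}
	if next.maxDiff > agg.maxDiff {
		agg.maxDiff = next.maxDiff
	}
	agg.count += next.count + 1
	return agg
}

func main() {
	// The three annotations added in the test: (minTs, maxTs, minBucket, maxBucket, maxDiff, count).
	samples := []annoStats{
		{1735084800000, 1735084800000, 5, 50, 5.5, 0},
		{1703462400000, 1703462400000, 10, 100, 10, 0},
		{1733011200000, 1733011200000, 2.5, 75, 7.5, 0},
	}
	agg := samples[0]
	for _, s := range samples[1:] {
		agg = merge(agg, s)
	}
	from := time.Unix(agg.minTs/1000, 0).UTC().Format(time.RFC3339)
	to := time.Unix(agg.maxTs/1000, 0).UTC().Format(time.RFC3339)
	// Prints: buckets 2.5 to 100, max diff 10, over 3 samples from
	// 2023-12-25T00:00:00Z to 2024-12-25T00:00:00Z (two merges, so count+1 = 3).
	fmt.Printf("buckets %g to %g, max diff %g, over %d samples from %s to %s\n",
		agg.minBucket, agg.maxBucket, agg.maxDiff, agg.count+1, from, to)
}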
diff --git a/util/annotations/annotations_test.go b/util/annotations/annotations_test.go
index e3caaae7eb..39fb8e62f4 100644
--- a/util/annotations/annotations_test.go
+++ b/util/annotations/annotations_test.go
@@ -39,6 +39,10 @@ func TestAnnotations_AsStrings(t *testing.T) {
annos.Add(NewHistogramIgnoredInAggregationInfo("sum", pos))
+ annos.Add(NewHistogramQuantileForcedMonotonicityInfo("series_1", pos, 1735084800000, 5, 50, 5.5))
+ annos.Add(NewHistogramQuantileForcedMonotonicityInfo("series_1", pos, 1703462400000, 10, 100, 10))
+ annos.Add(NewHistogramQuantileForcedMonotonicityInfo("series_1", pos, 1733011200000, 2.5, 75, 7.5))
+
warnings, infos := annos.AsStrings("lorem ipsum dolor sit amet", 0, 0)
require.ElementsMatch(t, warnings, []string{
"this is a non-annotation error",
@@ -48,6 +52,7 @@ func TestAnnotations_AsStrings(t *testing.T) {
})
require.ElementsMatch(t, infos, []string{
"PromQL info: ignored histogram in sum aggregation (1:4)",
+ `PromQL info: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name "series_1", from buckets 2.5 to 100, with a max diff of 10, over 3 samples from 2023-12-25T00:00:00Z to 2024-12-25T00:00:00Z (1:4)`,
})
}
From 7bbce150b4467740e16eada79fbe0fe20e60df81 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Thu, 5 Feb 2026 14:20:46 -0800
Subject: [PATCH 120/165] Update npm dependencies for v3.10
Signed-off-by: Ganesh Vernekar
---
web/ui/mantine-ui/package.json | 64 +-
web/ui/module/codemirror-promql/package.json | 18 +-
web/ui/module/lezer-promql/package.json | 2 +-
web/ui/package-lock.json | 862 ++++++++-----------
web/ui/package.json | 8 +-
5 files changed, 428 insertions(+), 526 deletions(-)
diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json
index f38a2d965f..8f35318090 100644
--- a/web/ui/mantine-ui/package.json
+++ b/web/ui/mantine-ui/package.json
@@ -12,57 +12,57 @@
"test": "vitest"
},
"dependencies": {
- "@codemirror/autocomplete": "^6.19.1",
- "@codemirror/language": "^6.11.3",
- "@codemirror/lint": "^6.9.2",
- "@codemirror/state": "^6.5.2",
- "@codemirror/view": "^6.38.6",
- "@floating-ui/dom": "^1.7.4",
- "@lezer/common": "^1.3.0",
+ "@codemirror/autocomplete": "^6.20.0",
+ "@codemirror/language": "^6.12.1",
+ "@codemirror/lint": "^6.9.3",
+ "@codemirror/state": "^6.5.4",
+ "@codemirror/view": "^6.39.12",
+ "@floating-ui/dom": "^1.7.5",
+ "@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
- "@mantine/code-highlight": "^8.3.6",
- "@mantine/core": "^8.3.6",
- "@mantine/dates": "^8.3.6",
- "@mantine/hooks": "^8.3.6",
- "@mantine/notifications": "^8.3.6",
+ "@mantine/code-highlight": "^8.3.14",
+ "@mantine/core": "^8.3.14",
+ "@mantine/dates": "^8.3.14",
+ "@mantine/hooks": "^8.3.14",
+ "@mantine/notifications": "^8.3.14",
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.309.1",
- "@reduxjs/toolkit": "^2.10.1",
- "@tabler/icons-react": "^3.35.0",
- "@tanstack/react-query": "^5.90.7",
+ "@reduxjs/toolkit": "^2.11.2",
+ "@tabler/icons-react": "^3.36.1",
+ "@tanstack/react-query": "^5.90.20",
"@testing-library/jest-dom": "^6.9.1",
- "@testing-library/react": "^16.3.0",
- "@types/lodash": "^4.17.20",
+ "@testing-library/react": "^16.3.2",
+ "@types/lodash": "^4.17.23",
"@types/sanitize-html": "^2.16.0",
- "@uiw/react-codemirror": "^4.25.3",
+ "@uiw/react-codemirror": "^4.25.4",
"clsx": "^2.1.1",
"dayjs": "^1.11.19",
"highlight.js": "^11.11.1",
- "lodash": "^4.17.21",
- "react": "^19.2.0",
- "react-dom": "^19.2.0",
- "react-infinite-scroll-component": "^6.1.0",
+ "lodash": "^4.17.23",
+ "react": "^19.2.4",
+ "react-dom": "^19.2.4",
+ "react-infinite-scroll-component": "^6.1.1",
"react-redux": "^9.2.0",
- "react-router-dom": "^7.9.5",
+ "react-router-dom": "^7.13.0",
"sanitize-html": "^2.17.0",
"uplot": "^1.6.32",
"uplot-react": "^1.2.4",
- "use-query-params": "^2.2.1"
+ "use-query-params": "^2.2.2"
},
"devDependencies": {
"@eslint/compat": "^1.4.1",
- "@eslint/eslintrc": "^3.3.1",
- "@eslint/js": "^9.39.1",
- "@types/react": "^19.2.2",
- "@types/react-dom": "^19.2.2",
- "@typescript-eslint/eslint-plugin": "^8.46.3",
- "@typescript-eslint/parser": "^8.46.3",
+ "@eslint/eslintrc": "^3.3.3",
+ "@eslint/js": "^9.39.2",
+ "@types/react": "^19.2.13",
+ "@types/react-dom": "^19.2.3",
+ "@typescript-eslint/eslint-plugin": "^8.54.0",
+ "@typescript-eslint/parser": "^8.54.0",
"@vitejs/plugin-react": "^4.7.0",
- "eslint": "^9.39.1",
+ "eslint": "^9.39.2",
"eslint-plugin-react-hooks": "^5.2.0",
- "eslint-plugin-react-refresh": "^0.4.24",
+ "eslint-plugin-react-refresh": "^0.5.0",
"globals": "^16.5.0",
"jsdom": "^25.0.1",
"postcss": "^8.5.6",
diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json
index 06b75f735c..5208513eab 100644
--- a/web/ui/module/codemirror-promql/package.json
+++ b/web/ui/module/codemirror-promql/package.json
@@ -30,18 +30,18 @@
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.309.1",
- "lru-cache": "^11.2.2"
+ "lru-cache": "^11.2.5"
},
"devDependencies": {
- "@codemirror/autocomplete": "^6.19.1",
- "@codemirror/language": "^6.11.3",
- "@codemirror/lint": "^6.9.2",
- "@codemirror/state": "^6.5.2",
- "@codemirror/view": "^6.38.6",
- "@lezer/common": "^1.3.0",
+ "@codemirror/autocomplete": "^6.20.0",
+ "@codemirror/language": "^6.12.1",
+ "@codemirror/lint": "^6.9.3",
+ "@codemirror/state": "^6.5.4",
+ "@codemirror/view": "^6.39.12",
+ "@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
- "@lezer/lr": "^1.4.3",
- "eslint-plugin-prettier": "^5.5.4",
+ "@lezer/lr": "^1.4.8",
+ "eslint-plugin-prettier": "^5.5.5",
"isomorphic-fetch": "^3.0.0",
"nock": "^14.0.10"
},
diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index eccae9a163..7a969b57e4 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -33,7 +33,7 @@
"devDependencies": {
"@lezer/generator": "^1.8.0",
"@lezer/highlight": "^1.2.3",
- "@lezer/lr": "^1.4.3",
+ "@lezer/lr": "^1.4.8",
"@rollup/plugin-node-resolve": "^16.0.3"
},
"peerDependencies": {
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index a1f72ff228..8254c8b51c 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -13,11 +13,11 @@
],
"devDependencies": {
"@types/jest": "^29.5.14",
- "@typescript-eslint/eslint-plugin": "^8.46.3",
- "@typescript-eslint/parser": "^8.46.3",
+ "@typescript-eslint/eslint-plugin": "^8.54.0",
+ "@typescript-eslint/parser": "^8.54.0",
"eslint-config-prettier": "^10.1.8",
- "prettier": "^3.6.2",
- "ts-jest": "^29.4.5",
+ "prettier": "^3.8.1",
+ "ts-jest": "^29.4.6",
"typescript": "^5.9.3",
"vite": "^6.4.1"
}
@@ -26,57 +26,57 @@
"name": "@prometheus-io/mantine-ui",
"version": "0.309.1",
"dependencies": {
- "@codemirror/autocomplete": "^6.19.1",
- "@codemirror/language": "^6.11.3",
- "@codemirror/lint": "^6.9.2",
- "@codemirror/state": "^6.5.2",
- "@codemirror/view": "^6.38.6",
- "@floating-ui/dom": "^1.7.4",
- "@lezer/common": "^1.3.0",
+ "@codemirror/autocomplete": "^6.20.0",
+ "@codemirror/language": "^6.12.1",
+ "@codemirror/lint": "^6.9.3",
+ "@codemirror/state": "^6.5.4",
+ "@codemirror/view": "^6.39.12",
+ "@floating-ui/dom": "^1.7.5",
+ "@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
- "@mantine/code-highlight": "^8.3.6",
- "@mantine/core": "^8.3.6",
- "@mantine/dates": "^8.3.6",
- "@mantine/hooks": "^8.3.6",
- "@mantine/notifications": "^8.3.6",
+ "@mantine/code-highlight": "^8.3.14",
+ "@mantine/core": "^8.3.14",
+ "@mantine/dates": "^8.3.14",
+ "@mantine/hooks": "^8.3.14",
+ "@mantine/notifications": "^8.3.14",
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.309.1",
- "@reduxjs/toolkit": "^2.10.1",
- "@tabler/icons-react": "^3.35.0",
- "@tanstack/react-query": "^5.90.7",
+ "@reduxjs/toolkit": "^2.11.2",
+ "@tabler/icons-react": "^3.36.1",
+ "@tanstack/react-query": "^5.90.20",
"@testing-library/jest-dom": "^6.9.1",
- "@testing-library/react": "^16.3.0",
- "@types/lodash": "^4.17.20",
+ "@testing-library/react": "^16.3.2",
+ "@types/lodash": "^4.17.23",
"@types/sanitize-html": "^2.16.0",
- "@uiw/react-codemirror": "^4.25.3",
+ "@uiw/react-codemirror": "^4.25.4",
"clsx": "^2.1.1",
"dayjs": "^1.11.19",
"highlight.js": "^11.11.1",
- "lodash": "^4.17.21",
- "react": "^19.2.0",
- "react-dom": "^19.2.0",
- "react-infinite-scroll-component": "^6.1.0",
+ "lodash": "^4.17.23",
+ "react": "^19.2.4",
+ "react-dom": "^19.2.4",
+ "react-infinite-scroll-component": "^6.1.1",
"react-redux": "^9.2.0",
- "react-router-dom": "^7.9.5",
+ "react-router-dom": "^7.13.0",
"sanitize-html": "^2.17.0",
"uplot": "^1.6.32",
"uplot-react": "^1.2.4",
- "use-query-params": "^2.2.1"
+ "use-query-params": "^2.2.2"
},
"devDependencies": {
"@eslint/compat": "^1.4.1",
- "@eslint/eslintrc": "^3.3.1",
- "@eslint/js": "^9.39.1",
- "@types/react": "^19.2.2",
- "@types/react-dom": "^19.2.2",
- "@typescript-eslint/eslint-plugin": "^8.46.3",
- "@typescript-eslint/parser": "^8.46.3",
+ "@eslint/eslintrc": "^3.3.3",
+ "@eslint/js": "^9.39.2",
+ "@types/react": "^19.2.13",
+ "@types/react-dom": "^19.2.3",
+ "@typescript-eslint/eslint-plugin": "^8.54.0",
+ "@typescript-eslint/parser": "^8.54.0",
"@vitejs/plugin-react": "^4.7.0",
- "eslint": "^9.39.1",
+ "eslint": "^9.39.2",
"eslint-plugin-react-hooks": "^5.2.0",
- "eslint-plugin-react-refresh": "^0.4.24",
+ "eslint-plugin-react-refresh": "^0.5.0",
"globals": "^16.5.0",
"jsdom": "^25.0.1",
"postcss": "^8.5.6",
@@ -86,24 +86,108 @@
"vitest": "^3.2.4"
}
},
+ "mantine-ui/node_modules/@mantine/code-highlight": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-8.3.14.tgz",
+ "integrity": "sha512-7ywMnadaw4O/QG9sQOCIWPZKh6Q97ibyZgkH2cjVNvVbChmZKXIlcHW/QbQJUS84Bs/eGDhnkxwnq78v9w16gQ==",
+ "license": "MIT",
+ "dependencies": {
+ "clsx": "^2.1.1"
+ },
+ "peerDependencies": {
+ "@mantine/core": "8.3.14",
+ "@mantine/hooks": "8.3.14",
+ "react": "^18.x || ^19.x",
+ "react-dom": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/core": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.3.14.tgz",
+ "integrity": "sha512-ZOxggx65Av1Ii1NrckCuqzluRpmmG+8DyEw24wDom3rmwsPg9UV+0le2QTyI5Eo60LzPfUju1KuEPiUzNABIPg==",
+ "license": "MIT",
+ "dependencies": {
+ "@floating-ui/react": "^0.27.16",
+ "clsx": "^2.1.1",
+ "react-number-format": "^5.4.4",
+ "react-remove-scroll": "^2.7.1",
+ "react-textarea-autosize": "8.5.9",
+ "type-fest": "^4.41.0"
+ },
+ "peerDependencies": {
+ "@mantine/hooks": "8.3.14",
+ "react": "^18.x || ^19.x",
+ "react-dom": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/dates": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-8.3.14.tgz",
+ "integrity": "sha512-NdStRo2ZQ55MoMF5B9vjhpBpHRDHF1XA9Dkb1kKSdNuLlaFXKlvoaZxj/3LfNPpn7Nqlns78nWt4X8/cgC2YIg==",
+ "license": "MIT",
+ "dependencies": {
+ "clsx": "^2.1.1"
+ },
+ "peerDependencies": {
+ "@mantine/core": "8.3.14",
+ "@mantine/hooks": "8.3.14",
+ "dayjs": ">=1.0.0",
+ "react": "^18.x || ^19.x",
+ "react-dom": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/hooks": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.3.14.tgz",
+ "integrity": "sha512-0SbHnGEuHcF2QyjzBBcqidpjNmIb6n7TC3obnhkBToYhUTbMcJSK/8ei/yHtAelridJH4CPeohRlQdc0HajHyQ==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/notifications": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.3.14.tgz",
+ "integrity": "sha512-+ia97wrcU9Zfv+jXYvgr2GdISqKTHbQE9nnEIZvGUBPAqKr9b2JAsaXQS/RsAdoXUI+kKDEtH2fyVYS7zrSi/Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@mantine/store": "8.3.14",
+ "react-transition-group": "4.4.5"
+ },
+ "peerDependencies": {
+ "@mantine/core": "8.3.14",
+ "@mantine/hooks": "8.3.14",
+ "react": "^18.x || ^19.x",
+ "react-dom": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/store": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.3.14.tgz",
+ "integrity": "sha512-bgW+fYHDOp7Pk4+lcEm3ZF7dD/sIMKHyR985cOqSHAYJPRcVFb+zcEK/SWoFZqlyA4qh08CNrASOaod8N0XKfA==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^18.x || ^19.x"
+ }
+ },
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.309.1",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.309.1",
- "lru-cache": "^11.2.2"
+ "lru-cache": "^11.2.5"
},
"devDependencies": {
- "@codemirror/autocomplete": "^6.19.1",
- "@codemirror/language": "^6.11.3",
- "@codemirror/lint": "^6.9.2",
- "@codemirror/state": "^6.5.2",
- "@codemirror/view": "^6.38.6",
- "@lezer/common": "^1.3.0",
+ "@codemirror/autocomplete": "^6.20.0",
+ "@codemirror/language": "^6.12.1",
+ "@codemirror/lint": "^6.9.3",
+ "@codemirror/state": "^6.5.4",
+ "@codemirror/view": "^6.39.12",
+ "@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
- "@lezer/lr": "^1.4.3",
- "eslint-plugin-prettier": "^5.5.4",
+ "@lezer/lr": "^1.4.8",
+ "eslint-plugin-prettier": "^5.5.5",
"isomorphic-fetch": "^3.0.0",
"nock": "^14.0.10"
},
@@ -126,7 +210,7 @@
"devDependencies": {
"@lezer/generator": "^1.8.0",
"@lezer/highlight": "^1.2.3",
- "@lezer/lr": "^1.4.3",
+ "@lezer/lr": "^1.4.8",
"@rollup/plugin-node-resolve": "^16.0.3"
},
"peerDependencies": {
@@ -727,9 +811,10 @@
"peer": true
},
"node_modules/@codemirror/autocomplete": {
- "version": "6.19.1",
- "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.19.1.tgz",
- "integrity": "sha512-q6NenYkEy2fn9+JyjIxMWcNjzTL/IhwqfzOut1/G3PrIFkrbl4AL7Wkse5tLrQUUyqGoAKU5+Pi5jnnXxH5HGw==",
+ "version": "6.20.0",
+ "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.20.0.tgz",
+ "integrity": "sha512-bOwvTOIJcG5FVo5gUUupiwYh8MioPLQ4UcqbcRf7UQ98X90tCa9E1kZ3Z7tqwpZxYyOvh1YTYbmZE9RTfTp5hg==",
+ "license": "MIT",
"dependencies": {
"@codemirror/language": "^6.0.0",
"@codemirror/state": "^6.0.0",
@@ -750,23 +835,24 @@
}
},
"node_modules/@codemirror/language": {
- "version": "6.11.3",
- "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.11.3.tgz",
- "integrity": "sha512-9HBM2XnwDj7fnu0551HkGdrUrrqmYq/WC5iv6nbY2WdicXdGbhR/gfbZOH73Aqj4351alY1+aoG9rCNfiwS1RA==",
+ "version": "6.12.1",
+ "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.12.1.tgz",
+ "integrity": "sha512-Fa6xkSiuGKc8XC8Cn96T+TQHYj4ZZ7RdFmXA3i9xe/3hLHfwPZdM+dqfX0Cp0zQklBKhVD8Yzc8LS45rkqcwpQ==",
"license": "MIT",
"dependencies": {
"@codemirror/state": "^6.0.0",
"@codemirror/view": "^6.23.0",
- "@lezer/common": "^1.1.0",
+ "@lezer/common": "^1.5.0",
"@lezer/highlight": "^1.0.0",
"@lezer/lr": "^1.0.0",
"style-mod": "^4.0.0"
}
},
"node_modules/@codemirror/lint": {
- "version": "6.9.2",
- "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.9.2.tgz",
- "integrity": "sha512-sv3DylBiIyi+xKwRCJAAsBZZZWo82shJ/RTMymLabAdtbkV5cSKwWDeCgtUq3v8flTaXS2y1kKkICuRYtUswyQ==",
+ "version": "6.9.3",
+ "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.9.3.tgz",
+ "integrity": "sha512-y3YkYhdnhjDBAe0VIA0c4wVoFOvnp8CnAvfLqi0TqotIv92wIlAAP7HELOpLBsKwjAX6W92rSflA6an/2zBvXw==",
+ "license": "MIT",
"dependencies": {
"@codemirror/state": "^6.0.0",
"@codemirror/view": "^6.35.0",
@@ -785,9 +871,9 @@
}
},
"node_modules/@codemirror/state": {
- "version": "6.5.2",
- "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.2.tgz",
- "integrity": "sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA==",
+ "version": "6.5.4",
+ "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.4.tgz",
+ "integrity": "sha512-8y7xqG/hpB53l25CIoit9/ngxdfoG+fx+V3SHBrinnhOtLvKHRyAJJuHzkWrR4YXXLX8eXBsejgAAxHUOdW1yw==",
"license": "MIT",
"dependencies": {
"@marijn/find-cluster-break": "^1.0.0"
@@ -806,9 +892,10 @@
}
},
"node_modules/@codemirror/view": {
- "version": "6.38.6",
- "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.38.6.tgz",
- "integrity": "sha512-qiS0z1bKs5WOvHIAC0Cybmv4AJSkAXgX5aD6Mqd2epSLlVJsQl8NG23jCVouIgkh4All/mrbdsf2UOLFnJw0tw==",
+ "version": "6.39.12",
+ "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.39.12.tgz",
+ "integrity": "sha512-f+/VsHVn/kOA9lltk/GFzuYwVVAKmOnNjxbrhkk3tPHntFqjWeI2TbIXx006YkBkqC10wZ4NsnWXCQiFPeAISQ==",
+ "license": "MIT",
"dependencies": {
"@codemirror/state": "^6.5.0",
"crelt": "^1.0.6",
@@ -1242,9 +1329,9 @@
}
},
"node_modules/@eslint-community/eslint-utils": {
- "version": "4.9.0",
- "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz",
- "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==",
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz",
+ "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1274,9 +1361,9 @@
}
},
"node_modules/@eslint-community/regexpp": {
- "version": "4.12.1",
- "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz",
- "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==",
+ "version": "4.12.2",
+ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz",
+ "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==",
"dev": true,
"license": "MIT",
"engines": {
@@ -1342,9 +1429,9 @@
}
},
"node_modules/@eslint/eslintrc": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz",
- "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==",
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz",
+ "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1354,7 +1441,7 @@
"globals": "^14.0.0",
"ignore": "^5.2.0",
"import-fresh": "^3.2.1",
- "js-yaml": "^4.1.0",
+ "js-yaml": "^4.1.1",
"minimatch": "^3.1.2",
"strip-json-comments": "^3.1.1"
},
@@ -1378,10 +1465,11 @@
}
},
"node_modules/@eslint/js": {
- "version": "9.39.1",
- "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.1.tgz",
- "integrity": "sha512-S26Stp4zCy88tH94QbBv3XCuzRQiZ9yXofEILmglYTh/Ug/a9/umqvgFtYBAo3Lp0nsI/5/qH1CCrbdK3AP1Tw==",
+ "version": "9.39.2",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz",
+ "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -1412,21 +1500,21 @@
}
},
"node_modules/@floating-ui/core": {
- "version": "1.7.3",
- "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz",
- "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==",
+ "version": "1.7.4",
+ "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.4.tgz",
+ "integrity": "sha512-C3HlIdsBxszvm5McXlB8PeOEWfBhcGBTZGkGlWc2U0KFY5IwG5OQEuQ8rq52DZmcHDlPLd+YFBK+cZcytwIFWg==",
"license": "MIT",
"dependencies": {
"@floating-ui/utils": "^0.2.10"
}
},
"node_modules/@floating-ui/dom": {
- "version": "1.7.4",
- "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz",
- "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==",
+ "version": "1.7.5",
+ "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.5.tgz",
+ "integrity": "sha512-N0bD2kIPInNHUHehXhMke1rBGs1dwqvC9O9KYMyyjK7iXt7GAhnro7UlcuYcGdS/yYOlq0MAVgrow8IbWJwyqg==",
"license": "MIT",
"dependencies": {
- "@floating-ui/core": "^1.7.3",
+ "@floating-ui/core": "^1.7.4",
"@floating-ui/utils": "^0.2.10"
}
},
@@ -2043,9 +2131,10 @@
}
},
"node_modules/@lezer/common": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.3.0.tgz",
- "integrity": "sha512-L9X8uHCYU310o99L3/MpJKYxPzXPOS7S0NmBaM7UO/x2Kb2WbmMLSkfvdr1KxRIFYOpbY0Jhn7CfLSUDzL8arQ=="
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.5.1.tgz",
+ "integrity": "sha512-6YRVG9vBkaY7p1IVxL4s44n5nUnaNnGM2/AckNgYOnxTG2kWh1vR8BMxPseWPjRNpb5VtXnMpeYAEAADoRV1Iw==",
+ "license": "MIT"
},
"node_modules/@lezer/generator": {
"version": "1.8.0",
@@ -2070,91 +2159,14 @@
}
},
"node_modules/@lezer/lr": {
- "version": "1.4.3",
- "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.3.tgz",
- "integrity": "sha512-yenN5SqAxAPv/qMnpWW0AT7l+SxVrgG+u0tNsRQWqbrz66HIl8DnEbBObvy21J5K7+I1v7gsAnlE2VQ5yYVSeA==",
+ "version": "1.4.8",
+ "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.8.tgz",
+ "integrity": "sha512-bPWa0Pgx69ylNlMlPvBPryqeLYQjyJjqPx+Aupm5zydLIF3NE+6MMLT8Yi23Bd9cif9VS00aUebn+6fDIGBcDA==",
+ "license": "MIT",
"dependencies": {
"@lezer/common": "^1.0.0"
}
},
- "node_modules/@mantine/code-highlight": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-8.3.6.tgz",
- "integrity": "sha512-9jPrhchbfNCA73V3hMjXVcCBYL82/UOA9LiEs5LSwxr1q4JYBEBU8znMmVuxZlXA234Ci234AqxGNXdu9f+p4w==",
- "dependencies": {
- "clsx": "^2.1.1"
- },
- "peerDependencies": {
- "@mantine/core": "8.3.6",
- "@mantine/hooks": "8.3.6",
- "react": "^18.x || ^19.x",
- "react-dom": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/core": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.3.6.tgz",
- "integrity": "sha512-paTl+0x+O/QtgMtqVJaG8maD8sfiOdgPmLOyG485FmeGZ1L3KMdEkhxZtmdGlDFsLXhmMGQ57ducT90bvhXX5A==",
- "dependencies": {
- "@floating-ui/react": "^0.27.16",
- "clsx": "^2.1.1",
- "react-number-format": "^5.4.4",
- "react-remove-scroll": "^2.7.1",
- "react-textarea-autosize": "8.5.9",
- "type-fest": "^4.41.0"
- },
- "peerDependencies": {
- "@mantine/hooks": "8.3.6",
- "react": "^18.x || ^19.x",
- "react-dom": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/dates": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-8.3.6.tgz",
- "integrity": "sha512-lSi1zvyL86SKeePH0J3vOjAR7ZIVNOrZm6ja7jAH6IBdcpQOKH8TXbrcAi5okEStvmvkne7pVaGu0VkdE8KnAw==",
- "dependencies": {
- "clsx": "^2.1.1"
- },
- "peerDependencies": {
- "@mantine/core": "8.3.6",
- "@mantine/hooks": "8.3.6",
- "dayjs": ">=1.0.0",
- "react": "^18.x || ^19.x",
- "react-dom": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/hooks": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.3.6.tgz",
- "integrity": "sha512-liHfaWXHAkLjJy+Bkr29UsCwAoDQ/a64WrM67lksx8F0qqyjR5RQH8zVlhuOjdpQnwtlUkE/YiTvbJiPcoI0bw==",
- "peerDependencies": {
- "react": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/notifications": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.3.6.tgz",
- "integrity": "sha512-d3A96lyrFOVXtrwASEXALfzooKnnA60T2LclMXFF/4k27Ay5Hwza4D+ylqgxf0RkPfF9J6LhBXk72OjL5RH5Kg==",
- "dependencies": {
- "@mantine/store": "8.3.6",
- "react-transition-group": "4.4.5"
- },
- "peerDependencies": {
- "@mantine/core": "8.3.6",
- "@mantine/hooks": "8.3.6",
- "react": "^18.x || ^19.x",
- "react-dom": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/store": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.3.6.tgz",
- "integrity": "sha512-fo86wF6nL8RPukY8cseAFQKk+bRVv3Ga/WmHJMYRsCbNleZOEZMXXUf/OVhmr1D3t+xzCzAlJe/sQ8MIS+c+pA==",
- "peerDependencies": {
- "react": "^18.x || ^19.x"
- }
- },
"node_modules/@marijn/find-cluster-break": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz",
@@ -2199,41 +2211,6 @@
"@nexucis/fuzzy": "^0.5.1"
}
},
- "node_modules/@nodelib/fs.scandir": {
- "version": "2.1.5",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
- "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
- "dev": true,
- "dependencies": {
- "@nodelib/fs.stat": "2.0.5",
- "run-parallel": "^1.1.9"
- },
- "engines": {
- "node": ">= 8"
- }
- },
- "node_modules/@nodelib/fs.stat": {
- "version": "2.0.5",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
- "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
- "dev": true,
- "engines": {
- "node": ">= 8"
- }
- },
- "node_modules/@nodelib/fs.walk": {
- "version": "1.2.8",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
- "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
- "dev": true,
- "dependencies": {
- "@nodelib/fs.scandir": "2.1.5",
- "fastq": "^1.6.0"
- },
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/@open-draft/deferred-promise": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz",
@@ -2285,13 +2262,14 @@
"link": true
},
"node_modules/@reduxjs/toolkit": {
- "version": "2.10.1",
- "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.10.1.tgz",
- "integrity": "sha512-/U17EXQ9Do9Yx4DlNGU6eVNfZvFJfYpUtRRdLf19PbPjdWBxNlxGZXywQZ1p1Nz8nMkWplTI7iD/23m07nolDA==",
+ "version": "2.11.2",
+ "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.11.2.tgz",
+ "integrity": "sha512-Kd6kAHTA6/nUpp8mySPqj3en3dm0tdMIgbttnQ1xFMVpufoj+ADi8pXLBsd4xzTRHQa7t/Jv8W5UnCuW4kuWMQ==",
+ "license": "MIT",
"dependencies": {
"@standard-schema/spec": "^1.0.0",
"@standard-schema/utils": "^0.3.0",
- "immer": "^10.2.0",
+ "immer": "^11.0.0",
"redux": "^5.0.1",
"redux-thunk": "^3.1.0",
"reselect": "^5.1.0"
@@ -2688,12 +2666,12 @@
}
},
"node_modules/@tabler/icons-react": {
- "version": "3.35.0",
- "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.35.0.tgz",
- "integrity": "sha512-XG7t2DYf3DyHT5jxFNp5xyLVbL4hMJYJhiSdHADzAjLRYfL7AnjlRfiHDHeXxkb2N103rEIvTsBRazxXtAUz2g==",
+ "version": "3.36.1",
+ "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.36.1.tgz",
+ "integrity": "sha512-/8nOXeNeMoze9xY/QyEKG65wuvRhkT3q9aytaur6Gj8bYU2A98YVJyLc9MRmc5nVvpy+bRlrrwK/Ykr8WGyUWg==",
"license": "MIT",
"dependencies": {
- "@tabler/icons": "3.35.0"
+ "@tabler/icons": ""
},
"funding": {
"type": "github",
@@ -2704,20 +2682,22 @@
}
},
"node_modules/@tanstack/query-core": {
- "version": "5.90.7",
- "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.7.tgz",
- "integrity": "sha512-6PN65csiuTNfBMXqQUxQhCNdtm1rV+9kC9YwWAIKcaxAauq3Wu7p18j3gQY3YIBJU70jT/wzCCZ2uqto/vQgiQ==",
+ "version": "5.90.20",
+ "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.20.tgz",
+ "integrity": "sha512-OMD2HLpNouXEfZJWcKeVKUgQ5n+n3A2JFmBaScpNDUqSrQSjiveC7dKMe53uJUg1nDG16ttFPz2xfilz6i2uVg==",
+ "license": "MIT",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/tannerlinsley"
}
},
"node_modules/@tanstack/react-query": {
- "version": "5.90.7",
- "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.7.tgz",
- "integrity": "sha512-wAHc/cgKzW7LZNFloThyHnV/AX9gTg3w5yAv0gvQHPZoCnepwqCMtzbuPbb2UvfvO32XZ46e8bPOYbfZhzVnnQ==",
+ "version": "5.90.20",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.20.tgz",
+ "integrity": "sha512-vXBxa+qeyveVO7OA0jX1z+DeyCA4JKnThKv411jd5SORpBKgkcVnYKCiBgECvADvniBX7tobwBmg01qq9JmMJw==",
+ "license": "MIT",
"dependencies": {
- "@tanstack/query-core": "5.90.7"
+ "@tanstack/query-core": "5.90.20"
},
"funding": {
"type": "github",
@@ -2772,9 +2752,9 @@
"license": "MIT"
},
"node_modules/@testing-library/react": {
- "version": "16.3.0",
- "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.0.tgz",
- "integrity": "sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw==",
+ "version": "16.3.2",
+ "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.2.tgz",
+ "integrity": "sha512-XU5/SytQM+ykqMnAnvB2umaJNIOsLF3PVv//1Ew4CTcpz0/BRyy/af40qqrt7SjKpDdT1saBMc42CUok5gaw+g==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.12.5"
@@ -2963,9 +2943,9 @@
"dev": true
},
"node_modules/@types/lodash": {
- "version": "4.17.20",
- "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz",
- "integrity": "sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==",
+ "version": "4.17.23",
+ "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.23.tgz",
+ "integrity": "sha512-RDvF6wTulMPjrNdCoYRC8gNR880JNGT8uB+REUpC2Ns4pRqQJhGz90wh7rgdXDPpCczF3VGktDuFGVnz8zP7HA==",
"license": "MIT"
},
"node_modules/@types/node": {
@@ -2978,19 +2958,21 @@
}
},
"node_modules/@types/react": {
- "version": "19.2.2",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.2.tgz",
- "integrity": "sha512-6mDvHUFSjyT2B2yeNx2nUgMxh9LtOWvkhIU3uePn2I2oyNymUAX1NIsdgviM4CH+JSrp2D2hsMvJOkxY+0wNRA==",
+ "version": "19.2.13",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.13.tgz",
+ "integrity": "sha512-KkiJeU6VbYbUOp5ITMIc7kBfqlYkKA5KhEHVrGMmUUMt7NeaZg65ojdPk+FtNrBAOXNVM5QM72jnADjM+XVRAQ==",
"devOptional": true,
+ "license": "MIT",
"dependencies": {
- "csstype": "^3.0.2"
+ "csstype": "^3.2.2"
}
},
"node_modules/@types/react-dom": {
- "version": "19.2.2",
- "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.2.tgz",
- "integrity": "sha512-9KQPoO6mZCi7jcIStSnlOWn2nEF3mNmyr3rIAsGnAbQKYbRLyqmeSc39EVgtxXVia+LMT8j3knZLAZAh+xLmrw==",
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz",
+ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
"devOptional": true,
+ "license": "MIT",
"peerDependencies": {
"@types/react": "^19.2.0"
}
@@ -3042,20 +3024,20 @@
"license": "MIT"
},
"node_modules/@typescript-eslint/eslint-plugin": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.46.3.tgz",
- "integrity": "sha512-sbaQ27XBUopBkRiuY/P9sWGOWUW4rl8fDoHIUmLpZd8uldsTyB4/Zg6bWTegPoTLnKj9Hqgn3QD6cjPNB32Odw==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.54.0.tgz",
+ "integrity": "sha512-hAAP5io/7csFStuOmR782YmTthKBJ9ND3WVL60hcOjvtGFb+HJxH4O5huAcmcZ9v9G8P+JETiZ/G1B8MALnWZQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@eslint-community/regexpp": "^4.10.0",
- "@typescript-eslint/scope-manager": "8.46.3",
- "@typescript-eslint/type-utils": "8.46.3",
- "@typescript-eslint/utils": "8.46.3",
- "@typescript-eslint/visitor-keys": "8.46.3",
- "graphemer": "^1.4.0",
- "ignore": "^7.0.0",
+ "@eslint-community/regexpp": "^4.12.2",
+ "@typescript-eslint/scope-manager": "8.54.0",
+ "@typescript-eslint/type-utils": "8.54.0",
+ "@typescript-eslint/utils": "8.54.0",
+ "@typescript-eslint/visitor-keys": "8.54.0",
+ "ignore": "^7.0.5",
"natural-compare": "^1.4.0",
- "ts-api-utils": "^2.1.0"
+ "ts-api-utils": "^2.4.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3065,7 +3047,7 @@
"url": "https://opencollective.com/typescript-eslint"
},
"peerDependencies": {
- "@typescript-eslint/parser": "^8.46.3",
+ "@typescript-eslint/parser": "^8.54.0",
"eslint": "^8.57.0 || ^9.0.0",
"typescript": ">=4.8.4 <6.0.0"
}
@@ -3081,16 +3063,17 @@
}
},
"node_modules/@typescript-eslint/parser": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.46.3.tgz",
- "integrity": "sha512-6m1I5RmHBGTnUGS113G04DMu3CpSdxCAU/UvtjNWL4Nuf3MW9tQhiJqRlHzChIkhy6kZSAQmc+I1bcGjE3yNKg==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.54.0.tgz",
+ "integrity": "sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/scope-manager": "8.46.3",
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/typescript-estree": "8.46.3",
- "@typescript-eslint/visitor-keys": "8.46.3",
- "debug": "^4.3.4"
+ "@typescript-eslint/scope-manager": "8.54.0",
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/typescript-estree": "8.54.0",
+ "@typescript-eslint/visitor-keys": "8.54.0",
+ "debug": "^4.4.3"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3105,14 +3088,15 @@
}
},
"node_modules/@typescript-eslint/project-service": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.46.3.tgz",
- "integrity": "sha512-Fz8yFXsp2wDFeUElO88S9n4w1I4CWDTXDqDr9gYvZgUpwXQqmZBr9+NTTql5R3J7+hrJZPdpiWaB9VNhAKYLuQ==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.54.0.tgz",
+ "integrity": "sha512-YPf+rvJ1s7MyiWM4uTRhE4DvBXrEV+d8oC3P9Y2eT7S+HBS0clybdMIPnhiATi9vZOYDc7OQ1L/i6ga6NFYK/g==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/tsconfig-utils": "^8.46.3",
- "@typescript-eslint/types": "^8.46.3",
- "debug": "^4.3.4"
+ "@typescript-eslint/tsconfig-utils": "^8.54.0",
+ "@typescript-eslint/types": "^8.54.0",
+ "debug": "^4.4.3"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3126,13 +3110,14 @@
}
},
"node_modules/@typescript-eslint/scope-manager": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.46.3.tgz",
- "integrity": "sha512-FCi7Y1zgrmxp3DfWfr+3m9ansUUFoy8dkEdeQSgA9gbm8DaHYvZCdkFRQrtKiedFf3Ha6VmoqoAaP68+i+22kg==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.54.0.tgz",
+ "integrity": "sha512-27rYVQku26j/PbHYcVfRPonmOlVI6gihHtXFbTdB5sb6qA0wdAQAbyXFVarQ5t4HRojIz64IV90YtsjQSSGlQg==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/visitor-keys": "8.46.3"
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/visitor-keys": "8.54.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3143,10 +3128,11 @@
}
},
"node_modules/@typescript-eslint/tsconfig-utils": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.46.3.tgz",
- "integrity": "sha512-GLupljMniHNIROP0zE7nCcybptolcH8QZfXOpCfhQDAdwJ/ZTlcaBOYebSOZotpti/3HrHSw7D3PZm75gYFsOA==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.54.0.tgz",
+ "integrity": "sha512-dRgOyT2hPk/JwxNMZDsIXDgyl9axdJI3ogZ2XWhBPsnZUv+hPesa5iuhdYt2gzwA9t8RE5ytOJ6xB0moV0Ujvw==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -3159,16 +3145,17 @@
}
},
"node_modules/@typescript-eslint/type-utils": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.46.3.tgz",
- "integrity": "sha512-ZPCADbr+qfz3aiTTYNNkCbUt+cjNwI/5McyANNrFBpVxPt7GqpEYz5ZfdwuFyGUnJ9FdDXbGODUu6iRCI6XRXw==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.54.0.tgz",
+ "integrity": "sha512-hiLguxJWHjjwL6xMBwD903ciAwd7DmK30Y9Axs/etOkftC3ZNN9K44IuRD/EB08amu+Zw6W37x9RecLkOo3pMA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/typescript-estree": "8.46.3",
- "@typescript-eslint/utils": "8.46.3",
- "debug": "^4.3.4",
- "ts-api-utils": "^2.1.0"
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/typescript-estree": "8.54.0",
+ "@typescript-eslint/utils": "8.54.0",
+ "debug": "^4.4.3",
+ "ts-api-utils": "^2.4.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3183,10 +3170,11 @@
}
},
"node_modules/@typescript-eslint/types": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.46.3.tgz",
- "integrity": "sha512-G7Ok9WN/ggW7e/tOf8TQYMaxgID3Iujn231hfi0Pc7ZheztIJVpO44ekY00b7akqc6nZcvregk0Jpah3kep6hA==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.54.0.tgz",
+ "integrity": "sha512-PDUI9R1BVjqu7AUDsRBbKMtwmjWcn4J3le+5LpcFgWULN3LvHC5rkc9gCVxbrsrGmO1jfPybN5s6h4Jy+OnkAA==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -3196,21 +3184,21 @@
}
},
"node_modules/@typescript-eslint/typescript-estree": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.46.3.tgz",
- "integrity": "sha512-f/NvtRjOm80BtNM5OQtlaBdM5BRFUv7gf381j9wygDNL+qOYSNOgtQ/DCndiYi80iIOv76QqaTmp4fa9hwI0OA==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.54.0.tgz",
+ "integrity": "sha512-BUwcskRaPvTk6fzVWgDPdUndLjB87KYDrN5EYGetnktoeAvPtO4ONHlAZDnj5VFnUANg0Sjm7j4usBlnoVMHwA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/project-service": "8.46.3",
- "@typescript-eslint/tsconfig-utils": "8.46.3",
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/visitor-keys": "8.46.3",
- "debug": "^4.3.4",
- "fast-glob": "^3.3.2",
- "is-glob": "^4.0.3",
- "minimatch": "^9.0.4",
- "semver": "^7.6.0",
- "ts-api-utils": "^2.1.0"
+ "@typescript-eslint/project-service": "8.54.0",
+ "@typescript-eslint/tsconfig-utils": "8.54.0",
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/visitor-keys": "8.54.0",
+ "debug": "^4.4.3",
+ "minimatch": "^9.0.5",
+ "semver": "^7.7.3",
+ "tinyglobby": "^0.2.15",
+ "ts-api-utils": "^2.4.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3228,6 +3216,7 @@
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
@@ -3237,6 +3226,7 @@
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
"integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
+ "license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
@@ -3248,15 +3238,16 @@
}
},
"node_modules/@typescript-eslint/utils": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.46.3.tgz",
- "integrity": "sha512-VXw7qmdkucEx9WkmR3ld/u6VhRyKeiF1uxWwCy/iuNfokjJ7VhsgLSOTjsol8BunSw190zABzpwdNsze2Kpo4g==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.54.0.tgz",
+ "integrity": "sha512-9Cnda8GS57AQakvRyG0PTejJNlA2xhvyNtEVIMlDWOOeEyBkYWhGPnfrIAnqxLMTSTo6q8g12XVjjev5l1NvMA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@eslint-community/eslint-utils": "^4.7.0",
- "@typescript-eslint/scope-manager": "8.46.3",
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/typescript-estree": "8.46.3"
+ "@eslint-community/eslint-utils": "^4.9.1",
+ "@typescript-eslint/scope-manager": "8.54.0",
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/typescript-estree": "8.54.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3271,12 +3262,13 @@
}
},
"node_modules/@typescript-eslint/visitor-keys": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.46.3.tgz",
- "integrity": "sha512-uk574k8IU0rOF/AjniX8qbLSGURJVUCeM5e4MIMKBFFi8weeiLrG1fyQejyLXQpRZbU/1BuQasleV/RfHC3hHg==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.54.0.tgz",
+ "integrity": "sha512-VFlhGSl4opC0bprJiItPQ1RfUhGDIBokcPwaFH4yiBCaNPeld/9VeXbiPO1cLyorQi1G1vL+ecBk1x8o1axORA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.46.3",
+ "@typescript-eslint/types": "8.54.0",
"eslint-visitor-keys": "^4.2.1"
},
"engines": {
@@ -3288,9 +3280,10 @@
}
},
"node_modules/@uiw/codemirror-extensions-basic-setup": {
- "version": "4.25.3",
- "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.25.3.tgz",
- "integrity": "sha512-F1doRyD50CWScwGHG2bBUtUpwnOv/zqSnzkZqJcX5YAHQx6Z1CuX8jdnFMH6qktRrPU1tfpNYftTWu3QIoHiMA==",
+ "version": "4.25.4",
+ "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.25.4.tgz",
+ "integrity": "sha512-YzNwkm0AbPv1EXhCHYR5v0nqfemG2jEB0Z3Att4rBYqKrlG7AA9Rhjc3IyBaOzsBu18wtrp9/+uhTyu7TXSRng==",
+ "license": "MIT",
"dependencies": {
"@codemirror/autocomplete": "^6.0.0",
"@codemirror/commands": "^6.0.0",
@@ -3314,15 +3307,16 @@
}
},
"node_modules/@uiw/react-codemirror": {
- "version": "4.25.3",
- "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.25.3.tgz",
- "integrity": "sha512-1wtBZTXPIp8u6F/xjHvsUAYlEeF5Dic4xZBnqJyLzv7o7GjGYEUfSz9Z7bo9aK9GAx2uojG/AuBMfhA4uhvIVQ==",
+ "version": "4.25.4",
+ "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.25.4.tgz",
+ "integrity": "sha512-ipO067oyfUw+DVaXhQCxkB0ZD9b7RnY+ByrprSYSKCHaULvJ3sqWYC/Zen6zVQ8/XC4o5EPBfatGiX20kC7XGA==",
+ "license": "MIT",
"dependencies": {
"@babel/runtime": "^7.18.6",
"@codemirror/commands": "^6.1.0",
"@codemirror/state": "^6.1.1",
"@codemirror/theme-one-dark": "^6.0.0",
- "@uiw/codemirror-extensions-basic-setup": "4.25.3",
+ "@uiw/codemirror-extensions-basic-setup": "4.25.4",
"codemirror": "^6.0.0"
},
"funding": {
@@ -4117,11 +4111,16 @@
"license": "MIT"
},
"node_modules/cookie": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz",
- "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
+ "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
+ "license": "MIT",
"engines": {
"node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
}
},
"node_modules/create-jest": {
@@ -4200,9 +4199,9 @@
}
},
"node_modules/csstype": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
- "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
"license": "MIT"
},
"node_modules/data-urls": {
@@ -4530,10 +4529,11 @@
}
},
"node_modules/eslint": {
- "version": "9.39.1",
- "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.1.tgz",
- "integrity": "sha512-BhHmn2yNOFA9H9JmmIVKJmd288g9hrVRDkdoIgRCRuSySRUHH7r/DI6aAXW9T1WwUuY3DFgrcaqB+deURBLR5g==",
+ "version": "9.39.2",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz",
+ "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -4541,7 +4541,7 @@
"@eslint/config-helpers": "^0.4.2",
"@eslint/core": "^0.17.0",
"@eslint/eslintrc": "^3.3.1",
- "@eslint/js": "9.39.1",
+ "@eslint/js": "9.39.2",
"@eslint/plugin-kit": "^0.4.1",
"@humanfs/node": "^0.16.6",
"@humanwhocodes/module-importer": "^1.0.1",
@@ -4605,14 +4605,14 @@
}
},
"node_modules/eslint-plugin-prettier": {
- "version": "5.5.4",
- "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.5.4.tgz",
- "integrity": "sha512-swNtI95SToIz05YINMA6Ox5R057IMAmWZ26GqPxusAp1TZzj+IdY9tXNWWD3vkF/wEqydCONcwjTFpxybBqZsg==",
+ "version": "5.5.5",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.5.5.tgz",
+ "integrity": "sha512-hscXkbqUZ2sPithAuLm5MXL+Wph+U7wHngPBv9OMWwlP8iaflyxpjTYZkmdgB4/vPIhemRlBEoLrH7UC1n7aUw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "prettier-linter-helpers": "^1.0.0",
- "synckit": "^0.11.7"
+ "prettier-linter-helpers": "^1.0.1",
+ "synckit": "^0.11.12"
},
"engines": {
"node": "^14.18.0 || >=16.0.0"
@@ -4649,12 +4649,13 @@
}
},
"node_modules/eslint-plugin-react-refresh": {
- "version": "0.4.24",
- "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.24.tgz",
- "integrity": "sha512-nLHIW7TEq3aLrEYWpVaJ1dRgFR+wLDPN8e8FpYAql/bMV2oBEfC37K0gLEGgv9fy66juNShSMV8OkTqzltcG/w==",
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.5.0.tgz",
+ "integrity": "sha512-ZYvmh7VfVgqR/7wR71I3Zl6hK/C5CcxdWYKZSpHawS5JCNgE4efhQWg/+/WPpgGAp9Ngp/rRZYyaIwmPQBq/lA==",
"dev": true,
+ "license": "MIT",
"peerDependencies": {
- "eslint": ">=8.40"
+ "eslint": ">=9"
}
},
"node_modules/eslint-scope": {
@@ -4852,34 +4853,6 @@
"dev": true,
"license": "Apache-2.0"
},
- "node_modules/fast-glob": {
- "version": "3.3.3",
- "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz",
- "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==",
- "dev": true,
- "dependencies": {
- "@nodelib/fs.stat": "^2.0.2",
- "@nodelib/fs.walk": "^1.2.3",
- "glob-parent": "^5.1.2",
- "merge2": "^1.3.0",
- "micromatch": "^4.0.8"
- },
- "engines": {
- "node": ">=8.6.0"
- }
- },
- "node_modules/fast-glob/node_modules/glob-parent": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
- "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
- "dev": true,
- "dependencies": {
- "is-glob": "^4.0.1"
- },
- "engines": {
- "node": ">= 6"
- }
- },
"node_modules/fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
@@ -4894,15 +4867,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/fastq": {
- "version": "1.19.1",
- "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz",
- "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==",
- "dev": true,
- "dependencies": {
- "reusify": "^1.0.4"
- }
- },
"node_modules/fb-watchman": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz",
@@ -5132,13 +5096,6 @@
"dev": true,
"license": "ISC"
},
- "node_modules/graphemer": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
- "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/handlebars": {
"version": "4.7.8",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
@@ -5295,9 +5252,10 @@
}
},
"node_modules/immer": {
- "version": "10.2.0",
- "resolved": "https://registry.npmjs.org/immer/-/immer-10.2.0.tgz",
- "integrity": "sha512-d/+XTN3zfODyjr89gM3mPq1WNX2B8pYsu7eORitdwyA2sBubnTl3laYlBk4sXY5FUa5qTZGBDPJICVbvqzjlbw==",
+ "version": "11.1.3",
+ "resolved": "https://registry.npmjs.org/immer/-/immer-11.1.3.tgz",
+ "integrity": "sha512-6jQTc5z0KJFtr1UgFpIL3N9XSC3saRaI9PwWtzM2pSqkNGtiNkYY2OSwkOGDK2XcTRcLb1pi/aNkKZz0nxVH4Q==",
+ "license": "MIT",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/immer"
@@ -6544,9 +6502,9 @@
"license": "MIT"
},
"node_modules/js-yaml": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
- "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -6726,9 +6684,9 @@
}
},
"node_modules/lodash": {
- "version": "4.17.21",
- "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
- "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+ "version": "4.17.23",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
+ "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
"license": "MIT"
},
"node_modules/lodash.memoize": {
@@ -6764,10 +6722,10 @@
"license": "MIT"
},
"node_modules/lru-cache": {
- "version": "11.2.2",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.2.tgz",
- "integrity": "sha512-F9ODfyqML2coTIsQpSkRHnLSZMtkU8Q+mSfcaIyKwy58u+8k5nvAYeiNhsyMARvzNcXJ9QfWVrcPsC9e9rAxtg==",
- "license": "ISC",
+ "version": "11.2.5",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.5.tgz",
+ "integrity": "sha512-vFrFJkWtJvJnD5hg+hJvVE8Lh/TcMzKnTgCWmtBipwI5yLX/iX+5UB2tfuyODF5E7k9xEzMdYgGqaSb1c0c5Yw==",
+ "license": "BlueOak-1.0.0",
"engines": {
"node": "20 || >=22"
}
@@ -6835,15 +6793,6 @@
"license": "MIT",
"peer": true
},
- "node_modules/merge2": {
- "version": "1.4.1",
- "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
- "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
- "dev": true,
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/micromatch": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
@@ -7551,9 +7500,9 @@
}
},
"node_modules/prettier": {
- "version": "3.7.4",
- "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz",
- "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==",
+ "version": "3.8.1",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.1.tgz",
+ "integrity": "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==",
"dev": true,
"license": "MIT",
"bin": {
@@ -7567,9 +7516,9 @@
}
},
"node_modules/prettier-linter-helpers": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz",
- "integrity": "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==",
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.1.tgz",
+ "integrity": "sha512-SxToR7P8Y2lWmv/kTzVLC1t/GDI2WGjMwNhLLE9qtH8Q13C+aEmuRlzDst4Up4s0Wc8sF2M+J57iB3cMLqftfg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -7675,49 +7624,31 @@
"license": "MIT",
"peer": true
},
- "node_modules/queue-microtask": {
- "version": "1.2.3",
- "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
- "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ]
- },
"node_modules/react": {
- "version": "19.2.0",
- "resolved": "https://registry.npmjs.org/react/-/react-19.2.0.tgz",
- "integrity": "sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==",
+ "version": "19.2.4",
+ "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz",
+ "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==",
+ "license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react-dom": {
- "version": "19.2.0",
- "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.0.tgz",
- "integrity": "sha512-UlbRu4cAiGaIewkPyiRGJk0imDN2T3JjieT6spoL2UeSf5od4n5LB/mQ4ejmxhCFT1tYe8IvaFulzynWovsEFQ==",
+ "version": "19.2.4",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz",
+ "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==",
+ "license": "MIT",
"dependencies": {
"scheduler": "^0.27.0"
},
"peerDependencies": {
- "react": "^19.2.0"
+ "react": "^19.2.4"
}
},
"node_modules/react-infinite-scroll-component": {
- "version": "6.1.0",
- "resolved": "https://registry.npmjs.org/react-infinite-scroll-component/-/react-infinite-scroll-component-6.1.0.tgz",
- "integrity": "sha512-SQu5nCqy8DxQWpnUVLx7V7b7LcA37aM7tvoWjTLZp1dk6EJibM5/4EJKzOnl07/BsM1Y40sKLuqjCwwH/xV0TQ==",
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/react-infinite-scroll-component/-/react-infinite-scroll-component-6.1.1.tgz",
+ "integrity": "sha512-R8YoOyiNDynSWmfVme5LHslsKrP+/xcRUWR2ies8UgUab9dtyw5ECnMCVPPmnmjjF4MWQmfVdRwRWcWaDgeyMA==",
"license": "MIT",
"dependencies": {
"throttle-debounce": "^2.1.0"
@@ -7823,9 +7754,10 @@
}
},
"node_modules/react-router": {
- "version": "7.12.0",
- "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.12.0.tgz",
- "integrity": "sha512-kTPDYPFzDVGIIGNLS5VJykK0HfHLY5MF3b+xj0/tTyNYL1gF1qs7u67Z9jEhQk2sQ98SUaHxlG31g1JtF7IfVw==",
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.13.0.tgz",
+ "integrity": "sha512-PZgus8ETambRT17BUm/LL8lX3Of+oiLaPuVTRH3l1eLvSPpKO3AvhAEb5N7ihAFZQrYDqkvvWfFh9p0z9VsjLw==",
+ "license": "MIT",
"dependencies": {
"cookie": "^1.0.1",
"set-cookie-parser": "^2.6.0"
@@ -7844,11 +7776,12 @@
}
},
"node_modules/react-router-dom": {
- "version": "7.9.5",
- "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.9.5.tgz",
- "integrity": "sha512-mkEmq/K8tKN63Ae2M7Xgz3c9l9YNbY+NHH6NNeUmLA3kDkhKXRsNb/ZpxaEunvGo2/3YXdk5EJU3Hxp3ocaBPw==",
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.13.0.tgz",
+ "integrity": "sha512-5CO/l5Yahi2SKC6rGZ+HDEjpjkGaG/ncEP7eWFTvFxbHP8yeeI0PxTDjimtpXYlR3b3i9/WIL4VJttPrESIf2g==",
+ "license": "MIT",
"dependencies": {
- "react-router": "7.9.5"
+ "react-router": "7.13.0"
},
"engines": {
"node": ">=20.0.0"
@@ -8026,16 +7959,6 @@
"node": ">=10"
}
},
- "node_modules/reusify": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
- "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==",
- "dev": true,
- "engines": {
- "iojs": ">=1.0.0",
- "node": ">=0.10.0"
- }
- },
"node_modules/rollup": {
"version": "4.34.9",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.34.9.tgz",
@@ -8082,29 +8005,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/run-parallel": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
- "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ],
- "dependencies": {
- "queue-microtask": "^1.2.2"
- }
- },
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
@@ -8157,15 +8057,16 @@
}
},
"node_modules/serialize-query-params": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/serialize-query-params/-/serialize-query-params-2.0.2.tgz",
- "integrity": "sha512-1chMo1dST4pFA9RDXAtF0Rbjaut4is7bzFbI1Z26IuMub68pNCILku85aYmeFhvnY//BXUPUhoRMjYcsT93J/Q==",
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/serialize-query-params/-/serialize-query-params-2.0.4.tgz",
+ "integrity": "sha512-y9WzzDj3BsGgKLCh0ugiinufS//YqOfao/yVJjkXA4VLuyNCfHOLU/cbulGPxs3aeCqhvROw7qPL04JSZnCo0w==",
"license": "ISC"
},
"node_modules/set-cookie-parser": {
"version": "2.7.2",
"resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz",
- "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw=="
+ "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==",
+ "license": "MIT"
},
"node_modules/shebang-command": {
"version": "2.0.0",
@@ -8479,9 +8380,9 @@
"license": "MIT"
},
"node_modules/synckit": {
- "version": "0.11.11",
- "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.11.11.tgz",
- "integrity": "sha512-MeQTA1r0litLUf0Rp/iisCaL8761lKAZHaimlbGK4j0HysC4PLfqygQj9srcs0m2RdtDYnF8UuYyKpbjHYp7Jw==",
+ "version": "0.11.12",
+ "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.11.12.tgz",
+ "integrity": "sha512-Bh7QjT8/SuKUIfObSXNHNSK6WHo6J1tHCqJsuaFDP7gP0fkzSfTxI8y85JrppZ0h8l0maIgc2tfuZQ6/t3GtnQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -8681,10 +8582,11 @@
}
},
"node_modules/ts-api-utils": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz",
- "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==",
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.4.0.tgz",
+ "integrity": "sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=18.12"
},
@@ -8948,12 +8850,12 @@
}
},
"node_modules/use-query-params": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/use-query-params/-/use-query-params-2.2.1.tgz",
- "integrity": "sha512-i6alcyLB8w9i3ZK3caNftdb+UnbfBRNPDnc89CNQWkGRmDrm/gfydHvMBfVsQJRq3NoHOM2dt/ceBWG2397v1Q==",
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/use-query-params/-/use-query-params-2.2.2.tgz",
+ "integrity": "sha512-OwGab8u8/x2xZp9uSyBsx0kXlkR9IR436zbygsYVGikPYY3OJosvve6IJVGwIJPcfyb/YHwvPrUNu65/JR++Kw==",
"license": "ISC",
"dependencies": {
- "serialize-query-params": "^2.0.2"
+ "serialize-query-params": "^2.0.3"
},
"peerDependencies": {
"@reach/router": "^1.2.1",
diff --git a/web/ui/package.json b/web/ui/package.json
index e634652b41..172e646aeb 100644
--- a/web/ui/package.json
+++ b/web/ui/package.json
@@ -16,11 +16,11 @@
],
"devDependencies": {
"@types/jest": "^29.5.14",
- "@typescript-eslint/eslint-plugin": "^8.46.3",
- "@typescript-eslint/parser": "^8.46.3",
+ "@typescript-eslint/eslint-plugin": "^8.54.0",
+ "@typescript-eslint/parser": "^8.54.0",
"eslint-config-prettier": "^10.1.8",
- "prettier": "^3.6.2",
- "ts-jest": "^29.4.5",
+ "prettier": "^3.8.1",
+ "ts-jest": "^29.4.6",
"typescript": "^5.9.3",
"vite": "^6.4.1"
}
From e6b14eaf0b985c18bac68109ad62b3c98af35447 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Thu, 5 Feb 2026 14:22:40 -0800
Subject: [PATCH 121/165] Fix critical npm vulnerabilities with npm audit fix
Signed-off-by: Ganesh Vernekar
---
web/ui/package-lock.json | 189 +++++++++++++++++++++++++++++++++++++--
1 file changed, 180 insertions(+), 9 deletions(-)
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 8254c8b51c..7669399b66 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -1658,9 +1658,9 @@
}
},
"node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": {
- "version": "3.14.1",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
- "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
"dev": true,
"license": "MIT",
"peer": true,
@@ -3776,9 +3776,9 @@
}
},
"node_modules/brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -3875,6 +3875,20 @@
"node": ">=8"
}
},
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
@@ -4406,6 +4420,21 @@
"url": "https://github.com/fb55/domutils?sponsor=1"
}
},
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/electron-to-chromium": {
"version": "1.5.228",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.228.tgz",
@@ -4458,6 +4487,26 @@
"is-arrayish": "^0.2.1"
}
},
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/es-module-lexer": {
"version": "1.7.0",
"resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz",
@@ -4465,6 +4514,35 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/esbuild": {
"version": "0.25.0",
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.0.tgz",
@@ -4940,14 +5018,16 @@
"dev": true
},
"node_modules/form-data": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
- "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
"dev": true,
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
"mime-types": "^2.1.12"
},
"engines": {
@@ -5008,6 +5088,31 @@
"node": "6.* || 8.* || >= 10.*"
}
},
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
"node_modules/get-nonce": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz",
@@ -5027,6 +5132,20 @@
"node": ">=8.0.0"
}
},
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/get-stream": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
@@ -5089,6 +5208,19 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
@@ -5127,6 +5259,35 @@
"node": ">=8"
}
},
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
"node_modules/hasown": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
@@ -6785,6 +6946,16 @@
"tmpl": "1.0.5"
}
},
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
From 213bc8e538a1e15c3cc51c9d87b5aeb1e28e6c17 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Thu, 5 Feb 2026 14:48:47 -0800
Subject: [PATCH 122/165] UI: Move HistoryCompleteStrategy into its own file
and fix lint
Signed-off-by: Ganesh Vernekar
---
.../src/pages/query/ExpressionInput.tsx | 46 +----
.../pages/query/HistoryCompleteStrategy.tsx | 45 +++++
.../src/pages/graph/HistogramChart.test.tsx | 18 +-
.../pages/graph/HistorgramHelpers.test.tsx | 179 ++++++++++--------
web/ui/react-app/src/utils/utils.test.ts | 6 +-
5 files changed, 165 insertions(+), 129 deletions(-)
create mode 100644 web/ui/mantine-ui/src/pages/query/HistoryCompleteStrategy.tsx
diff --git a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
index 4c3209e53a..2193dba267 100644
--- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
+++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
@@ -11,7 +11,6 @@ import {
useComputedColorScheme,
} from "@mantine/core";
import {
- CompleteStrategy,
PromQLExtension,
newCompleteStrategy,
} from "@prometheus-io/codemirror-promql";
@@ -36,12 +35,9 @@ import {
bracketMatching,
indentOnInput,
syntaxHighlighting,
- syntaxTree,
} from "@codemirror/language";
import classes from "./ExpressionInput.module.css";
import {
- CompletionContext,
- CompletionResult,
autocompletion,
closeBrackets,
closeBracketsKeymap,
@@ -71,50 +67,10 @@ import MetricsExplorer from "./MetricsExplorer/MetricsExplorer";
import ErrorBoundary from "../../components/ErrorBoundary";
import { useAppSelector } from "../../state/hooks";
import { inputIconStyle, menuIconStyle } from "../../styles";
+import { HistoryCompleteStrategy } from "./HistoryCompleteStrategy";
const promqlExtension = new PromQLExtension();
-// Autocompletion strategy that wraps the main one and enriches
-// it with past query items.
-export class HistoryCompleteStrategy implements CompleteStrategy {
- private complete: CompleteStrategy;
- private queryHistory: string[];
- constructor(complete: CompleteStrategy, queryHistory: string[]) {
- this.complete = complete;
- this.queryHistory = queryHistory;
- }
-
- promQL(
- context: CompletionContext
- ): Promise<CompletionResult | null> | CompletionResult | null {
- return Promise.resolve(this.complete.promQL(context)).then((res) => {
- const { state, pos } = context;
- const tree = syntaxTree(state).resolve(pos, -1);
- const start = res != null ? res.from : tree.from;
-
- if (start !== 0) {
- return res;
- }
-
- const historyItems: CompletionResult = {
- from: start,
- to: pos,
- options: this.queryHistory.map((q) => ({
- label: q.length < 80 ? q : q.slice(0, 76).concat("..."),
- detail: "past query",
- apply: q,
- info: q.length < 80 ? undefined : q,
- })),
- validFor: /^[a-zA-Z0-9_:]+$/,
- };
-
- if (res !== null) {
- historyItems.options = historyItems.options.concat(res.options);
- }
- return historyItems;
- });
- }
-}
interface ExpressionInputProps {
initialExpr: string;
diff --git a/web/ui/mantine-ui/src/pages/query/HistoryCompleteStrategy.tsx b/web/ui/mantine-ui/src/pages/query/HistoryCompleteStrategy.tsx
new file mode 100644
index 0000000000..e56f645fc8
--- /dev/null
+++ b/web/ui/mantine-ui/src/pages/query/HistoryCompleteStrategy.tsx
@@ -0,0 +1,45 @@
+// Autocompletion strategy that wraps the main one and enriches
+// it with past query items.
+import {CompleteStrategy} from "@prometheus-io/codemirror-promql";
+import {CompletionContext, CompletionResult} from "@codemirror/autocomplete";
+import {syntaxTree} from "@codemirror/language";
+
+export class HistoryCompleteStrategy implements CompleteStrategy {
+ private complete: CompleteStrategy;
+ private queryHistory: string[];
+ constructor(complete: CompleteStrategy, queryHistory: string[]) {
+ this.complete = complete;
+ this.queryHistory = queryHistory;
+ }
+
+ promQL(
+ context: CompletionContext
+ ): Promise<CompletionResult | null> | CompletionResult | null {
+ return Promise.resolve(this.complete.promQL(context)).then((res) => {
+ const { state, pos } = context;
+ const tree = syntaxTree(state).resolve(pos, -1);
+ const start = res != null ? res.from : tree.from;
+
+ if (start !== 0) {
+ return res;
+ }
+
+ const historyItems: CompletionResult = {
+ from: start,
+ to: pos,
+ options: this.queryHistory.map((q) => ({
+ label: q.length < 80 ? q : q.slice(0, 76).concat("..."),
+ detail: "past query",
+ apply: q,
+ info: q.length < 80 ? undefined : q,
+ })),
+ validFor: /^[a-zA-Z0-9_:]+$/,
+ };
+
+ if (res !== null) {
+ historyItems.options = historyItems.options.concat(res.options);
+ }
+ return historyItems;
+ });
+ }
+}
\ No newline at end of file
diff --git a/web/ui/react-app/src/pages/graph/HistogramChart.test.tsx b/web/ui/react-app/src/pages/graph/HistogramChart.test.tsx
index 27018c50ca..e9529282b1 100644
--- a/web/ui/react-app/src/pages/graph/HistogramChart.test.tsx
+++ b/web/ui/react-app/src/pages/graph/HistogramChart.test.tsx
@@ -68,7 +68,6 @@ describe('HistogramChart', () => {
scale: 'linear' as 'linear' | 'exponential',
};
-
beforeEach(() => {
mockFormat.mockClear();
mockResolvedOptions.mockClear();
@@ -163,7 +162,9 @@ describe('HistogramChart', () => {
describe('Exponential Scale', () => {
beforeEach(() => {
- wrapper = mount();
+ wrapper = mount(
+
+ );
});
it('renders the correct number of buckets', () => {
@@ -225,17 +226,24 @@ describe('HistogramChart', () => {
expect(b4.find('.histogram-bucket').prop('style')).toHaveProperty('height', `${b4Height}%`);
expect(parseFloat(b4.prop('style')?.left as string)).toBeGreaterThan(0);
expect(parseFloat(b4.prop('style')?.width as string)).toBeGreaterThan(0);
- expect(parseFloat(b4.prop('style')?.left as string) + parseFloat(b4.prop('style')?.width as string)).toBeLessThanOrEqual(100.01);
+ expect(
+ parseFloat(b4.prop('style')?.left as string) + parseFloat(b4.prop('style')?.width as string)
+ ).toBeLessThanOrEqual(100.01);
});
it('handles zero-crossing bucket correctly in exponential scale', () => {
- wrapper = mount();
+ wrapper = mount(
+
+ );
const buckets = wrapper.find('.histogram-bucket-slot');
const countMax = 15;
const b2 = buckets.at(1);
const b2Height = (5 / countMax) * 100;
- expect(b2.find('.histogram-bucket').prop('style')).toHaveProperty('height', expect.stringContaining(b2Height.toFixed(1)));
+ expect(b2.find('.histogram-bucket').prop('style')).toHaveProperty(
+ 'height',
+ expect.stringContaining(b2Height.toFixed(1))
+ );
expect(parseFloat(b2.prop('style')?.left as string)).toBeGreaterThanOrEqual(0);
expect(parseFloat(b2.prop('style')?.width as string)).toBeGreaterThan(0);
});
diff --git a/web/ui/react-app/src/pages/graph/HistorgramHelpers.test.tsx b/web/ui/react-app/src/pages/graph/HistorgramHelpers.test.tsx
index ea70a17d08..480fb3716f 100644
--- a/web/ui/react-app/src/pages/graph/HistorgramHelpers.test.tsx
+++ b/web/ui/react-app/src/pages/graph/HistorgramHelpers.test.tsx
@@ -37,34 +37,31 @@ describe('HistogramHelpers', () => {
];
const bucketsStartingWithZeroCross: Bucket[] = [
- [0, '-1', '1', '5'],
- [0, '1', '10', '20'],
- [0, '10', '100', '8'],
+ [0, '-1', '1', '5'],
+ [0, '1', '10', '20'],
+ [0, '10', '100', '8'],
];
- const bucketsEndingWithZeroCross: Bucket[] = [
- [0, '-100', '-10', '10'],
- [0, '-10', '-1', '15'],
- [0, '-1', '1', '5'],
+ const bucketsEndingWithZeroCross: Bucket[] = [
+ [0, '-100', '-10', '10'],
+ [0, '-10', '-1', '15'],
+ [0, '-1', '1', '5'],
];
- const singleZeroBucket: Bucket[] = [
- [0, '0', '0', '10'],
- ];
+ const singleZeroBucket: Bucket[] = [[0, '0', '0', '10']];
- const emptyBuckets: Bucket[] = [];
+ const emptyBuckets: Bucket[] = [];
- const bucketsWithZeroFallback: Bucket[] = [
- [0, '1', '10', '5'],
- [0, '10', '100', '15'],
- [0, '0', '0', '2']
- ];
-
- const bucketsNegThenPosNoCross: Bucket[] = [
- [0, '-10', '-1', '15'],
- [0, '5', '10', '20'],
- ];
+ const bucketsWithZeroFallback: Bucket[] = [
+ [0, '1', '10', '5'],
+ [0, '10', '100', '15'],
+ [0, '0', '0', '2'],
+ ];
+ const bucketsNegThenPosNoCross: Bucket[] = [
+ [0, '-10', '-1', '15'],
+ [0, '5', '10', '20'],
+ ];
describe('calculateDefaultExpBucketWidth', () => {
it('calculates width for a standard positive bucket', () => {
@@ -75,29 +72,30 @@ describe('HistogramHelpers', () => {
it('calculates width for a standard negative bucket', () => {
const lastBucket = bucketsAllNegative[bucketsAllNegative.length - 1];
- const expectedAbs = Math.abs(Math.log(Math.abs(parseFloat(lastBucket[2]))) - Math.log(Math.abs(parseFloat(lastBucket[1]))));
+ const expectedAbs = Math.abs(
+ Math.log(Math.abs(parseFloat(lastBucket[2]))) - Math.log(Math.abs(parseFloat(lastBucket[1])))
+ );
expect(calculateDefaultExpBucketWidth(lastBucket, bucketsAllNegative)).toBeCloseTo(expectedAbs);
});
it('uses the previous bucket if the last bucket is [0, 0]', () => {
- const lastBucket = bucketsWithZeroFallback[bucketsWithZeroFallback.length - 1];
- const expected = Math.log(100) - Math.log(10);
- expect(calculateDefaultExpBucketWidth(lastBucket, bucketsWithZeroFallback)).toBeCloseTo(expected);
+ const lastBucket = bucketsWithZeroFallback[bucketsWithZeroFallback.length - 1];
+ const expected = Math.log(100) - Math.log(10);
+ expect(calculateDefaultExpBucketWidth(lastBucket, bucketsWithZeroFallback)).toBeCloseTo(expected);
});
it('throws an error if only a single [0, 0] bucket exists', () => {
- const lastBucket = singleZeroBucket[0];
- expect(() => calculateDefaultExpBucketWidth(lastBucket, singleZeroBucket)).toThrow(
- 'Only one bucket in histogram ([-0, 0]). Cannot calculate defaultExpBucketWidth.'
- );
+ const lastBucket = singleZeroBucket[0];
+ expect(() => calculateDefaultExpBucketWidth(lastBucket, singleZeroBucket)).toThrow(
+ 'Only one bucket in histogram ([-0, 0]). Cannot calculate defaultExpBucketWidth.'
+ );
});
});
-
describe('findMinPositive', () => {
- it('returns the first positive left bound when all are positive', () => {
- expect(findMinPositive(bucketsAllPositive)).toEqual(1);
- });
+ it('returns the first positive left bound when all are positive', () => {
+ expect(findMinPositive(bucketsAllPositive)).toEqual(1);
+ });
it('returns the left bound when it is the first positive value', () => {
expect(findMinPositive(bucketsNegThenPosNoCross)).toBe(5);
@@ -108,43 +106,42 @@ describe('HistogramHelpers', () => {
});
it('returns the right bound when the first bucket crosses zero', () => {
- expect(findMinPositive(bucketsStartingWithZeroCross)).toBe(1);
+ expect(findMinPositive(bucketsStartingWithZeroCross)).toBe(1);
});
it('returns the right bound when the last bucket crosses zero', () => {
expect(findMinPositive(bucketsEndingWithZeroCross)).toBe(1);
});
- it('returns 0 when all buckets are negative', () => {
- expect(findMinPositive(bucketsAllNegative)).toBe(0);
- });
+ it('returns 0 when all buckets are negative', () => {
+ expect(findMinPositive(bucketsAllNegative)).toBe(0);
+ });
it('returns 0 for empty buckets', () => {
expect(findMinPositive(emptyBuckets)).toBe(0);
});
- it('returns 0 for only zero bucket', () => {
- expect(findMinPositive(singleZeroBucket)).toBe(0);
- });
+ it('returns 0 for only zero bucket', () => {
+ expect(findMinPositive(singleZeroBucket)).toBe(0);
+ });
it('returns 0 when buckets is undefined', () => {
expect(findMinPositive(undefined as any)).toBe(0);
});
- it('returns the correct positive bound with exact zero bucket present', () => {
- expect(findMinPositive(bucketsWithExactZeroBucket)).toBe(1);
- });
+ it('returns the correct positive bound with exact zero bucket present', () => {
+ expect(findMinPositive(bucketsWithExactZeroBucket)).toBe(1);
+ });
});
-
describe('findMaxNegative', () => {
- it('returns 0 when all buckets are positive', () => {
- expect(findMaxNegative(bucketsAllPositive)).toBe(0);
- });
+ it('returns 0 when all buckets are positive', () => {
+ expect(findMaxNegative(bucketsAllPositive)).toBe(0);
+ });
- it('returns the right bound of the last negative bucket when all are negative', () => {
- expect(findMaxNegative(bucketsAllNegative)).toEqual(-1);
- });
+ it('returns the right bound of the last negative bucket when all are negative', () => {
+ expect(findMaxNegative(bucketsAllNegative)).toEqual(-1);
+ });
it('returns the right bound of the bucket before the middle zero-crossing bucket', () => {
expect(findMaxNegative(bucketsCrossingZeroMid)).toEqual(-1);
@@ -155,7 +152,7 @@ describe('HistogramHelpers', () => {
});
it('returns the right bound of the bucket before the last zero-crossing bucket', () => {
- expect(findMaxNegative(bucketsEndingWithZeroCross)).toEqual(-1);
+ expect(findMaxNegative(bucketsEndingWithZeroCross)).toEqual(-1);
});
it('returns 0 for empty buckets', () => {
@@ -171,23 +168,28 @@ describe('HistogramHelpers', () => {
});
it('returns the right bound of the bucket before an exact zero bucket', () => {
- expect(findMaxNegative(bucketsWithExactZeroBucket)).toEqual(-1);
+ expect(findMaxNegative(bucketsWithExactZeroBucket)).toEqual(-1);
});
});
-
describe('findZeroBucket', () => {
it('returns the index of bucket strictly containing zero', () => {
expect(findZeroBucket(bucketsCrossingZeroMid)).toBe(2);
});
it('returns the index of bucket with zero as left boundary', () => {
- const buckets: Bucket[] = [[0, '-5','-1', '10'], [0, '0', '5', '15']];
+ const buckets: Bucket[] = [
+ [0, '-5', '-1', '10'],
+ [0, '0', '5', '15'],
+ ];
expect(findZeroBucket(buckets)).toBe(1);
});
it('returns the index of bucket with zero as right boundary', () => {
- const buckets: Bucket[] = [[0, '-5', '0', '10'], [0, '1', '5', '15']];
+ const buckets: Bucket[] = [
+ [0, '-5', '0', '10'],
+ [0, '1', '5', '15'],
+ ];
expect(findZeroBucket(buckets)).toBe(0);
});
@@ -208,49 +210,51 @@ describe('HistogramHelpers', () => {
});
it('returns 0 if the first bucket crosses zero', () => {
- expect(findZeroBucket(bucketsStartingWithZeroCross)).toBe(0);
+ expect(findZeroBucket(bucketsStartingWithZeroCross)).toBe(0);
});
- it('returns the last index if the last bucket crosses zero', () => {
- expect(findZeroBucket(bucketsEndingWithZeroCross)).toBe(2);
- });
+ it('returns the last index if the last bucket crosses zero', () => {
+ expect(findZeroBucket(bucketsEndingWithZeroCross)).toBe(2);
+ });
it('returns -1 when buckets array is empty', () => {
expect(findZeroBucket(emptyBuckets)).toBe(-1);
});
});
-
describe('findZeroAxisLeft', () => {
it('calculates correctly for linear scale crossing zero', () => {
- const rangeMin = -100; const rangeMax = 100;
+ const rangeMin = -100;
+ const rangeMax = 100;
const expected = '50%';
const result = findZeroAxisLeft('linear', rangeMin, rangeMax, 1, -1, 2, 0, 0, 0);
expect(result).toEqual(expected);
});
- it('calculates correctly for asymmetric linear scale crossing zero', () => {
- const rangeMin = -10; const rangeMax = 90;
- const expectedNumber = ((0 - rangeMin) / (rangeMax - rangeMin)) * 100;
- const resultString = findZeroAxisLeft('linear', rangeMin, rangeMax, 1, -1, 0, 0, 0, 0);
- expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
- });
+ it('calculates correctly for asymmetric linear scale crossing zero', () => {
+ const rangeMin = -10;
+ const rangeMax = 90;
+ const expectedNumber = ((0 - rangeMin) / (rangeMax - rangeMin)) * 100;
+ const resultString = findZeroAxisLeft('linear', rangeMin, rangeMax, 1, -1, 0, 0, 0, 0);
+ expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
+ });
it('calculates correctly for linear scale all positive (off-scale left)', () => {
- const rangeMin = 10; const rangeMax = 100;
+ const rangeMin = 10;
+ const rangeMax = 100;
const expectedNumber = ((0 - rangeMin) / (rangeMax - rangeMin)) * 100;
const resultString = findZeroAxisLeft('linear', rangeMin, rangeMax, 10, 0, -1, 0, 0, 0);
expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
});
it('calculates correctly for linear scale all negative (off-scale right)', () => {
- const rangeMin = -100; const rangeMax = -10;
+ const rangeMin = -100;
+ const rangeMax = -10;
const expectedNumber = ((0 - rangeMin) / (rangeMax - rangeMin)) * 100;
const resultString = findZeroAxisLeft('linear', rangeMin, rangeMax, 0, -10, -1, 0, 0, 0);
expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
});
-
const expMinPos = 1;
const expMaxNeg = -1;
const expZeroIdx = 2;
@@ -264,22 +268,46 @@ describe('HistogramHelpers', () => {
});
it('returns 100% for exponential scale when minPositive is 0', () => {
- expect(findZeroAxisLeft('exponential', -100, -1, 0, -1, -1, expNegWidth, expNegWidth + defaultExpBW, defaultExpBW)).toEqual('100%');
+ expect(
+ findZeroAxisLeft('exponential', -100, -1, 0, -1, -1, expNegWidth, expNegWidth + defaultExpBW, defaultExpBW)
+ ).toEqual('100%');
});
it('calculates position between buckets when zeroBucketIdx is -1 (exponential)', () => {
- const minPos = 5; const maxNeg = -1; const zeroIdx = -1;
+ const minPos = 5;
+ const maxNeg = -1;
+ const zeroIdx = -1;
const negW = Math.log(Math.abs(-1)) - Math.log(Math.abs(-10));
const posW = Math.log(10) - Math.log(5);
const totalW = Math.abs(negW) + posW + defaultExpBW;
const expectedNumber = (Math.abs(negW) / totalW) * 100;
- const resultString = findZeroAxisLeft('exponential', -10, 10, minPos, maxNeg, zeroIdx, Math.abs(negW), totalW, defaultExpBW);
+ const resultString = findZeroAxisLeft(
+ 'exponential',
+ -10,
+ 10,
+ minPos,
+ maxNeg,
+ zeroIdx,
+ Math.abs(negW),
+ totalW,
+ defaultExpBW
+ );
expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
});
it('calculates position using bucket width when zeroBucketIdx exists (exponential)', () => {
const expectedNumber = ((expNegWidth + 0.5 * defaultExpBW) / expTotalWidth) * 100;
- const resultString = findZeroAxisLeft('exponential', -100, 100, expMinPos, expMaxNeg, expZeroIdx, expNegWidth, expTotalWidth, defaultExpBW);
+ const resultString = findZeroAxisLeft(
+ 'exponential',
+ -100,
+ 100,
+ expMinPos,
+ expMaxNeg,
+ expZeroIdx,
+ expNegWidth,
+ expTotalWidth,
+ defaultExpBW
+ );
expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
});
@@ -288,7 +316,6 @@ describe('HistogramHelpers', () => {
});
});
-
describe('showZeroAxis', () => {
it('returns true when axis is between 5% and 95%', () => {
expect(showZeroAxis('5.01%')).toBe(true);
@@ -308,4 +335,4 @@ describe('HistogramHelpers', () => {
expect(showZeroAxis('120%')).toBe(false);
});
});
-});
\ No newline at end of file
+});
diff --git a/web/ui/react-app/src/utils/utils.test.ts b/web/ui/react-app/src/utils/utils.test.ts
index 93174df87b..61fcd733ab 100644
--- a/web/ui/react-app/src/utils/utils.test.ts
+++ b/web/ui/react-app/src/utils/utils.test.ts
@@ -333,13 +333,13 @@ describe('Utils', () => {
expect(parsePrometheusFloat('-1.7e+01')).toEqual(-17);
});
});
- describe('createExpressionLink',()=>{
- it('<....>builds link',()=>{
+ describe('createExpressionLink', () => {
+ it('<....>builds link', () => {
expect(createExpressionLink('up')).toEqual(
`../graph?g0.expr=up&g0.tab=1&g0.display_mode=${GraphDisplayMode.Lines}&g0.show_exemplars=0&g0.range_input=1h`
);
});
- it('url-encodes PromQL',() =>{
+ it('url-encodes PromQL', () => {
expect(createExpressionLink('ALERTS{alertname="High CPU"}')).toEqual(
`../graph?g0.expr=ALERTS%7Balertname%3D%22High%20CPU%22%7D&g0.tab=1&g0.display_mode=${GraphDisplayMode.Lines}&g0.show_exemplars=0&g0.range_input=1h`
);
From eaf47798af7bf31821c1b2b3c6eac36513ff676c Mon Sep 17 00:00:00 2001
From: Julien <291750+roidelapluie@users.noreply.github.com>
Date: Fri, 6 Feb 2026 09:30:15 +0100
Subject: [PATCH 123/165] promql: fix panic with @ modifier on empty ranges
(#18020)
When using the @ modifier with a timestamp that has no data, several
PromQL range functions were panicking with "index out of range [0]
with length 0". This was introduced by #16797 which changed function
signatures to use concrete types instead of interfaces.
The panic occurred because functions were accessing array elements
(matrixVal[0], vectorVals[0][0]) without checking if the arrays were
empty first.
Fixes #18018
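To illustrate the shape of the fix (a minimal sketch only, mirroring the
signatures visible in the diff below; the function name is hypothetical
and the engine types come from the promql package):

    // Sketch of the guard added to each affected function: an @ modifier
    // pointing at a timestamp with no data yields an empty Matrix, so
    // matrixVal[0] would panic without the early return.
    func funcExampleOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
        if len(matrixVal) == 0 {
            return enh.Out, nil // empty input: return the empty result
        }
        samples := matrixVal[0] // safe: at least one series is present
        _ = samples             // the real functions compute over samples here
        return enh.Out, nil
    }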
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
---
promql/functions.go | 57 +++++++++++++++++++++
promql/promqltest/testdata/at_modifier.test | 40 +++++++++++++++
2 files changed, 97 insertions(+)
diff --git a/promql/functions.go b/promql/functions.go
index 3a6bc3348d..04a3d55370 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -557,6 +557,9 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
// trend factor increases the influence of trends. Algorithm taken from
// https://en.wikipedia.org/wiki/Exponential_smoothing .
func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) < 2 || len(vectorVals[0]) == 0 || len(vectorVals[1]) == 0 || len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
// The smoothing factor argument.
sf := vectorVals[0][0].F
@@ -771,12 +774,18 @@ func funcScalar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNo
}
func aggrOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector {
+ if len(matrixVal) == 0 {
+ return enh.Out
+ }
el := matrixVal[0]
return append(enh.Out, Sample{F: aggrFn(el)})
}
func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
res, err := aggrFn(el)
@@ -785,6 +794,9 @@ func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series)
// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
firstSeries := matrixVal[0]
if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
@@ -910,6 +922,9 @@ func funcCountOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh
// === first_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
var f FPoint
@@ -938,6 +953,9 @@ func funcFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *
// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
var f FPoint
@@ -964,6 +982,9 @@ func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *E
// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
var annos annotations.Annotations
if len(samples.Floats) == 0 {
@@ -988,6 +1009,9 @@ func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
// === ts_of_first_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcTsOfFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
var tf int64 = math.MaxInt64
@@ -1008,6 +1032,9 @@ func funcTsOfFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, e
// === ts_of_last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcTsOfLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
var tf int64
@@ -1042,6 +1069,9 @@ func funcTsOfMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions,
// compareOverTime is a helper used by funcMaxOverTime and funcMinOverTime.
func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
var annos annotations.Annotations
if len(samples.Floats) == 0 {
@@ -1082,6 +1112,9 @@ func funcMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh
// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
firstSeries := matrixVal[0]
if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
@@ -1145,6 +1178,9 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) == 0 || len(vectorVals[0]) == 0 || len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
q := vectorVals[0][0].F
el := matrixVal[0]
if len(el.Floats) == 0 {
@@ -1166,6 +1202,9 @@ func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Exp
}
func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
var annos annotations.Annotations
if len(samples.Floats) == 0 {
@@ -1461,6 +1500,9 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
// No sense in trying to compute a derivative without at least two float points.
@@ -1485,6 +1527,9 @@ func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalN
// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) ===
func funcPredictLinear(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) == 0 || len(vectorVals[0]) == 0 || len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
duration := vectorVals[0][0].F
@@ -1591,6 +1636,9 @@ func funcHistogramStdVar(vectorVals []Vector, _ Matrix, _ parser.Expressions, en
// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) < 3 || len(vectorVals[0]) == 0 || len(vectorVals[1]) == 0 {
+ return enh.Out, nil
+ }
lower := vectorVals[0][0].F
upper := vectorVals[1][0].F
inVec := vectorVals[2]
@@ -1636,6 +1684,9 @@ func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expression
// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) < 2 || len(vectorVals[0]) == 0 {
+ return enh.Out, nil
+ }
q := vectorVals[0][0].F
inVec := vectorVals[1]
var annos annotations.Annotations
@@ -1709,6 +1760,9 @@ func pickFirstSampleIndex(floats []FPoint, args parser.Expressions, enh *EvalNod
// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcResets(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
floats := matrixVal[0].Floats
histograms := matrixVal[0].Histograms
resets := 0
@@ -1758,6 +1812,9 @@ func funcResets(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *Eval
// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcChanges(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
floats := matrixVal[0].Floats
histograms := matrixVal[0].Histograms
changes := 0
diff --git a/promql/promqltest/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test
index 4091f7eabf..194c877803 100644
--- a/promql/promqltest/testdata/at_modifier.test
+++ b/promql/promqltest/testdata/at_modifier.test
@@ -215,3 +215,43 @@ eval instant at 0s sum_over_time(timestamp(timestamp(metric{job="1"} @ 999))[10s
clear
+
+# Tests for @ modifier with empty data.
+# Data only at 0s, 10s, 20s. Eval at timestamp with no data.
+load 10s
+ up 1 2 3
+
+# Functions that should return empty results when @ modifier points to timestamp with no data.
+# These were panicking before the fix.
+
+eval instant at 1111111s quantile_over_time(scalar(up) + 1, {__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s predict_linear({__name__="up"}[1h:1m] @ 1111111, 0.1)
+
+eval instant at 1111111s deriv({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s changes({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s resets({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s first_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s last_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s sum_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s avg_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s min_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s max_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s count_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s stddev_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s stdvar_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s mad_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+clear
From fe5cb190e67f8f0cf93e70188ec2af623685a52c Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Fri, 6 Feb 2026 01:05:56 -0800
Subject: [PATCH 124/165] tsdb: Add metrics for stale series compaction
(#17957)
Signed-off-by: Ganesh Vernekar
---
tsdb/db.go | 65 ++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 48 insertions(+), 17 deletions(-)
diff --git a/tsdb/db.go b/tsdb/db.go
index 3e98b1e8d9..1d73628bfd 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -331,20 +331,23 @@ type DB struct {
}
type dbMetrics struct {
- loadedBlocks prometheus.GaugeFunc
- symbolTableSize prometheus.GaugeFunc
- reloads prometheus.Counter
- reloadsFailed prometheus.Counter
- compactionsFailed prometheus.Counter
- compactionsTriggered prometheus.Counter
- compactionsSkipped prometheus.Counter
- sizeRetentionCount prometheus.Counter
- timeRetentionCount prometheus.Counter
- startTime prometheus.GaugeFunc
- tombCleanTimer prometheus.Histogram
- blocksBytes prometheus.Gauge
- maxBytes prometheus.Gauge
- retentionDuration prometheus.Gauge
+ loadedBlocks prometheus.GaugeFunc
+ symbolTableSize prometheus.GaugeFunc
+ reloads prometheus.Counter
+ reloadsFailed prometheus.Counter
+ compactionsFailed prometheus.Counter
+ compactionsTriggered prometheus.Counter
+ compactionsSkipped prometheus.Counter
+ sizeRetentionCount prometheus.Counter
+ timeRetentionCount prometheus.Counter
+ startTime prometheus.GaugeFunc
+ tombCleanTimer prometheus.Histogram
+ blocksBytes prometheus.Gauge
+ maxBytes prometheus.Gauge
+ retentionDuration prometheus.Gauge
+ staleSeriesCompactionsTriggered prometheus.Counter
+ staleSeriesCompactionsFailed prometheus.Counter
+ staleSeriesCompactionDuration prometheus.Histogram
}
func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
@@ -429,6 +432,22 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
Name: "prometheus_tsdb_size_retentions_total",
Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.",
})
+ m.staleSeriesCompactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "prometheus_tsdb_stale_series_compactions_triggered_total",
+ Help: "Total number of triggered stale series compactions.",
+ })
+ m.staleSeriesCompactionsFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "prometheus_tsdb_stale_series_compactions_failed_total",
+ Help: "Total number of stale series compactions that failed.",
+ })
+ m.staleSeriesCompactionDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "prometheus_tsdb_stale_series_compaction_duration_seconds",
+ Help: "Duration of stale series compaction runs.",
+ Buckets: prometheus.ExponentialBuckets(1, 2, 14),
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ })
if r != nil {
r.MustRegister(
@@ -446,6 +465,9 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
m.blocksBytes,
m.maxBytes,
m.retentionDuration,
+ m.staleSeriesCompactionsTriggered,
+ m.staleSeriesCompactionsFailed,
+ m.staleSeriesCompactionDuration,
)
}
return m
@@ -1624,9 +1646,16 @@ func (db *DB) compactHead(head *RangeHead) error {
return nil
}
-func (db *DB) CompactStaleHead() error {
+func (db *DB) CompactStaleHead() (err error) {
db.cmtx.Lock()
- defer db.cmtx.Unlock()
+ defer func() {
+ db.cmtx.Unlock()
+ if err != nil {
+ db.metrics.staleSeriesCompactionsFailed.Inc()
+ }
+ }()
+
+ db.metrics.staleSeriesCompactionsTriggered.Inc()
db.logger.Info("Starting stale series compaction")
start := time.Now()
@@ -1666,7 +1695,9 @@ func (db *DB) CompactStaleHead() error {
}
db.head.RebuildSymbolTable(db.logger)
- db.logger.Info("Ending stale series compaction", "num_series", meta.Stats.NumSeries, "duration", time.Since(start))
+ elapsed := time.Since(start)
+ db.metrics.staleSeriesCompactionDuration.Observe(elapsed.Seconds())
+ db.logger.Info("Ending stale series compaction", "num_series", len(staleSeriesRefs), "duration", elapsed)
return nil
}
From 5e46e777547344c15d01fd819aaf03ff6331f0b8 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Fri, 6 Feb 2026 09:51:40 +0000
Subject: [PATCH 125/165] refactor: use Appender mock for otlptranslator tests
(#17999)
Signed-off-by: bwplotka
---
.../combined_appender_test.go | 100 ----
.../prometheusremotewrite/helper_test.go | 217 ++++-----
.../prometheusremotewrite/histograms_test.go | 221 ++++-----
.../metrics_to_prw_test.go | 439 ++++++++++--------
.../number_data_points_test.go | 141 +++---
5 files changed, 533 insertions(+), 585 deletions(-)
delete mode 100644 storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
deleted file mode 100644
index 69d11ed6bd..0000000000
--- a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheusremotewrite
-
-import (
- "errors"
- "testing"
-
- "github.com/google/go-cmp/cmp"
-
- "github.com/prometheus/prometheus/model/exemplar"
- "github.com/prometheus/prometheus/model/histogram"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/metadata"
- "github.com/prometheus/prometheus/storage"
- "github.com/prometheus/prometheus/util/testutil"
-)
-
-// TODO(bwplotka): Move to teststorage.Appendable. This require slight refactor of tests and I couldn't do this before
-// switching to AppenderV2 (I would need to adjust AppenderV1 mock exemplar flow which is pointless since we don't plan
-// to use it). For now keeping tests diff small for confidence.
-type mockCombinedAppender struct {
- pendingSamples []combinedSample
- pendingHistograms []combinedHistogram
-
- samples []combinedSample
- histograms []combinedHistogram
-}
-
-type combinedSample struct {
- metricFamilyName string
- ls labels.Labels
- meta metadata.Metadata
- t int64
- st int64
- v float64
- es []exemplar.Exemplar
-}
-
-type combinedHistogram struct {
- metricFamilyName string
- ls labels.Labels
- meta metadata.Metadata
- t int64
- st int64
- h *histogram.Histogram
- es []exemplar.Exemplar
-}
-
-func (m *mockCombinedAppender) Append(_ storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, _ *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
- if h != nil {
- m.pendingHistograms = append(m.pendingHistograms, combinedHistogram{
- metricFamilyName: opts.MetricFamilyName,
- ls: ls,
- meta: opts.Metadata,
- t: t,
- st: st,
- h: h,
- es: opts.Exemplars,
- })
- return 0, nil
- }
- m.pendingSamples = append(m.pendingSamples, combinedSample{
- metricFamilyName: opts.MetricFamilyName,
- ls: ls,
- meta: opts.Metadata,
- t: t,
- st: st,
- v: v,
- es: opts.Exemplars,
- })
- return 0, nil
-}
-
-func (m *mockCombinedAppender) Commit() error {
- m.samples = append(m.samples, m.pendingSamples...)
- m.pendingSamples = m.pendingSamples[:0]
- m.histograms = append(m.histograms, m.pendingHistograms...)
- m.pendingHistograms = m.pendingHistograms[:0]
- return nil
-}
-
-func (*mockCombinedAppender) Rollback() error {
- return errors.New("not implemented")
-}
-
-func requireEqual(t testing.TB, expected, actual any, msgAndArgs ...any) {
- testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.AllowUnexported(combinedSample{}, combinedHistogram{})}, msgAndArgs...)
-}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
index 3b5a1c4b34..c3fecc813b 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
@@ -34,9 +34,12 @@ import (
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
+type sample = teststorage.Sample
+
func TestPrometheusConverter_createAttributes(t *testing.T) {
resourceAttrs := map[string]string{
"service.name": "service name",
@@ -403,7 +406,7 @@ func TestPrometheusConverter_createAttributes(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- c := NewPrometheusConverter(&mockCombinedAppender{})
+ c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
settings := Settings{
PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{
PromoteAllResourceAttributes: tc.promoteAllResourceAttributes,
@@ -450,8 +453,7 @@ func TestPrometheusConverter_createAttributes(t *testing.T) {
attrsWithNameLabel.PutStr("__name__", "wrong_metric_name")
attrsWithNameLabel.PutStr("other_attr", "value")
- mockAppender := &mockCombinedAppender{}
- c := NewPrometheusConverter(mockAppender)
+ c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
settings := Settings{}
require.NoError(t, c.setResourceContext(resource, settings))
@@ -496,8 +498,7 @@ func TestPrometheusConverter_createAttributes(t *testing.T) {
attrsWithTypeAndUnit.PutStr(model.MetricUnitLabel, "wrong_unit")
attrsWithTypeAndUnit.PutStr("other_attr", "value")
- mockAppender := &mockCombinedAppender{}
- c := NewPrometheusConverter(mockAppender)
+ c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
settings := Settings{EnableTypeAndUnitLabels: true}
require.NoError(t, c.setResourceContext(resource, settings))
@@ -577,7 +578,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- want func() []combinedSample
+ want func() []sample
}{
{
name: "summary with start time and without scope promotion",
@@ -594,25 +595,25 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+sumStr,
),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+countStr,
),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -632,7 +633,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- want: func() []combinedSample {
+ want: func() []sample {
scopeLabels := []string{
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
@@ -640,22 +641,22 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
"otel_scope_schema_url", defaultScope.schemaURL,
"otel_scope_version", defaultScope.version,
}
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(append(scopeLabels,
+ MF: "test_summary",
+ L: labels.FromStrings(append(scopeLabels,
model.MetricNameLabel, "test_summary"+sumStr)...),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(append(scopeLabels,
+ MF: "test_summary",
+ L: labels.FromStrings(append(scopeLabels,
model.MetricNameLabel, "test_summary"+countStr)...),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -674,23 +675,23 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+sumStr,
),
- t: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+countStr,
),
- t: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -718,41 +719,41 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+sumStr,
),
- t: convertTimeStamp(ts),
- v: 100,
+ T: convertTimeStamp(ts),
+ V: 100,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+countStr,
),
- t: convertTimeStamp(ts),
- v: 50,
+ T: convertTimeStamp(ts),
+ V: 50,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary",
quantileStr, "0.5",
),
- t: convertTimeStamp(ts),
- v: 30,
+ T: convertTimeStamp(ts),
+ V: 30,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary",
quantileStr, "0.9",
),
- t: convertTimeStamp(ts),
- v: 40,
+ T: convertTimeStamp(ts),
+ V: 40,
},
}
},
@@ -761,8 +762,9 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
settings := Settings{
PromoteScopeMetadata: tt.promoteScope,
}
@@ -772,17 +774,16 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
require.NoError(t, converter.setResourceContext(resource, settings))
require.NoError(t, converter.setScopeContext(tt.scope, settings))
- converter.addSummaryDataPoints(
+ require.NoError(t, converter.addSummaryDataPoints(
context.Background(),
metric.Summary().DataPoints(),
settings,
storage.AOptions{
MetricFamilyName: metric.Name(),
},
- )
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.want(), mockAppender.samples)
+ ))
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())
})
}
}
@@ -805,7 +806,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- want func() []combinedSample
+ want func() []sample
}{
{
name: "histogram with start time and without scope promotion",
@@ -822,26 +823,26 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(
+ MF: "test_hist",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_hist"+countStr,
),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(
+ MF: "test_hist",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_hist_bucket",
model.BucketLabel, "+Inf",
),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -861,7 +862,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- want: func() []combinedSample {
+ want: func() []sample {
scopeLabels := []string{
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
@@ -869,23 +870,23 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
"otel_scope_schema_url", defaultScope.schemaURL,
"otel_scope_version", defaultScope.version,
}
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(append(scopeLabels,
+ MF: "test_hist",
+ L: labels.FromStrings(append(scopeLabels,
model.MetricNameLabel, "test_hist"+countStr)...),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(append(scopeLabels,
+ MF: "test_hist",
+ L: labels.FromStrings(append(scopeLabels,
model.MetricNameLabel, "test_hist_bucket",
model.BucketLabel, "+Inf")...),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -902,24 +903,24 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
return metric
},
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(
+ MF: "test_hist",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_hist"+countStr,
),
- t: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(
+ MF: "test_hist",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_hist_bucket",
model.BucketLabel, "+Inf",
),
- t: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -928,8 +929,9 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
settings := Settings{
PromoteScopeMetadata: tt.promoteScope,
}
@@ -939,24 +941,23 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
require.NoError(t, converter.setResourceContext(resource, settings))
require.NoError(t, converter.setScopeContext(tt.scope, settings))
- converter.addHistogramDataPoints(
+ require.NoError(t, converter.addHistogramDataPoints(
context.Background(),
metric.Histogram().DataPoints(),
settings,
storage.AOptions{
MetricFamilyName: metric.Name(),
},
- )
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.want(), mockAppender.samples)
+ ))
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())
})
}
}
func TestGetPromExemplars(t *testing.T) {
ctx := context.Background()
- c := NewPrometheusConverter(&mockCombinedAppender{})
+ c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
t.Run("Exemplars with int value", func(t *testing.T) {
es := pmetric.NewExemplarSlice()
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
index 58d7c4e835..5422796002 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
@@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
)
type expectedBucketLayout struct {
@@ -383,8 +384,8 @@ func TestConvertBucketsLayout(t *testing.T) {
for scaleDown, wantLayout := range tt.wantLayout {
t.Run(fmt.Sprintf("%s-scaleby-%d", tt.name, scaleDown), func(t *testing.T) {
gotSpans, gotDeltas := convertBucketsLayout(tt.buckets().BucketCounts().AsRaw(), tt.buckets().Offset(), scaleDown, true)
- requireEqual(t, wantLayout.wantSpans, gotSpans)
- requireEqual(t, wantLayout.wantDeltas, gotDeltas)
+ require.Equal(t, wantLayout.wantSpans, gotSpans)
+ require.Equal(t, wantLayout.wantDeltas, gotDeltas)
})
}
}
@@ -634,7 +635,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- wantSeries func() []combinedHistogram
+ wantSeries func() []sample
}{
{
name: "histogram data points with same labels and without scope promotion",
@@ -663,19 +664,19 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist",
"attr", "test_attr",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 7,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -683,15 +684,15 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{4, -2},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 4,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -699,7 +700,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
PositiveBuckets: []int64{4, -2, -1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -731,7 +732,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist",
"attr", "test_attr",
@@ -741,14 +742,14 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 7,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -756,15 +757,15 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{4, -2},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 4,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -772,7 +773,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
PositiveBuckets: []int64{4, -2, -1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -804,7 +805,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist",
"attr", "test_attr",
@@ -814,14 +815,14 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
"attr", "test_attr_two",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 7,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -829,15 +830,15 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{4, -2},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist",
- ls: labelsAnother,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: labelsAnother,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 4,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -845,7 +846,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
NegativeSpans: []histogram.Span{{Offset: 0, Length: 3}},
NegativeBuckets: []int64{4, -2, -1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -855,8 +856,9 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
namer := otlptranslator.MetricNamer{
WithMetricSuffixes: true,
}
@@ -883,9 +885,8 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.wantSeries(), mockAppender.histograms)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.wantSeries(), appTest.ResultSamples())
})
}
}
@@ -1112,7 +1113,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- wantSeries func() []combinedHistogram
+ wantSeries func() []sample
}{
{
name: "histogram data points with same labels and without scope promotion",
@@ -1141,19 +1142,19 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist_to_nhcb",
"attr", "test_attr",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 3,
Sum: 3,
Schema: -53,
@@ -1161,15 +1162,15 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{2, -2, 1},
CustomValues: []float64{5, 10},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 11,
Sum: 5,
Schema: -53,
@@ -1177,7 +1178,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{3, 5, -8},
CustomValues: []float64{0, 1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -1209,7 +1210,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist_to_nhcb",
"attr", "test_attr",
@@ -1219,14 +1220,14 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 3,
Sum: 3,
Schema: -53,
@@ -1234,15 +1235,15 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{2, -2, 1},
CustomValues: []float64{5, 10},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 11,
Sum: 5,
Schema: -53,
@@ -1250,7 +1251,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{3, 5, -8},
CustomValues: []float64{0, 1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -1282,7 +1283,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist_to_nhcb",
"attr", "test_attr",
@@ -1292,14 +1293,14 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
"attr", "test_attr_two",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 6,
Sum: 3,
Schema: -53,
@@ -1307,15 +1308,15 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{4, -2},
CustomValues: []float64{0, 1},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: labelsAnother,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: labelsAnother,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 11,
Sum: 5,
Schema: -53,
@@ -1323,7 +1324,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{3, 5},
CustomValues: []float64{0, 1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -1333,8 +1334,9 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
namer := otlptranslator.MetricNamer{
WithMetricSuffixes: true,
}
@@ -1363,9 +1365,8 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.wantSeries(), mockAppender.histograms)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.wantSeries(), appTest.ResultSamples())
})
}
}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
index 8ac860a291..647105e640 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
@@ -34,6 +34,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
)
func TestFromMetrics(t *testing.T) {
@@ -79,8 +80,9 @@ func TestFromMetrics(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
payload, wantPromMetrics := createExportRequest(5, 128, 128, 2, 0, tc.settings, tc.temporality)
seenFamilyNames := map[string]struct{}{}
for _, wantMetric := range wantPromMetrics {
@@ -102,14 +104,14 @@ func TestFromMetrics(t *testing.T) {
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
- ts := mockAppender.samples
- require.Len(t, ts, 1536+1) // +1 for the target_info.
+ got := appTest.ResultSamples()
+ require.Len(t, got, 1536+1) // +1 for the target_info.
tgtInfoCount := 0
- for _, s := range ts {
- lbls := s.ls
+ for _, s := range got {
+ lbls := s.L
if lbls.Get(labels.MetricName) == "target_info" {
tgtInfoCount++
require.Equal(t, "test-namespace/test-service", lbls.Get("job"))
@@ -148,11 +150,14 @@ func TestFromMetrics(t *testing.T) {
h.SetCount(15)
h.SetSum(155)
+ h.BucketCounts().FromRaw([]uint64{3, 11, 0})
+ h.ExplicitBounds().FromRaw([]float64{0.124, 1.123})
generateAttributes(h.Attributes(), "series", 1)
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -160,21 +165,56 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
- if convertHistogramsToNHCB {
- require.Len(t, mockAppender.histograms, 1)
- require.Empty(t, mockAppender.samples)
- } else {
- require.Empty(t, mockAppender.histograms)
- require.Len(t, mockAppender.samples, 3)
+ expectedSamples := []sample{
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_sum", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 155,
+ },
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_count", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 15,
+ },
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_bucket", "le", "0.124", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 3,
+ },
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_bucket", "le", "1.123", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 14,
+ },
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_bucket", "le", "+Inf", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 15,
+ },
}
+ if convertHistogramsToNHCB {
+ expectedSamples = []sample{
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), H: &histogram.Histogram{
+ Schema: -53, Count: 15, Sum: 155,
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
+ PositiveBuckets: []int64{3, 8, -11},
+ CustomValues: []float64{0.124, 1.123},
+ },
+ },
+ }
+ }
+ teststorage.RequireEqual(t, expectedSamples, appTest.ResultSamples())
})
}
t.Run("context cancellation", func(t *testing.T) {
settings := Settings{}
- converter := NewPrometheusConverter(&mockCombinedAppender{})
+ converter := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
ctx, cancel := context.WithCancel(context.Background())
// Verify that converter.FromMetrics respects cancellation.
cancel()
@@ -187,7 +227,7 @@ func TestFromMetrics(t *testing.T) {
t.Run("context timeout", func(t *testing.T) {
settings := Settings{}
- converter := NewPrometheusConverter(&mockCombinedAppender{})
+ converter := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
// Verify that converter.FromMetrics respects timeout.
ctx, cancel := context.WithTimeout(context.Background(), 0)
t.Cleanup(cancel)
@@ -220,7 +260,7 @@ func TestFromMetrics(t *testing.T) {
generateAttributes(h.Attributes(), "series", 10)
}
- converter := NewPrometheusConverter(&mockCombinedAppender{})
+ converter := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{})
require.NoError(t, err)
require.NotEmpty(t, annots)
@@ -253,7 +293,7 @@ func TestFromMetrics(t *testing.T) {
generateAttributes(h.Attributes(), "series", 10)
}
- converter := NewPrometheusConverter(&mockCombinedAppender{})
+ converter := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -301,8 +341,9 @@ func TestFromMetrics(t *testing.T) {
}
}
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -312,8 +353,11 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
- require.Len(t, mockAppender.samples, 22)
+ require.NoError(t, app.Commit())
+
+ got := appTest.ResultSamples()
+ require.Len(t, got, 22)
+
// There should be a target_info sample at the earliest metric timestamp, then two spaced lookback delta/2 apart,
// then one at the latest metric timestamp.
targetInfoLabels := labels.FromStrings(
@@ -330,36 +374,36 @@ func TestFromMetrics(t *testing.T) {
Type: model.MetricTypeGauge,
Help: "Target metadata",
}
- requireEqual(t, []combinedSample{
+ teststorage.RequireEqual(t, []sample{
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
- }, mockAppender.samples[len(mockAppender.samples)-4:])
+ }, got[len(got)-4:])
})
t.Run("target_info deduplication across multiple resources with same labels", func(t *testing.T) {
@@ -401,8 +445,9 @@ func TestFromMetrics(t *testing.T) {
generateAttributes(point2.Attributes(), "series", 1)
}
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -412,11 +457,11 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
- var targetInfoSamples []combinedSample
- for _, s := range mockAppender.samples {
- if s.ls.Get(labels.MetricName) == "target_info" {
+ var targetInfoSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "target_info" {
targetInfoSamples = append(targetInfoSamples, s)
}
}
@@ -437,20 +482,20 @@ func TestFromMetrics(t *testing.T) {
Type: model.MetricTypeGauge,
Help: "Target metadata",
}
- requireEqual(t, []combinedSample{
+ teststorage.RequireEqual(t, []sample{
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
}, targetInfoSamples)
})
@@ -483,8 +528,9 @@ func TestFromMetrics(t *testing.T) {
point.SetTimestamp(ts)
point.SetDoubleValue(1.0)
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -495,12 +541,12 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
// Find target_info samples.
- var targetInfoSamples []combinedSample
- for _, s := range mockAppender.samples {
- if s.ls.Get(labels.MetricName) == "target_info" {
+ var targetInfoSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "target_info" {
targetInfoSamples = append(targetInfoSamples, s)
}
}
@@ -508,22 +554,23 @@ func TestFromMetrics(t *testing.T) {
// Verify target_info does NOT have scope labels.
for _, s := range targetInfoSamples {
- require.Empty(t, s.ls.Get("otel_scope_name"), "target_info should not have otel_scope_name")
- require.Empty(t, s.ls.Get("otel_scope_version"), "target_info should not have otel_scope_version")
- require.Empty(t, s.ls.Get("otel_scope_schema_url"), "target_info should not have otel_scope_schema_url")
- require.Empty(t, s.ls.Get("otel_scope_scope_attr"), "target_info should not have scope attributes")
+ require.Empty(t, s.L.Get("otel_scope_name"), "target_info should not have otel_scope_name")
+ require.Empty(t, s.L.Get("otel_scope_version"), "target_info should not have otel_scope_version")
+ require.Empty(t, s.L.Get("otel_scope_schema_url"), "target_info should not have otel_scope_schema_url")
+ require.Empty(t, s.L.Get("otel_scope_scope_attr"), "target_info should not have scope attributes")
}
// Verify the metric itself DOES have scope labels.
- var metricSamples []combinedSample
- for _, s := range mockAppender.samples {
- if s.ls.Get(labels.MetricName) == "test_gauge" {
+ var metricSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "test_gauge" {
metricSamples = append(metricSamples, s)
}
}
+
require.NotEmpty(t, metricSamples, "expected metric samples")
- require.Equal(t, "my-scope", metricSamples[0].ls.Get("otel_scope_name"), "metric should have otel_scope_name")
- require.Equal(t, "1.0.0", metricSamples[0].ls.Get("otel_scope_version"), "metric should have otel_scope_version")
+ require.Equal(t, "my-scope", metricSamples[0].L.Get("otel_scope_name"), "metric should have otel_scope_name")
+ require.Equal(t, "1.0.0", metricSamples[0].L.Get("otel_scope_version"), "metric should have otel_scope_version")
})
t.Run("target_info should include promoted resource attributes", func(t *testing.T) {
@@ -548,8 +595,9 @@ func TestFromMetrics(t *testing.T) {
point.SetTimestamp(ts)
point.SetDoubleValue(1.0)
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -562,12 +610,12 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
// Find target_info samples.
- var targetInfoSamples []combinedSample
- for _, s := range mockAppender.samples {
- if s.ls.Get(labels.MetricName) == "target_info" {
+ var targetInfoSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "target_info" {
targetInfoSamples = append(targetInfoSamples, s)
}
}
@@ -575,19 +623,19 @@ func TestFromMetrics(t *testing.T) {
// Verify target_info has the promoted resource attribute.
for _, s := range targetInfoSamples {
- require.Equal(t, "promoted-value", s.ls.Get("custom_promoted_attr"), "target_info should have promoted resource attributes")
- require.Equal(t, "another-value", s.ls.Get("another_resource_attr"), "target_info should have non-promoted resource attributes")
+ require.Equal(t, "promoted-value", s.L.Get("custom_promoted_attr"), "target_info should have promoted resource attributes")
+ require.Equal(t, "another-value", s.L.Get("another_resource_attr"), "target_info should have non-promoted resource attributes")
}
// Verify the metric also has the promoted resource attribute.
- var metricSamples []combinedSample
- for _, s := range mockAppender.samples {
- if s.ls.Get(labels.MetricName) == "test_gauge" {
+ var metricSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "test_gauge" {
metricSamples = append(metricSamples, s)
}
}
require.NotEmpty(t, metricSamples, "expected metric samples")
- require.Equal(t, "promoted-value", metricSamples[0].ls.Get("custom_promoted_attr"), "metric should have promoted resource attribute")
+ require.Equal(t, "promoted-value", metricSamples[0].L.Get("custom_promoted_attr"), "metric should have promoted resource attribute")
})
t.Run("target_info should include promoted attributes when KeepIdentifyingResourceAttributes is enabled", func(t *testing.T) {
@@ -613,8 +661,9 @@ func TestFromMetrics(t *testing.T) {
point.SetTimestamp(ts)
point.SetDoubleValue(1.0)
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -628,11 +677,11 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
- var targetInfoSamples []combinedSample
- for _, s := range mockAppender.samples {
- if s.ls.Get(labels.MetricName) == "target_info" {
+ var targetInfoSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "target_info" {
targetInfoSamples = append(targetInfoSamples, s)
}
}
@@ -640,24 +689,24 @@ func TestFromMetrics(t *testing.T) {
// Verify target_info has the promoted resource attribute.
for _, s := range targetInfoSamples {
- require.Equal(t, "promoted-value", s.ls.Get("custom_promoted_attr"), "target_info should have promoted resource attributes")
+ require.Equal(t, "promoted-value", s.L.Get("custom_promoted_attr"), "target_info should have promoted resource attributes")
// And it should have the identifying attributes (since KeepIdentifyingResourceAttributes is true).
- require.Equal(t, "test-service", s.ls.Get("service_name"), "target_info should have service.name when KeepIdentifyingResourceAttributes is true")
- require.Equal(t, "test-namespace", s.ls.Get("service_namespace"), "target_info should have service.namespace when KeepIdentifyingResourceAttributes is true")
- require.Equal(t, "instance-1", s.ls.Get("service_instance_id"), "target_info should have service.instance.id when KeepIdentifyingResourceAttributes is true")
+ require.Equal(t, "test-service", s.L.Get("service_name"), "target_info should have service.name when KeepIdentifyingResourceAttributes is true")
+ require.Equal(t, "test-namespace", s.L.Get("service_namespace"), "target_info should have service.namespace when KeepIdentifyingResourceAttributes is true")
+ require.Equal(t, "instance-1", s.L.Get("service_instance_id"), "target_info should have service.instance.id when KeepIdentifyingResourceAttributes is true")
// And the non-promoted resource attribute.
- require.Equal(t, "another-value", s.ls.Get("another_resource_attr"), "target_info should have non-promoted resource attributes")
+ require.Equal(t, "another-value", s.L.Get("another_resource_attr"), "target_info should have non-promoted resource attributes")
}
// Verify the metric also has the promoted resource attribute.
- var metricSamples []combinedSample
- for _, s := range mockAppender.samples {
- if s.ls.Get(labels.MetricName) == "test_gauge" {
+ var metricSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "test_gauge" {
metricSamples = append(metricSamples, s)
}
}
require.NotEmpty(t, metricSamples, "expected metric samples")
- require.Equal(t, "promoted-value", metricSamples[0].ls.Get("custom_promoted_attr"), "metric should have promoted resource attribute")
+ require.Equal(t, "promoted-value", metricSamples[0].L.Get("custom_promoted_attr"), "metric should have promoted resource attribute")
})
}
@@ -665,13 +714,12 @@ func TestTemporality(t *testing.T) {
ts := time.Unix(100, 0)
tests := []struct {
- name string
- allowDelta bool
- convertToNHCB bool
- inputSeries []pmetric.Metric
- expectedSamples []combinedSample
- expectedHistograms []combinedHistogram
- expectedError string
+ name string
+ allowDelta bool
+ convertToNHCB bool
+ inputSeries []pmetric.Metric
+ expectedSamples []sample
+ expectedError string
}{
{
name: "all cumulative when delta not allowed",
@@ -680,7 +728,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter),
createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter),
},
@@ -692,7 +740,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown),
createPromFloatSeries("test_metric_2", ts, model.MetricTypeUnknown),
},
@@ -704,7 +752,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown),
createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter),
},
@@ -716,7 +764,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter),
},
expectedError: `invalid temporality and type combination for metric "test_metric_2"`,
@@ -728,7 +776,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityUnspecified, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter),
},
expectedError: `invalid temporality and type combination for metric "test_metric_2"`,
@@ -739,7 +787,7 @@ func TestTemporality(t *testing.T) {
inputSeries: []pmetric.Metric{
createOtelExponentialHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNativeHistogramSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
},
@@ -750,7 +798,7 @@ func TestTemporality(t *testing.T) {
createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNativeHistogramSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown),
createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
@@ -762,7 +810,7 @@ func TestTemporality(t *testing.T) {
createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
expectedError: `invalid temporality and type combination for metric "test_histogram_1"`,
@@ -774,7 +822,7 @@ func TestTemporality(t *testing.T) {
inputSeries: []pmetric.Metric{
createOtelExplicitHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNHCBSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
},
@@ -786,7 +834,7 @@ func TestTemporality(t *testing.T) {
createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNHCBSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown),
createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
@@ -799,7 +847,7 @@ func TestTemporality(t *testing.T) {
createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
expectedError: `invalid temporality and type combination for metric "test_histogram_1"`,
@@ -840,7 +888,7 @@ func TestTemporality(t *testing.T) {
inputSeries: []pmetric.Metric{
createOtelGauge("test_gauge_1", ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_gauge_1", ts, model.MetricTypeGauge),
},
},
@@ -863,25 +911,22 @@ func TestTemporality(t *testing.T) {
s.CopyTo(sm.Metrics().AppendEmpty())
}
- mockAppender := &mockCombinedAppender{}
- c := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ c := NewPrometheusConverter(app)
settings := Settings{
AllowDeltaTemporality: tc.allowDelta,
ConvertHistogramsToNHCB: tc.convertToNHCB,
}
_, err := c.FromMetrics(context.Background(), metrics, settings)
-
if tc.expectedError != "" {
require.EqualError(t, err, tc.expectedError)
} else {
require.NoError(t, err)
}
- require.NoError(t, mockAppender.Commit())
-
- // Sort series to make the test deterministic.
- requireEqual(t, tc.expectedSamples, mockAppender.samples)
- requireEqual(t, tc.expectedHistograms, mockAppender.histograms)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tc.expectedSamples, appTest.ResultSamples())
})
}
}
@@ -900,13 +945,13 @@ func createOtelSum(name string, temporality pmetric.AggregationTemporality, ts t
return m
}
-func createPromFloatSeries(name string, ts time.Time, typ model.MetricType) combinedSample {
- return combinedSample{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name, "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 5,
- meta: metadata.Metadata{
+func createPromFloatSeries(name string, ts time.Time, typ model.MetricType) sample {
+ return sample{
+ MF: name,
+ L: labels.FromStrings("__name__", name, "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 5,
+ M: metadata.Metadata{
Type: typ,
},
}
@@ -938,15 +983,15 @@ func createOtelExponentialHistogram(name string, temporality pmetric.Aggregation
return m
}
-func createPromNativeHistogramSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram {
- return combinedHistogram{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name, "test_label", "test_value"),
- t: ts.UnixMilli(),
- meta: metadata.Metadata{
+func createPromNativeHistogramSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) sample {
+ return sample{
+ MF: name,
+ L: labels.FromStrings("__name__", name, "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ M: metadata.Metadata{
Type: typ,
},
- h: &histogram.Histogram{
+ H: &histogram.Histogram{
Count: 1,
Sum: 5,
Schema: 0,
@@ -973,15 +1018,15 @@ func createOtelExplicitHistogram(name string, temporality pmetric.AggregationTem
return m
}
-func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram {
- return combinedHistogram{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name, "test_label", "test_value"),
- meta: metadata.Metadata{
+func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) sample {
+ return sample{
+ MF: name,
+ L: labels.FromStrings("__name__", name, "test_label", "test_value"),
+ M: metadata.Metadata{
Type: typ,
},
- t: ts.UnixMilli(),
- h: &histogram.Histogram{
+ T: ts.UnixMilli(),
+ H: &histogram.Histogram{
Count: 20,
Sum: 30,
Schema: -53,
@@ -998,50 +1043,50 @@ func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.
}
}
-func createPromClassicHistogramSeries(name string, ts time.Time, typ model.MetricType) []combinedSample {
- return []combinedSample{
+func createPromClassicHistogramSeries(name string, ts time.Time, typ model.MetricType) []sample {
+ return []sample{
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 30,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 30,
+ M: metadata.Metadata{
Type: typ,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 20,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 20,
+ M: metadata.Metadata{
Type: typ,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_bucket", "le", "1", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 10,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_bucket", "le", "1", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 10,
+ M: metadata.Metadata{
Type: typ,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_bucket", "le", "2", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 20,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_bucket", "le", "2", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 20,
+ M: metadata.Metadata{
Type: typ,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_bucket", "le", "+Inf", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 20,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_bucket", "le", "+Inf", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 20,
+ M: metadata.Metadata{
Type: typ,
},
},
@@ -1064,32 +1109,32 @@ func createOtelSummary(name string, ts time.Time) pmetric.Metric {
return m
}
-func createPromSummarySeries(name string, ts time.Time) []combinedSample {
- return []combinedSample{
+func createPromSummarySeries(name string, ts time.Time) []sample {
+ return []sample{
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 18,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 18,
+ M: metadata.Metadata{
Type: model.MetricTypeSummary,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 9,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 9,
+ M: metadata.Metadata{
Type: model.MetricTypeSummary,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name, "quantile", "0.5", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 2,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name, "quantile", "0.5", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 2,
+ M: metadata.Metadata{
Type: model.MetricTypeSummary,
},
},
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
index 67961a2943..66e7e4c3bb 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
@@ -30,6 +30,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
)
func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
@@ -50,7 +51,7 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- want func() []combinedSample
+ want func() []sample
}{
{
name: "gauge without scope promotion",
@@ -63,17 +64,17 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(pcommon.Timestamp(ts)),
- v: 1,
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(pcommon.Timestamp(ts)),
+ V: 1,
},
}
},
@@ -89,7 +90,7 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
"otel_scope_name", defaultScope.name,
@@ -98,13 +99,13 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(pcommon.Timestamp(ts)),
- v: 1,
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(pcommon.Timestamp(ts)),
+ V: 1,
},
}
},
@@ -113,8 +114,9 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
settings := Settings{
PromoteScopeMetadata: tt.promoteScope,
}
@@ -132,9 +134,8 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
MetricFamilyName: metric.Name(),
},
)
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.want(), mockAppender.samples)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())
})
}
}
@@ -157,7 +158,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- want func() []combinedSample
+ want func() []sample
}{
{
name: "sum without scope promotion",
@@ -171,17 +172,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 1,
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 1,
},
}
},
@@ -198,7 +199,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
"otel_scope_name", defaultScope.name,
@@ -207,13 +208,13 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 1,
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 1,
},
}
},
@@ -232,18 +233,18 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 1,
- es: []exemplar.Exemplar{
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 1,
+ ES: []exemplar.Exemplar{
{Value: 2},
},
},
@@ -267,18 +268,18 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_sum",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_sum",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 1,
+ MF: "test_sum",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 1,
},
}
},
@@ -298,17 +299,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_sum",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_sum",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 0,
+ MF: "test_sum",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -328,17 +329,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_sum",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_sum",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 0,
+ MF: "test_sum",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -347,8 +348,9 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
settings := Settings{
PromoteScopeMetadata: tt.promoteScope,
}
@@ -366,9 +368,8 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
MetricFamilyName: metric.Name(),
},
)
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.want(), mockAppender.samples)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())
})
}
}
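For readers skimming the migration: the new test flow above, in condensed form (a fragment rather than a runnable test; the identifiers teststorage.NewAppendable, AppenderV2, ResultSamples and RequireEqual are taken from the diff, not re-verified against the full teststorage package):

	// Build a test appendable and hand its v2 appender to the converter.
	appTest := teststorage.NewAppendable()
	app := appTest.AppenderV2(t.Context())
	converter := NewPrometheusConverter(app)
	// ... feed OTLP data points through the converter, as in the tests above ...
	// Commit the appender and compare collected samples against expectations.
	require.NoError(t, app.Commit())
	teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())

This replaces the bespoke mockCombinedAppender with the shared teststorage helper, so expected samples are expressed with the exported sample fields (MF, L, M, T, ST, V, ES) instead of the mock's unexported ones.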
From 3155c95c1f58542ed98d265b0bae6675df433f8a Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Fri, 6 Feb 2026 10:23:54 +0000
Subject: [PATCH 126/165] feat: add fgprof debug pprof (wall-time profiling
capability) (#18027)
Signed-off-by: bwplotka
---
go.mod | 1 +
go.sum | 18 ++++++++++++++++++
web/web.go | 5 +++++
3 files changed, 24 insertions(+)
diff --git a/go.mod b/go.mod
index 668029856e..dcad44eb9c 100644
--- a/go.mod
+++ b/go.mod
@@ -29,6 +29,7 @@ require (
github.com/envoyproxy/go-control-plane/envoy v1.36.0
github.com/envoyproxy/protoc-gen-validate v1.3.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
+ github.com/felixge/fgprof v0.9.5
github.com/fsnotify/fsnotify v1.9.0
github.com/go-openapi/strfmt v0.25.0
github.com/go-zookeeper/zk v1.0.4
diff --git a/go.sum b/go.sum
index 9d2022c63d..661c8af7c7 100644
--- a/go.sum
+++ b/go.sum
@@ -103,6 +103,12 @@ github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F9
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
+github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
+github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
@@ -149,6 +155,8 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
+github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@@ -222,6 +230,9 @@ github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
+github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -248,6 +259,7 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20260111202518-71be6bfdd440 h1:oKBqR+eQXiIM7X8K1JEg9aoTEePLq/c6Awe484abOuA=
github.com/google/pprof v0.0.0-20260111202518-71be6bfdd440/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
@@ -323,10 +335,12 @@ github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.33.0 h1:g9hwuo60IXbupXJCYMlO4xDXgxxMPuFk31iOpLXDCV4=
github.com/hetznercloud/hcloud-go/v2 v2.33.0/go.mod h1:GzYEl7slIGKc6Ttt08hjiJvGj8/PbWzcQf6IUi02dIs=
+github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/ionos-cloud/sdk-go/v6 v6.3.6 h1:l/TtKgdQ1wUH3DDe2SfFD78AW+TJWdEbDpQhHkWd6CM=
github.com/ionos-cloud/sdk-go/v6 v6.3.6/go.mod h1:nUGHP4kZHAZngCVr4v6C8nuargFrtvt7GrzH/hqn7c4=
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -361,8 +375,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/linode/linodego v1.63.0 h1:MdjizfXNJDVJU6ggoJmMO5O9h4KGPGivNX0fzrAnstk=
github.com/linode/linodego v1.63.0/go.mod h1:GoiwLVuLdBQcAebxAVKVL3mMYUgJZR/puOUSla04xBE=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -444,6 +460,7 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -705,6 +722,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/web/web.go b/web/web.go
index 5d44cedd97..854ecaf765 100644
--- a/web/web.go
+++ b/web/web.go
@@ -36,6 +36,7 @@ import (
"time"
"github.com/alecthomas/units"
+ "github.com/felixge/fgprof"
"github.com/grafana/regexp"
"github.com/mwitkow/go-conntrack"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
@@ -112,6 +113,8 @@ const (
Stopping
)
+var fgprofHandler = fgprof.Handler()
+
// withStackTracer logs the stack trace in case the request panics. The function
// will re-raise the error which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't manage properly the
@@ -618,6 +621,8 @@ func serveDebug(w http.ResponseWriter, req *http.Request) {
pprof.Symbol(w, req)
case "trace":
pprof.Trace(w, req)
+ case "fgprof":
+ fgprofHandler.ServeHTTP(w, req)
default:
req.URL.Path = "/debug/pprof/" + subpath
pprof.Index(w, req)
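For context (not part of the patch): fgprof.Handler() is the documented entry point of github.com/felixge/fgprof and returns an http.Handler that serves wall-clock profiles in pprof format. A minimal standalone sketch of the same wiring, outside Prometheus:

	package main

	import (
		"log"
		"net/http"

		"github.com/felixge/fgprof"
	)

	func main() {
		// Serve wall-clock (on- plus off-CPU) profiles; Prometheus mounts
		// the same handler through serveDebug above.
		http.Handle("/debug/fgprof", fgprof.Handler())
		log.Fatal(http.ListenAndServe("localhost:6060", nil))
	}

The resulting profile can be inspected with the usual tooling, e.g. go tool pprof http://localhost:6060/debug/fgprof?seconds=10 (per the fgprof README).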
From 258fcbda6b3a655b5f78db57c6c44f73402c7b49 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Thu, 13 Nov 2025 18:31:54 +0000
Subject: [PATCH 127/165] [REFACTOR] Relabel: Remove unnecessary Process()
function
All uses can be replaced by ProcessBuilder, which is more efficient.
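For illustration, the replacement pattern, assembled from the removed Process body shown in the diff below (apply is a hypothetical helper name):

	package main

	import (
		"github.com/prometheus/prometheus/model/labels"
		"github.com/prometheus/prometheus/model/relabel"
	)

	func apply(in labels.Labels, cfgs []*relabel.Config) (labels.Labels, bool) {
		// Equivalent of the removed Process(): the caller now owns the
		// builder and can reuse it across calls via lb.Reset(...), which
		// is what makes ProcessBuilder the more efficient option.
		lb := labels.NewBuilder(in)
		if !relabel.ProcessBuilder(lb, cfgs...) {
			return labels.EmptyLabels(), false // the rules dropped the series
		}
		return lb.Labels(), true
	}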
Signed-off-by: Bryan Boreham
---
model/relabel/relabel.go | 18 ++----------------
model/relabel/relabel_test.go | 9 ++++++---
2 files changed, 8 insertions(+), 19 deletions(-)
diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go
index 6087253d11..4045cc65db 100644
--- a/model/relabel/relabel.go
+++ b/model/relabel/relabel.go
@@ -269,22 +269,8 @@ func (re Regexp) String() string {
return str[5 : len(str)-2]
}
-// Process returns a relabeled version of the given label set. The relabel configurations
-// are applied in order of input.
-// There are circumstances where Process will modify the input label.
-// If you want to avoid issues with the input label set being modified, at the cost of
-// higher memory usage, you can use lbls.Copy().
-// If a label set is dropped, EmptyLabels and false is returned.
-func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) {
- lb := labels.NewBuilder(lbls)
- if !ProcessBuilder(lb, cfgs...) {
- return labels.EmptyLabels(), false
- }
- return lb.Labels(), true
-}
-
-// ProcessBuilder is like Process, but the caller passes a labels.Builder
-// containing the initial set of labels, which is mutated by the rules.
+// ProcessBuilder applies relabeling configurations (rules) to the labels in lb.
+// The rules are applied in order of input. Returns false if the rule says to drop.
func ProcessBuilder(lb *labels.Builder, cfgs ...*Config) (keep bool) {
for _, cfg := range cfgs {
keep = relabel(cfg, lb)
diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go
index a3eb925995..8c2ba55ea7 100644
--- a/model/relabel/relabel_test.go
+++ b/model/relabel/relabel_test.go
@@ -751,10 +751,11 @@ func TestRelabel(t *testing.T) {
require.NoError(t, cfg.Validate(model.UTF8Validation))
}
- res, keep := Process(test.input, test.relabel...)
+ lb := labels.NewBuilder(test.input)
+ keep := ProcessBuilder(lb, test.relabel...)
require.Equal(t, !test.drop, keep)
if keep {
- testutil.RequireEqual(t, test.output, res)
+ testutil.RequireEqual(t, test.output, lb.Labels())
}
}
}
@@ -1064,9 +1065,11 @@ func BenchmarkRelabel(b *testing.B) {
require.NoError(b, err)
}
for _, tt := range tests {
+ lb := labels.NewBuilder(labels.EmptyLabels())
b.Run(tt.name, func(b *testing.B) {
for b.Loop() {
- _, _ = Process(tt.lbls, tt.cfgs...)
+ lb.Reset(tt.lbls)
+ _ = ProcessBuilder(lb, tt.cfgs...)
}
})
}
From 1dcdb07d30fc37cd0ef11c7cf0552da71f9823be Mon Sep 17 00:00:00 2001
From: Sasha <103973965+crush-on-anechka@users.noreply.github.com>
Date: Sun, 8 Feb 2026 02:52:22 +0300
Subject: [PATCH 128/165] promql: use Kahan summation for Native Histograms
(#15687)
As with float samples, Kahan summation is used for the `sum` and `avg` aggregations and for the respective `_over_time` functions.
Kahan summation is not perfect. This commit also adds tests that even Kahan summation cannot reliably pass; those tests are commented out.
Note that the behavior might differ on other hardware platforms. We have to keep an eye on tests failing on other platforms and adjust them accordingly.
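For reference, the call pattern used throughout the diff is sum, c = kahansum.Inc(v, sum, c), where c accumulates the compensation term. A sketch of what such an Inc does (the Neumaier variant of Kahan summation, mirroring the float-sample helper already used in promql; the exact util/kahansum implementation may differ in detail):

	package kahansum

	import "math"

	// Inc adds v to sum, tracking the rounding error in c so that
	// sum+c is a better estimate of the true total than sum alone.
	func Inc(v, sum, c float64) (newSum, newC float64) {
		t := sum + v
		switch {
		case math.IsInf(t, 0):
			c = 0 // overflow: the compensation term is meaningless
		case math.Abs(sum) >= math.Abs(v):
			c += (sum - t) + v // low-order digits of v were lost in t
		default:
			c += (v - t) + sum // low-order digits of sum were lost in t
		}
		return t, c
	}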
Signed-off-by: Aleksandr Smirnov <5targazer@mail.ru>
---
model/histogram/float_histogram.go | 537 ++++++++++++++++--
model/histogram/float_histogram_test.go | 299 ++++++++++
model/histogram/generic.go | 81 ++-
model/histogram/histogram.go | 8 +-
promql/engine.go | 126 ++--
promql/functions.go | 125 ++--
promql/functions_internal_test.go | 3 +-
promql/promqltest/testdata/aggregators.test | 5 +
.../testdata/native_histograms.test | 134 ++++-
util/kahansum/kahansum.go | 39 ++
10 files changed, 1186 insertions(+), 171 deletions(-)
create mode 100644 util/kahansum/kahansum.go
diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go
index 75021d2c62..d457d8ab25 100644
--- a/model/histogram/float_histogram.go
+++ b/model/histogram/float_histogram.go
@@ -18,6 +18,8 @@ import (
"fmt"
"math"
"strings"
+
+ "github.com/prometheus/prometheus/util/kahansum"
)
// FloatHistogram is similar to Histogram but uses float64 for all
@@ -353,7 +355,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (res *FloatHistogram, counte
}
counterResetCollision = h.adjustCounterReset(other)
if !h.UsesCustomBuckets() {
- otherZeroCount := h.reconcileZeroBuckets(other)
+ otherZeroCount, _ := h.reconcileZeroBuckets(other, nil)
h.ZeroCount += otherZeroCount
}
h.Count += other.Count
@@ -374,11 +376,11 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (res *FloatHistogram, counte
intersectedBounds := intersectCustomBucketBounds(h.CustomValues, other.CustomValues)
// Add with mapping - maps both histograms to intersected layout.
- h.PositiveSpans, h.PositiveBuckets = addCustomBucketsWithMismatches(
+ h.PositiveSpans, h.PositiveBuckets, _ = addCustomBucketsWithMismatches(
false,
hPositiveSpans, hPositiveBuckets, h.CustomValues,
otherPositiveSpans, otherPositiveBuckets, other.CustomValues,
- intersectedBounds)
+ nil, intersectedBounds)
h.CustomValues = intersectedBounds
}
return h, counterResetCollision, nhcbBoundsReconciled, nil
@@ -408,6 +410,121 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (res *FloatHistogram, counte
return h, counterResetCollision, nhcbBoundsReconciled, nil
}
+// KahanAdd works like Add but using the Kahan summation algorithm to minimize numerical errors.
+// c is a histogram holding the Kahan compensation term. It is modified in-place if non-nil.
+// If c is nil, a new compensation histogram is created inside the function. In this case,
+// the caller must use the returned updatedC, because the original c variable is not modified.
+func (h *FloatHistogram) KahanAdd(other, c *FloatHistogram) (updatedC *FloatHistogram, counterResetCollision, nhcbBoundsReconciled bool, err error) {
+ if err := h.checkSchemaAndBounds(other); err != nil {
+ return nil, false, false, err
+ }
+
+ counterResetCollision = h.adjustCounterReset(other)
+
+ if c == nil {
+ c = h.newCompensationHistogram()
+ }
+ if !h.UsesCustomBuckets() {
+ otherZeroCount, otherCZeroCount := h.reconcileZeroBuckets(other, c)
+ h.ZeroCount, c.ZeroCount = kahansum.Inc(otherZeroCount, h.ZeroCount, c.ZeroCount)
+ h.ZeroCount, c.ZeroCount = kahansum.Inc(otherCZeroCount, h.ZeroCount, c.ZeroCount)
+ }
+ h.Count, c.Count = kahansum.Inc(other.Count, h.Count, c.Count)
+ h.Sum, c.Sum = kahansum.Inc(other.Sum, h.Sum, c.Sum)
+
+ var (
+ hPositiveSpans = h.PositiveSpans
+ hPositiveBuckets = h.PositiveBuckets
+ otherPositiveSpans = other.PositiveSpans
+ otherPositiveBuckets = other.PositiveBuckets
+ cPositiveBuckets = c.PositiveBuckets
+ )
+
+ if h.UsesCustomBuckets() {
+ if CustomBucketBoundsMatch(h.CustomValues, other.CustomValues) {
+ h.PositiveSpans, h.PositiveBuckets, c.PositiveBuckets = kahanAddBuckets(
+ h.Schema, h.ZeroThreshold, false,
+ hPositiveSpans, hPositiveBuckets,
+ otherPositiveSpans, otherPositiveBuckets,
+ cPositiveBuckets, nil,
+ )
+ } else {
+ nhcbBoundsReconciled = true
+ intersectedBounds := intersectCustomBucketBounds(h.CustomValues, other.CustomValues)
+
+ // Add with mapping - maps both histograms to intersected layout.
+ h.PositiveSpans, h.PositiveBuckets, c.PositiveBuckets = addCustomBucketsWithMismatches(
+ false,
+ hPositiveSpans, hPositiveBuckets, h.CustomValues,
+ otherPositiveSpans, otherPositiveBuckets, other.CustomValues,
+ cPositiveBuckets, intersectedBounds)
+ h.CustomValues = intersectedBounds
+ c.CustomValues = intersectedBounds
+ }
+ c.PositiveSpans = h.PositiveSpans
+ return c, counterResetCollision, nhcbBoundsReconciled, nil
+ }
+
+ otherC := other.newCompensationHistogram()
+
+ var (
+ hNegativeSpans = h.NegativeSpans
+ hNegativeBuckets = h.NegativeBuckets
+ otherNegativeSpans = other.NegativeSpans
+ otherNegativeBuckets = other.NegativeBuckets
+ cNegativeBuckets = c.NegativeBuckets
+ otherCPositiveBuckets = otherC.PositiveBuckets
+ otherCNegativeBuckets = otherC.NegativeBuckets
+ )
+
+ switch {
+ case other.Schema < h.Schema:
+ hPositiveSpans, hPositiveBuckets, cPositiveBuckets = kahanReduceResolution(
+ hPositiveSpans, hPositiveBuckets, cPositiveBuckets,
+ h.Schema, other.Schema,
+ true,
+ )
+ hNegativeSpans, hNegativeBuckets, cNegativeBuckets = kahanReduceResolution(
+ hNegativeSpans, hNegativeBuckets, cNegativeBuckets,
+ h.Schema, other.Schema,
+ true,
+ )
+ h.Schema = other.Schema
+
+ case other.Schema > h.Schema:
+ otherPositiveSpans, otherPositiveBuckets, otherCPositiveBuckets = kahanReduceResolution(
+ otherPositiveSpans, otherPositiveBuckets, otherCPositiveBuckets,
+ other.Schema, h.Schema,
+ false,
+ )
+ otherNegativeSpans, otherNegativeBuckets, otherCNegativeBuckets = kahanReduceResolution(
+ otherNegativeSpans, otherNegativeBuckets, otherCNegativeBuckets,
+ other.Schema, h.Schema,
+ false,
+ )
+ }
+
+ h.PositiveSpans, h.PositiveBuckets, c.PositiveBuckets = kahanAddBuckets(
+ h.Schema, h.ZeroThreshold, false,
+ hPositiveSpans, hPositiveBuckets,
+ otherPositiveSpans, otherPositiveBuckets,
+ cPositiveBuckets, otherCPositiveBuckets,
+ )
+ h.NegativeSpans, h.NegativeBuckets, c.NegativeBuckets = kahanAddBuckets(
+ h.Schema, h.ZeroThreshold, false,
+ hNegativeSpans, hNegativeBuckets,
+ otherNegativeSpans, otherNegativeBuckets,
+ cNegativeBuckets, otherCNegativeBuckets,
+ )
+
+ c.Schema = h.Schema
+ c.ZeroThreshold = h.ZeroThreshold
+ c.PositiveSpans = h.PositiveSpans
+ c.NegativeSpans = h.NegativeSpans
+
+ return c, counterResetCollision, nhcbBoundsReconciled, nil
+}
+
// Sub works like Add but subtracts the other histogram. It uses the same logic
// to adjust the counter reset hint. This is useful where this method is used
// for incremental mean calculation. However, if it is used for the actual "-"
@@ -419,7 +536,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (res *FloatHistogram, counte
}
counterResetCollision = h.adjustCounterReset(other)
if !h.UsesCustomBuckets() {
- otherZeroCount := h.reconcileZeroBuckets(other)
+ otherZeroCount, _ := h.reconcileZeroBuckets(other, nil)
h.ZeroCount -= otherZeroCount
}
h.Count -= other.Count
@@ -440,11 +557,11 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (res *FloatHistogram, counte
intersectedBounds := intersectCustomBucketBounds(h.CustomValues, other.CustomValues)
// Subtract with mapping - maps both histograms to intersected layout.
- h.PositiveSpans, h.PositiveBuckets = addCustomBucketsWithMismatches(
+ h.PositiveSpans, h.PositiveBuckets, _ = addCustomBucketsWithMismatches(
true,
hPositiveSpans, hPositiveBuckets, h.CustomValues,
otherPositiveSpans, otherPositiveBuckets, other.CustomValues,
- intersectedBounds)
+ nil, intersectedBounds)
h.CustomValues = intersectedBounds
}
return h, counterResetCollision, nhcbBoundsReconciled, nil
@@ -576,15 +693,28 @@ func (h *FloatHistogram) Size() int {
// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0
// and only use a larger number if you know what you are doing.
func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
- h.PositiveBuckets, h.PositiveSpans = compactBuckets(
- h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false,
+ h.PositiveBuckets, _, h.PositiveSpans = compactBuckets(
+ h.PositiveBuckets, nil, h.PositiveSpans, maxEmptyBuckets, false,
)
- h.NegativeBuckets, h.NegativeSpans = compactBuckets(
- h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false,
+ h.NegativeBuckets, _, h.NegativeSpans = compactBuckets(
+ h.NegativeBuckets, nil, h.NegativeSpans, maxEmptyBuckets, false,
)
return h
}
+// kahanCompact works like Compact, but it is specialized for FloatHistogram's KahanAdd method.
+// c is a histogram holding the Kahan compensation term.
+func (h *FloatHistogram) kahanCompact(maxEmptyBuckets int, c *FloatHistogram,
+) (updatedH, updatedC *FloatHistogram) {
+ h.PositiveBuckets, c.PositiveBuckets, h.PositiveSpans = compactBuckets(
+ h.PositiveBuckets, c.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false,
+ )
+ h.NegativeBuckets, c.NegativeBuckets, h.NegativeSpans = compactBuckets(
+ h.NegativeBuckets, c.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false,
+ )
+ return h, c
+}
+
// DetectReset returns true if the receiving histogram is missing any buckets
// that have a non-zero population in the provided previous histogram. It also
// returns true if any count (in any bucket, in the zero count, or in the count
@@ -652,7 +782,7 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
// ZeroThreshold decreased.
return true
}
- previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold)
+ previousZeroCount, newThreshold, _ := previous.zeroCountForLargerThreshold(h.ZeroThreshold, nil)
if newThreshold != h.ZeroThreshold {
// ZeroThreshold is within a populated bucket in previous
// histogram.
@@ -847,30 +977,42 @@ func (h *FloatHistogram) Validate() error {
}
// zeroCountForLargerThreshold returns what the histogram's zero count would be
-// if the ZeroThreshold had the provided larger (or equal) value. If the
-// provided value is less than the histogram's ZeroThreshold, the method panics.
+// if the ZeroThreshold had the provided larger (or equal) value. It also returns the
+// zero count of the compensation histogram `c` if provided (used for Kahan summation).
+//
+// If the provided ZeroThreshold is less than the histogram's ZeroThreshold, the method panics.
// If the largerThreshold ends up within a populated bucket of the histogram, it
// is adjusted upwards to the lower limit of that bucket (all in terms of
// absolute values) and that bucket's count is included in the returned
// count. The adjusted threshold is returned, too.
-func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) {
+func (h *FloatHistogram) zeroCountForLargerThreshold(
+ largerThreshold float64, c *FloatHistogram) (hZeroCount, threshold, cZeroCount float64,
+) {
+ if c != nil {
+ cZeroCount = c.ZeroCount
+ }
// Fast path.
if largerThreshold == h.ZeroThreshold {
- return h.ZeroCount, largerThreshold
+ return h.ZeroCount, largerThreshold, cZeroCount
}
if largerThreshold < h.ZeroThreshold {
panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold))
}
outer:
for {
- count = h.ZeroCount
+ hZeroCount = h.ZeroCount
i := h.PositiveBucketIterator()
+ bucketsIdx := 0
for i.Next() {
b := i.At()
if b.Lower >= largerThreshold {
break
}
- count += b.Count // Bucket to be merged into zero bucket.
+ // Bucket to be merged into zero bucket.
+ hZeroCount, cZeroCount = kahansum.Inc(b.Count, hZeroCount, cZeroCount)
+ if c != nil {
+ hZeroCount, cZeroCount = kahansum.Inc(c.PositiveBuckets[bucketsIdx], hZeroCount, cZeroCount)
+ }
if b.Upper > largerThreshold {
// New threshold ended up within a bucket. If it's
// populated, we need to adjust largerThreshold before
@@ -880,14 +1022,20 @@ outer:
}
break
}
+ bucketsIdx++
}
i = h.NegativeBucketIterator()
+ bucketsIdx = 0
for i.Next() {
b := i.At()
if b.Upper <= -largerThreshold {
break
}
- count += b.Count // Bucket to be merged into zero bucket.
+ // Bucket to be merged into zero bucket.
+ hZeroCount, cZeroCount = kahansum.Inc(b.Count, hZeroCount, cZeroCount)
+ if c != nil {
+ hZeroCount, cZeroCount = kahansum.Inc(c.NegativeBuckets[bucketsIdx], hZeroCount, cZeroCount)
+ }
if b.Lower < -largerThreshold {
// New threshold ended up within a bucket. If
// it's populated, we need to adjust
@@ -900,15 +1048,17 @@ outer:
}
break
}
+ bucketsIdx++
}
- return count, largerThreshold
+ return hZeroCount, largerThreshold, cZeroCount
}
}
// trimBucketsInZeroBucket removes all buckets that are within the zero
// bucket. It assumes that the zero threshold is at a bucket boundary and that
// the counts in the buckets to remove are already part of the zero count.
-func (h *FloatHistogram) trimBucketsInZeroBucket() {
+// c is a histogram holding the Kahan compensation term.
+func (h *FloatHistogram) trimBucketsInZeroBucket(c *FloatHistogram) {
i := h.PositiveBucketIterator()
bucketsIdx := 0
for i.Next() {
@@ -917,6 +1067,9 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
break
}
h.PositiveBuckets[bucketsIdx] = 0
+ if c != nil {
+ c.PositiveBuckets[bucketsIdx] = 0
+ }
bucketsIdx++
}
i = h.NegativeBucketIterator()
@@ -927,34 +1080,46 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
break
}
h.NegativeBuckets[bucketsIdx] = 0
+ if c != nil {
+ c.NegativeBuckets[bucketsIdx] = 0
+ }
bucketsIdx++
}
// We are abusing Compact to trim the buckets set to zero
// above. Premature compacting could cause additional cost, but this
// code path is probably rarely used anyway.
- h.Compact(0)
+ if c != nil {
+ h.kahanCompact(0, c)
+ } else {
+ h.Compact(0)
+ }
}
// reconcileZeroBuckets finds a zero bucket large enough to include the zero
// buckets of both histograms (the receiving histogram and the other histogram)
// with a zero threshold that is not within a populated bucket in either
-// histogram. This method modifies the receiving histogram accordingly, but
-// leaves the other histogram as is. Instead, it returns the zero count the
-// other histogram would have if it were modified.
-func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
- otherZeroCount := other.ZeroCount
+// histogram. This method modifies the receiving histogram accordingly, and
+// also modifies the compensation histogram `c` (used for Kahan summation) if provided,
+// but leaves the other histogram as is. Instead, it returns the zero count the
+// other histogram would have if it were modified, as well as its Kahan compensation term.
+func (h *FloatHistogram) reconcileZeroBuckets(other, c *FloatHistogram) (otherZeroCount, otherCZeroCount float64) {
+ otherZeroCount = other.ZeroCount
otherZeroThreshold := other.ZeroThreshold
for otherZeroThreshold != h.ZeroThreshold {
if h.ZeroThreshold > otherZeroThreshold {
- otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold)
+ otherZeroCount, otherZeroThreshold, otherCZeroCount = other.zeroCountForLargerThreshold(h.ZeroThreshold, nil)
}
if otherZeroThreshold > h.ZeroThreshold {
- h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold)
- h.trimBucketsInZeroBucket()
+ var cZeroCount float64
+ h.ZeroCount, h.ZeroThreshold, cZeroCount = h.zeroCountForLargerThreshold(otherZeroThreshold, c)
+ if c != nil {
+ c.ZeroCount = cZeroCount
+ }
+ h.trimBucketsInZeroBucket(c)
}
}
- return otherZeroCount
+ return otherZeroCount, otherCZeroCount
}
// floatBucketIterator is a low-level constructor for bucket iterators.
@@ -1369,6 +1534,145 @@ func addBuckets(
return spansA, bucketsA
}
+// kahanAddBuckets works like addBuckets but it is used in FloatHistogram's KahanAdd method
+// and takes additional arguments, compensationBucketsA and compensationBucketsB,
+// which hold the Kahan compensation values associated with histograms A and B.
+// It returns the resulting spans/buckets and compensation buckets.
+func kahanAddBuckets(
+ schema int32, threshold float64, negative bool,
+ spansA []Span, bucketsA []float64,
+ spansB []Span, bucketsB []float64,
+ compensationBucketsA, compensationBucketsB []float64,
+) (newSpans []Span, newBucketsA, newBucketsC []float64) {
+ var (
+ iSpan = -1
+ iBucket = -1
+ iInSpan int32
+ indexA int32
+ indexB int32
+ bIdxB int
+ bucketB float64
+ compensationBucketB float64
+ deltaIndex int32
+ lowerThanThreshold = true
+ )
+
+ for _, spanB := range spansB {
+ indexB += spanB.Offset
+ for j := 0; j < int(spanB.Length); j++ {
+ if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
+ goto nextLoop
+ }
+ lowerThanThreshold = false
+
+ bucketB = bucketsB[bIdxB]
+ if compensationBucketsB != nil {
+ compensationBucketB = compensationBucketsB[bIdxB]
+ }
+ if negative {
+ bucketB *= -1
+ compensationBucketB *= -1
+ }
+
+ if iSpan == -1 {
+ if len(spansA) == 0 || spansA[0].Offset > indexB {
+ // Add bucket before all others.
+ bucketsA = append(bucketsA, 0)
+ copy(bucketsA[1:], bucketsA)
+ bucketsA[0] = bucketB
+ compensationBucketsA = append(compensationBucketsA, 0)
+ copy(compensationBucketsA[1:], compensationBucketsA)
+ compensationBucketsA[0] = compensationBucketB
+ if len(spansA) > 0 && spansA[0].Offset == indexB+1 {
+ spansA[0].Length++
+ spansA[0].Offset--
+ goto nextLoop
+ }
+ spansA = append(spansA, Span{})
+ copy(spansA[1:], spansA)
+ spansA[0] = Span{Offset: indexB, Length: 1}
+ if len(spansA) > 1 {
+ // Convert the absolute offset in the formerly
+ // first span to a relative offset.
+ spansA[1].Offset -= indexB + 1
+ }
+ goto nextLoop
+ } else if spansA[0].Offset == indexB {
+ // Just add to first bucket.
+ bucketsA[0], compensationBucketsA[0] = kahansum.Inc(bucketB, bucketsA[0], compensationBucketsA[0])
+ bucketsA[0], compensationBucketsA[0] = kahansum.Inc(compensationBucketB, bucketsA[0], compensationBucketsA[0])
+ goto nextLoop
+ }
+ iSpan, iBucket, iInSpan = 0, 0, 0
+ indexA = spansA[0].Offset
+ }
+ deltaIndex = indexB - indexA
+ for {
+ remainingInSpan := int32(spansA[iSpan].Length) - iInSpan
+ if deltaIndex < remainingInSpan {
+ // Bucket is in current span.
+ iBucket += int(deltaIndex)
+ iInSpan += deltaIndex
+ bucketsA[iBucket], compensationBucketsA[iBucket] = kahansum.Inc(bucketB, bucketsA[iBucket], compensationBucketsA[iBucket])
+ bucketsA[iBucket], compensationBucketsA[iBucket] = kahansum.Inc(compensationBucketB, bucketsA[iBucket], compensationBucketsA[iBucket])
+ break
+ }
+ deltaIndex -= remainingInSpan
+ iBucket += int(remainingInSpan)
+ iSpan++
+ if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset {
+ // Bucket is in gap behind previous span (or there are no further spans).
+ bucketsA = append(bucketsA, 0)
+ copy(bucketsA[iBucket+1:], bucketsA[iBucket:])
+ bucketsA[iBucket] = bucketB
+ compensationBucketsA = append(compensationBucketsA, 0)
+ copy(compensationBucketsA[iBucket+1:], compensationBucketsA[iBucket:])
+ compensationBucketsA[iBucket] = compensationBucketB
+ switch {
+ case deltaIndex == 0:
+ // Directly after previous span, extend previous span.
+ if iSpan < len(spansA) {
+ spansA[iSpan].Offset--
+ }
+ iSpan--
+ iInSpan = int32(spansA[iSpan].Length)
+ spansA[iSpan].Length++
+ goto nextLoop
+ case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1:
+ // Directly before next span, extend next span.
+ iInSpan = 0
+ spansA[iSpan].Offset--
+ spansA[iSpan].Length++
+ goto nextLoop
+ default:
+ // No next span, or next span is not directly adjacent to new bucket.
+ // Add new span.
+ iInSpan = 0
+ if iSpan < len(spansA) {
+ spansA[iSpan].Offset -= deltaIndex + 1
+ }
+ spansA = append(spansA, Span{})
+ copy(spansA[iSpan+1:], spansA[iSpan:])
+ spansA[iSpan] = Span{Length: 1, Offset: deltaIndex}
+ goto nextLoop
+ }
+ } else {
+ // Try start of next span.
+ deltaIndex -= spansA[iSpan].Offset
+ iInSpan = 0
+ }
+ }
+
+ nextLoop:
+ indexA = indexB
+ indexB++
+ bIdxB++
+ }
+ }
+
+ return spansA, bucketsA, compensationBucketsA
+}
+
// floatBucketsMatch compares bucket values of two float histograms using binary float comparison
// and returns true if all values match.
func floatBucketsMatch(b1, b2 []float64) bool {
@@ -1496,15 +1800,18 @@ func intersectCustomBucketBounds(boundsA, boundsB []float64) []float64 {
// addCustomBucketsWithMismatches handles adding/subtracting custom bucket histograms
// with mismatched bucket layouts by mapping both to an intersected layout.
+// It also processes the Kahan compensation term if provided.
func addCustomBucketsWithMismatches(
negative bool,
spansA []Span, bucketsA, boundsA []float64,
spansB []Span, bucketsB, boundsB []float64,
+ bucketsC []float64,
intersectedBounds []float64,
-) ([]Span, []float64) {
+) ([]Span, []float64, []float64) {
targetBuckets := make([]float64, len(intersectedBounds)+1)
+ cTargetBuckets := make([]float64, len(intersectedBounds)+1)
- mapBuckets := func(spans []Span, buckets, bounds []float64, negative bool) {
+ mapBuckets := func(spans []Span, buckets, bounds []float64, negative, withCompensation bool) {
srcIdx := 0
bucketIdx := 0
intersectIdx := 0
@@ -1530,9 +1837,12 @@ func addCustomBucketsWithMismatches(
}
if negative {
- targetBuckets[targetIdx] -= value
+ targetBuckets[targetIdx], cTargetBuckets[targetIdx] = kahansum.Dec(value, targetBuckets[targetIdx], cTargetBuckets[targetIdx])
} else {
- targetBuckets[targetIdx] += value
+ targetBuckets[targetIdx], cTargetBuckets[targetIdx] = kahansum.Inc(value, targetBuckets[targetIdx], cTargetBuckets[targetIdx])
+ if withCompensation && bucketsC != nil {
+ targetBuckets[targetIdx], cTargetBuckets[targetIdx] = kahansum.Inc(bucketsC[bucketIdx], targetBuckets[targetIdx], cTargetBuckets[targetIdx])
+ }
}
}
srcIdx++
@@ -1541,21 +1851,23 @@ func addCustomBucketsWithMismatches(
}
}
- // Map both histograms to the intersected layout.
- mapBuckets(spansA, bucketsA, boundsA, false)
- mapBuckets(spansB, bucketsB, boundsB, negative)
+ // Map histograms to the intersected layout.
+ mapBuckets(spansA, bucketsA, boundsA, false, true)
+ mapBuckets(spansB, bucketsB, boundsB, negative, false)
// Build spans and buckets, excluding zero-valued buckets from the final result.
- destSpans := spansA[:0] // Reuse spansA capacity for destSpans since we don't need it anymore.
- destBuckets := targetBuckets[:0] // Reuse targetBuckets capacity for destBuckets since it's guaranteed to be large enough.
+ destSpans := spansA[:0] // Reuse spansA capacity for destSpans since we don't need it anymore.
+ destBuckets := targetBuckets[:0] // Reuse targetBuckets capacity for destBuckets since it's guaranteed to be large enough.
+ cDestBuckets := cTargetBuckets[:0] // Reuse cTargetBuckets capacity for cDestBuckets since it's guaranteed to be large enough.
lastIdx := int32(-1)
- for i, count := range targetBuckets {
- if count == 0 {
+ for i := range targetBuckets {
+ if targetBuckets[i] == 0 && cTargetBuckets[i] == 0 {
continue
}
- destBuckets = append(destBuckets, count)
+ destBuckets = append(destBuckets, targetBuckets[i])
+ cDestBuckets = append(cDestBuckets, cTargetBuckets[i])
idx := int32(i)
if len(destSpans) > 0 && idx == lastIdx+1 {
@@ -1578,7 +1890,7 @@ func addCustomBucketsWithMismatches(
lastIdx = idx
}
- return destSpans, destBuckets
+ return destSpans, destBuckets, cDestBuckets
}
// ReduceResolution reduces the float histogram's spans, buckets into target schema.
@@ -1618,6 +1930,121 @@ func (h *FloatHistogram) ReduceResolution(targetSchema int32) error {
return nil
}
+// kahanReduceResolution works like reduceResolution, but it is specialized for FloatHistogram's KahanAdd method.
+// Unlike reduceResolution, which supports both float and integer buckets, this function only operates on float buckets.
+// It also takes an additional argument, originCompensationBuckets, representing the compensation buckets for the origin histogram.
+// It modifies both the origin histogram buckets and their associated compensation buckets.
+func kahanReduceResolution(
+ originSpans []Span,
+ originReceivingBuckets []float64,
+ originCompensationBuckets []float64,
+ originSchema,
+ targetSchema int32,
+ inplace bool,
+) (newSpans []Span, newReceivingBuckets, newCompensationBuckets []float64) {
+ var (
+ targetSpans []Span // The spans in the target schema.
+ targetReceivingBuckets []float64 // The receiving bucket counts in the target schema.
+ targetCompensationBuckets []float64 // The compensation bucket counts in the target schema.
+ bucketIdx int32 // The index of bucket in the origin schema.
+ bucketCountIdx int // The position of a bucket in the origin bucket count slice `originReceivingBuckets`.
+ targetBucketIdx int32 // The index of bucket in the target schema.
+ lastTargetBucketIdx int32 // The index of the last added target bucket.
+ )
+
+ if inplace {
+ // Slice reuse is safe because when reducing the resolution,
+ // target slices don't grow faster than origin slices are being read.
+ targetSpans = originSpans[:0]
+ targetReceivingBuckets = originReceivingBuckets[:0]
+ targetCompensationBuckets = originCompensationBuckets[:0]
+ }
+
+ for _, span := range originSpans {
+ // Determine the index of the first bucket in this span.
+ bucketIdx += span.Offset
+ for j := 0; j < int(span.Length); j++ {
+ // Determine the index of the bucket in the target schema from the index in the original schema.
+ targetBucketIdx = targetIdx(bucketIdx, originSchema, targetSchema)
+
+ switch {
+ case len(targetSpans) == 0:
+ // This is the first span in the targetSpans.
+ span := Span{
+ Offset: targetBucketIdx,
+ Length: 1,
+ }
+ targetSpans = append(targetSpans, span)
+ targetReceivingBuckets = append(targetReceivingBuckets, originReceivingBuckets[bucketCountIdx])
+ lastTargetBucketIdx = targetBucketIdx
+ targetCompensationBuckets = append(targetCompensationBuckets, originCompensationBuckets[bucketCountIdx])
+
+ case lastTargetBucketIdx == targetBucketIdx:
+ // The current bucket has to be merged into the same target bucket as the previous bucket.
+ lastBucketIdx := len(targetReceivingBuckets) - 1
+ targetReceivingBuckets[lastBucketIdx], targetCompensationBuckets[lastBucketIdx] = kahansum.Inc(
+ originReceivingBuckets[bucketCountIdx],
+ targetReceivingBuckets[lastBucketIdx],
+ targetCompensationBuckets[lastBucketIdx],
+ )
+ targetReceivingBuckets[lastBucketIdx], targetCompensationBuckets[lastBucketIdx] = kahansum.Inc(
+ originCompensationBuckets[bucketCountIdx],
+ targetReceivingBuckets[lastBucketIdx],
+ targetCompensationBuckets[lastBucketIdx],
+ )
+
+ case (lastTargetBucketIdx + 1) == targetBucketIdx:
+ // The current bucket has to go into a new target bucket,
+ // and that bucket is next to the previous target bucket,
+ // so we add it to the current target span.
+ targetSpans[len(targetSpans)-1].Length++
+ lastTargetBucketIdx++
+ targetReceivingBuckets = append(targetReceivingBuckets, originReceivingBuckets[bucketCountIdx])
+ targetCompensationBuckets = append(targetCompensationBuckets, originCompensationBuckets[bucketCountIdx])
+
+ case (lastTargetBucketIdx + 1) < targetBucketIdx:
+ // The current bucket has to go into a new target bucket,
+ // and that bucket is separated by a gap from the previous target bucket,
+ // so we need to add a new target span.
+ span := Span{
+ Offset: targetBucketIdx - lastTargetBucketIdx - 1,
+ Length: 1,
+ }
+ targetSpans = append(targetSpans, span)
+ lastTargetBucketIdx = targetBucketIdx
+ targetReceivingBuckets = append(targetReceivingBuckets, originReceivingBuckets[bucketCountIdx])
+ targetCompensationBuckets = append(targetCompensationBuckets, originCompensationBuckets[bucketCountIdx])
+ }
+
+ bucketIdx++
+ bucketCountIdx++
+ }
+ }
+
+ return targetSpans, targetReceivingBuckets, targetCompensationBuckets
+}
+
+// newCompensationHistogram initializes a new compensation histogram that can be used
+// alongside the current FloatHistogram in Kahan summation.
+// The compensation histogram is structured to match the receiving histogram's bucket layout
+// including its schema, zero threshold and custom values, and it shares spans with the receiving
+// histogram. However, the bucket values in the compensation histogram are initialized to zero.
+func (h *FloatHistogram) newCompensationHistogram() *FloatHistogram {
+ c := &FloatHistogram{
+ CounterResetHint: h.CounterResetHint,
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ CustomValues: h.CustomValues,
+ PositiveBuckets: make([]float64, len(h.PositiveBuckets)),
+ PositiveSpans: h.PositiveSpans,
+ NegativeSpans: h.NegativeSpans,
+ }
+ if !h.UsesCustomBuckets() {
+ c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
+ }
+ return c
+}
+
// checkSchemaAndBounds checks if two histograms are compatible because they
// both use a standard exponential schema or because they both are NHCBs.
func (h *FloatHistogram) checkSchemaAndBounds(other *FloatHistogram) error {
@@ -1659,3 +2086,27 @@ func (h *FloatHistogram) adjustCounterReset(other *FloatHistogram) (counterReset
}
return false
}
+
+// HasOverflow reports whether any of the FloatHistogram's fields contain an infinite value.
+// This can happen when aggregating multiple histograms and exceeding float64 capacity.
+func (h *FloatHistogram) HasOverflow() bool {
+ if math.IsInf(h.ZeroCount, 0) || math.IsInf(h.Count, 0) || math.IsInf(h.Sum, 0) {
+ return true
+ }
+ for _, v := range h.PositiveBuckets {
+ if math.IsInf(v, 0) {
+ return true
+ }
+ }
+ for _, v := range h.NegativeBuckets {
+ if math.IsInf(v, 0) {
+ return true
+ }
+ }
+ for _, v := range h.CustomValues {
+ if math.IsInf(v, 0) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go
index 5c29544c8f..caf77b6256 100644
--- a/model/histogram/float_histogram_test.go
+++ b/model/histogram/float_histogram_test.go
@@ -2514,6 +2514,243 @@ func TestFloatHistogramAdd(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
testHistogramAdd(t, c.in1, c.in2, c.expected, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
testHistogramAdd(t, c.in2, c.in1, c.expected, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
+ testHistogramKahanAdd(t, c.in1, nil, c.in2, c.expected, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
+ testHistogramKahanAdd(t, c.in2, nil, c.in1, c.expected, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
+ })
+ }
+}
+
+// TestKahanAddWithCompHistogram tests KahanAdd.
+// Test cases provide two float histograms and a compensation histogram with predefined values.
+func TestKahanAddWithCompHistogram(t *testing.T) {
+ cases := []struct {
+ name string
+ in1, comp, in2, expectedSum *FloatHistogram
+ expErrMsg string
+ expCounterResetCollision bool
+ expNHCBBoundsReconciled bool
+ }{
+ {
+ name: "larger zero bucket in first histogram",
+ in1: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 21,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {0, 3}},
+ PositiveBuckets: []float64{2, 3, 6, 2, 5},
+ NegativeSpans: []Span{{4, 2}, {1, 2}},
+ NegativeBuckets: []float64{1, 1, 4, 4},
+ },
+ comp: &FloatHistogram{
+ ZeroThreshold: 1,
+ PositiveSpans: []Span{{1, 2}, {0, 3}},
+ PositiveBuckets: []float64{0.02, 0.03, 0.06, 0.02, 0.05},
+ NegativeSpans: []Span{{4, 2}, {1, 2}},
+ NegativeBuckets: []float64{0.01, 0.01, 0.04, 0.04},
+ },
+ in2: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 30,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 2}, {2, 3}},
+ PositiveBuckets: []float64{1, 0, 3, 4, 7},
+ NegativeSpans: []Span{{3, 2}, {3, 2}},
+ NegativeBuckets: []float64{3, 1, 5, 6},
+ },
+ expectedSum: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 29,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 2}, {0, 3}},
+ PositiveBuckets: []float64{2.02, 6.03, 10.06, 9.02, 5.05},
+ NegativeSpans: []Span{{3, 3}, {1, 3}},
+ NegativeBuckets: []float64{3, 2.01, 1.01, 4.04, 9.04, 6},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "smaller zero bucket in first histogram",
+ in1: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 40,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 2}, {2, 3}},
+ PositiveBuckets: []float64{1, 2, 3, 4, 7},
+ NegativeSpans: []Span{{3, 2}, {3, 2}},
+ NegativeBuckets: []float64{3, 1, 5, 6},
+ },
+ comp: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ ZeroCount: 0,
+ PositiveSpans: []Span{{-2, 2}, {2, 3}},
+ PositiveBuckets: []float64{0.02, 0.03, 0.06, 0.07, 0.05},
+ NegativeSpans: []Span{{3, 2}, {3, 2}},
+ NegativeBuckets: []float64{0.01, 0.01, 0.04, 0.04},
+ },
+ in2: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 11,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {0, 3}},
+ PositiveBuckets: []float64{2, 3, 6, 2, 5},
+ NegativeSpans: []Span{{4, 2}, {1, 2}},
+ NegativeBuckets: []float64{1, 1, 4, 4},
+ },
+ expectedSum: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 31.05,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 5}},
+ PositiveBuckets: []float64{2, 6.06, 10.07, 9.05, 5},
+ NegativeSpans: []Span{{3, 3}, {1, 3}},
+ NegativeBuckets: []float64{3.01, 2.01, 1, 4, 9.04, 6.04},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "first histogram contains zero buckets and Compact is called",
+ in1: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 30,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 2}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{1, 3, 3, 0, 7, -6},
+ },
+ comp: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ PositiveSpans: []Span{{-2, 2}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{7, 2, 0.03, 0, 0.05, 0.06},
+ },
+ in2: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 21,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{2, 3, 2, 5},
+ },
+ expectedSum: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 41,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{5.03, 3, 9.05, -0.94},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "reduce resolution",
+ in1: &FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 30,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 2}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{1, 3, 1e100, 0, 7, -6},
+ },
+ comp: &FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.01,
+ ZeroCount: 1,
+ PositiveSpans: []Span{{-2, 2}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{7, 2, 0.03, 0, 0.05, 0.06},
+ },
+ in2: &FloatHistogram{
+ Schema: 1,
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 21,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{-1e100, 3, 2, 5},
+ },
+ expectedSum: &FloatHistogram{
+ Schema: 1,
+ ZeroThreshold: 1,
+ ZeroCount: 42,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 5}},
+ PositiveBuckets: []float64{0.03, 10.05, -5.94, 2, 5},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "reduce resolution of 'other' histogram",
+ in1: &FloatHistogram{
+ Schema: 0,
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 21,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{2, 3, 2, 5},
+ },
+ comp: &FloatHistogram{
+ Schema: 0,
+ ZeroThreshold: 1,
+ ZeroCount: 1,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{17, 2, 0.03, 0},
+ },
+ in2: &FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 30,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 3}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{1e100, 4.1, -1e100, 2.1, 0, 7, -6},
+ },
+ expectedSum: &FloatHistogram{
+ Schema: 0,
+ ZeroThreshold: 1,
+ ZeroCount: 33.1,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{21.1, 6, 2.03, 5},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "warn on counter reset hint collision",
+ in1: &FloatHistogram{
+ Schema: CustomBucketsSchema,
+ CounterResetHint: CounterReset,
+ },
+ in2: &FloatHistogram{
+ Schema: CustomBucketsSchema,
+ CounterResetHint: NotCounterReset,
+ },
+ expErrMsg: "",
+ expCounterResetCollision: true,
+ expNHCBBoundsReconciled: false,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ testHistogramKahanAdd(t, c.in1, c.comp, c.in2, c.expectedSum, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
})
}
}
@@ -2557,6 +2794,68 @@ func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram, expErrMsg st
}
}
+func testHistogramKahanAdd(
+ t *testing.T, a, c, b, expectedSum *FloatHistogram, expErrMsg string, expCounterResetCollision, expNHCBBoundsReconciled bool,
+) {
+ var (
+ aCopy = a.Copy()
+ bCopy = b.Copy()
+ cCopy *FloatHistogram
+ expectedSumCopy *FloatHistogram
+ )
+
+ if c != nil {
+ cCopy = c.Copy()
+ }
+
+ if expectedSum != nil {
+ expectedSumCopy = expectedSum.Copy()
+ }
+
+ comp, counterResetCollision, nhcbBoundsReconciled, err := aCopy.KahanAdd(bCopy, cCopy)
+ if expErrMsg != "" {
+ require.EqualError(t, err, expErrMsg)
+ } else {
+ require.NoError(t, err)
+ }
+
+ var res *FloatHistogram
+ if comp != nil {
+ // Check that aCopy and its compensation histogram layouts match after addition.
+ require.Equal(t, aCopy.Schema, comp.Schema)
+ require.Equal(t, aCopy.ZeroThreshold, comp.ZeroThreshold)
+ require.Equal(t, aCopy.PositiveSpans, comp.PositiveSpans)
+ require.Equal(t, aCopy.NegativeSpans, comp.NegativeSpans)
+ require.Len(t, aCopy.CustomValues, len(comp.CustomValues))
+ require.Len(t, aCopy.PositiveBuckets, len(comp.PositiveBuckets))
+ require.Len(t, aCopy.NegativeBuckets, len(comp.NegativeBuckets))
+
+ res, _, _, err = aCopy.Add(comp)
+ if expErrMsg != "" {
+ require.EqualError(t, err, expErrMsg)
+ } else {
+ require.NoError(t, err)
+ }
+ }
+
+ // Check that the warnings are correct.
+ require.Equal(t, expCounterResetCollision, counterResetCollision)
+ require.Equal(t, expNHCBBoundsReconciled, nhcbBoundsReconciled)
+
+ if expectedSum != nil {
+ res.Compact(0)
+ expectedSumCopy.Compact(0)
+
+ require.Equal(t, expectedSumCopy, res)
+
+ // Has it also happened in-place?
+ require.Equal(t, expectedSumCopy, aCopy)
+
+ // Check that the argument was not mutated.
+ require.Equal(t, b, bCopy)
+ }
+}
+
func TestFloatHistogramSub(t *testing.T) {
// This has fewer test cases than TestFloatHistogramAdd because Add and
// Sub share most of the trickier code.
diff --git a/model/histogram/generic.go b/model/histogram/generic.go
index 61fc5067f2..9ec9e9cd4b 100644
--- a/model/histogram/generic.go
+++ b/model/histogram/generic.go
@@ -230,14 +230,29 @@ func (b *baseBucketIterator[BC, IBC]) strippedAt() strippedBucket[BC] {
// compactBuckets is a generic function used by both Histogram.Compact and
// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are
// deltas. Set it to false if the buckets contain absolute counts.
-func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
+// For float histograms, deltaBuckets is always false.
+// primaryBuckets hold the main histogram values, while compensationBuckets (if provided) store
+// Kahan compensation values. compensationBuckets can only be provided for float histograms
+// and are processed in parallel with primaryBuckets to maintain synchronization.
+func compactBuckets[IBC InternalBucketCount](
+ primaryBuckets []IBC, compensationBuckets []float64,
+ spans []Span, maxEmptyBuckets int, deltaBuckets bool,
+) (updatedPrimaryBuckets []IBC, updatedCompensationBuckets []float64, updatedSpans []Span) {
+ if deltaBuckets && compensationBuckets != nil {
+ panic("histogram type mismatch: deltaBuckets cannot be true when compensationBuckets is provided")
+ } else if compensationBuckets != nil && len(primaryBuckets) != len(compensationBuckets) {
+ panic(fmt.Errorf(
+ "primary buckets layout (%v) mismatch against associated compensation buckets layout (%v)",
+ primaryBuckets, compensationBuckets),
+ )
+ }
// Fast path: If there are no empty buckets AND no offset in any span is
// <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return
// immediately. We check that first because it's cheap and presumably
// common.
nothingToDo := true
var currentBucketAbsolute IBC
- for _, bucket := range buckets {
+ for _, bucket := range primaryBuckets {
if deltaBuckets {
currentBucketAbsolute += bucket
} else {
@@ -256,7 +271,7 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
}
}
if nothingToDo {
- return buckets, spans
+ return primaryBuckets, compensationBuckets, spans
}
}
@@ -268,12 +283,19 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
emptyBucketsHere := func() int {
i := 0
abs := currentBucketAbsolute
- for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 {
+ comp := float64(0)
+ if compensationBuckets != nil {
+ comp = compensationBuckets[iBucket]
+ }
+ for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 && comp == 0 {
i++
- if i+iBucket >= len(buckets) {
+ if i+iBucket >= len(primaryBuckets) {
break
}
- abs = buckets[i+iBucket]
+ abs = primaryBuckets[i+iBucket]
+ if compensationBuckets != nil {
+ comp = compensationBuckets[i+iBucket]
+ }
}
return i
}
@@ -313,11 +335,11 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
// Cut out empty buckets from start and end of spans, no matter
// what. Also cut out empty buckets from the middle of a span but only
// if there are more than maxEmptyBuckets consecutive empty buckets.
- for iBucket < len(buckets) {
+ for iBucket < len(primaryBuckets) {
if deltaBuckets {
- currentBucketAbsolute += buckets[iBucket]
+ currentBucketAbsolute += primaryBuckets[iBucket]
} else {
- currentBucketAbsolute = buckets[iBucket]
+ currentBucketAbsolute = primaryBuckets[iBucket]
}
if nEmpty := emptyBucketsHere(); nEmpty > 0 {
if posInSpan > 0 &&
@@ -334,11 +356,14 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
continue
}
// In all other cases, we cut out the empty buckets.
- if deltaBuckets && iBucket+nEmpty < len(buckets) {
- currentBucketAbsolute = -buckets[iBucket]
- buckets[iBucket+nEmpty] += buckets[iBucket]
+ if deltaBuckets && iBucket+nEmpty < len(primaryBuckets) {
+ currentBucketAbsolute = -primaryBuckets[iBucket]
+ primaryBuckets[iBucket+nEmpty] += primaryBuckets[iBucket]
+ }
+ primaryBuckets = append(primaryBuckets[:iBucket], primaryBuckets[iBucket+nEmpty:]...)
+ if compensationBuckets != nil {
+ compensationBuckets = append(compensationBuckets[:iBucket], compensationBuckets[iBucket+nEmpty:]...)
}
- buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...)
if posInSpan == 0 {
// Start of span.
if nEmpty == int(spans[iSpan].Length) {
@@ -388,8 +413,8 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
iSpan++
}
}
- if maxEmptyBuckets == 0 || len(buckets) == 0 {
- return buckets, spans
+ if maxEmptyBuckets == 0 || len(primaryBuckets) == 0 {
+ return primaryBuckets, compensationBuckets, spans
}
// Finally, check if any offsets between spans are small enough to merge
@@ -397,7 +422,7 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
iBucket = int(spans[0].Length)
if deltaBuckets {
currentBucketAbsolute = 0
- for _, bucket := range buckets[:iBucket] {
+ for _, bucket := range primaryBuckets[:iBucket] {
currentBucketAbsolute += bucket
}
}
@@ -406,7 +431,7 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
if int(spans[iSpan].Offset) > maxEmptyBuckets {
l := int(spans[iSpan].Length)
if deltaBuckets {
- for _, bucket := range buckets[iBucket : iBucket+l] {
+ for _, bucket := range primaryBuckets[iBucket : iBucket+l] {
currentBucketAbsolute += bucket
}
}
@@ -418,22 +443,28 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
offset := int(spans[iSpan].Offset)
spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length
spans = append(spans[:iSpan], spans[iSpan+1:]...)
- newBuckets := make([]IBC, len(buckets)+offset)
- copy(newBuckets, buckets[:iBucket])
- copy(newBuckets[iBucket+offset:], buckets[iBucket:])
+ newPrimaryBuckets := make([]IBC, len(primaryBuckets)+offset)
+ copy(newPrimaryBuckets, primaryBuckets[:iBucket])
+ copy(newPrimaryBuckets[iBucket+offset:], primaryBuckets[iBucket:])
if deltaBuckets {
- newBuckets[iBucket] = -currentBucketAbsolute
- newBuckets[iBucket+offset] += currentBucketAbsolute
+ newPrimaryBuckets[iBucket] = -currentBucketAbsolute
+ newPrimaryBuckets[iBucket+offset] += currentBucketAbsolute
+ }
+ primaryBuckets = newPrimaryBuckets
+ if compensationBuckets != nil {
+ newCompensationBuckets := make([]float64, len(compensationBuckets)+offset)
+ copy(newCompensationBuckets, compensationBuckets[:iBucket])
+ copy(newCompensationBuckets[iBucket+offset:], compensationBuckets[iBucket:])
+ compensationBuckets = newCompensationBuckets
}
iBucket += offset
- buckets = newBuckets
- currentBucketAbsolute = buckets[iBucket]
+ currentBucketAbsolute = primaryBuckets[iBucket]
// Note that with many merges, it would be more efficient to
// first record all the chunks of empty buckets to insert and
// then do it in one go through all the buckets.
}
- return buckets, spans
+ return primaryBuckets, compensationBuckets, spans
}
func checkHistogramSpans(spans []Span, numBuckets int) error {
diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go
index 5be60174fc..6ed02aed57 100644
--- a/model/histogram/histogram.go
+++ b/model/histogram/histogram.go
@@ -349,11 +349,11 @@ func allEmptySpans(s []Span) bool {
// Compact works like FloatHistogram.Compact. See there for detailed
// explanations.
func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
- h.PositiveBuckets, h.PositiveSpans = compactBuckets(
- h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true,
+ h.PositiveBuckets, _, h.PositiveSpans = compactBuckets(
+ h.PositiveBuckets, nil, h.PositiveSpans, maxEmptyBuckets, true,
)
- h.NegativeBuckets, h.NegativeSpans = compactBuckets(
- h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true,
+ h.NegativeBuckets, _, h.NegativeSpans = compactBuckets(
+ h.NegativeBuckets, nil, h.NegativeSpans, maxEmptyBuckets, true,
)
return h
}
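To make the widened compactBuckets contract concrete, a minimal usage sketch for the float-histogram case (hypothetical call site; Span and compactBuckets are the package-internal names shown above, and deltaBuckets must be false whenever compensation buckets are supplied):

    // Primary values and their Kahan compensation values share one bucket layout.
    primary := []float64{1, 0, 0, 2}
    comp := []float64{1e-17, 0, 0, -2e-17}
    spans := []Span{{Offset: 0, Length: 4}}
    // With maxEmptyBuckets=0, the two middle buckets (zero in *both* slices)
    // are cut from primary and comp in lockstep and the span is split.
    primary, comp, spans = compactBuckets(primary, comp, spans, 0, false)
    // primary: [1 2], comp: [1e-17 -2e-17], spans: [{0 1} {2 1}]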
diff --git a/promql/engine.go b/promql/engine.go
index afe82bc38f..cb27af3f46 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -50,6 +50,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/features"
+ "github.com/prometheus/prometheus/util/kahansum"
"github.com/prometheus/prometheus/util/logging"
"github.com/prometheus/prometheus/util/stats"
"github.com/prometheus/prometheus/util/zeropool"
@@ -3239,23 +3240,26 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
}
type groupedAggregation struct {
- floatValue float64
- histogramValue *histogram.FloatHistogram
- floatMean float64
- floatKahanC float64 // "Compensating value" for Kahan summation.
- groupCount float64
- heap vectorByValueHeap
+ floatValue float64
+ floatMean float64
+ floatKahanC float64 // Compensation float for Kahan summation.
+ histogramValue *histogram.FloatHistogram
+ histogramMean *histogram.FloatHistogram
+ histogramKahanC *histogram.FloatHistogram // Compensation histogram for Kahan summation.
+ groupCount float64
+ heap vectorByValueHeap
// All bools together for better packing within the struct.
- seen bool // Was this output groups seen in the input at this timestamp.
- hasFloat bool // Has at least 1 float64 sample aggregated.
- hasHistogram bool // Has at least 1 histogram sample aggregated.
- incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets.
- groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
- incrementalMean bool // True after reverting to incremental calculation of the mean value.
- counterResetSeen bool // Counter reset hint CounterReset seen. Currently only used for histogram samples.
- notCounterResetSeen bool // Counter reset hint NotCounterReset seen. Currently only used for histogram samples.
- dropName bool // True if any sample in this group has DropName set.
+ seen bool // Was this output group seen in the input at this timestamp.
+ hasFloat bool // Has at least 1 float64 sample aggregated.
+ hasHistogram bool // Has at least 1 histogram sample aggregated.
+ incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets.
+ groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
+ floatIncrementalMean bool // True after reverting to incremental calculation for float-based mean value.
+ histogramIncrementalMean bool // True after reverting to incremental calculation for histogram-based mean value.
+ counterResetSeen bool // Counter reset hint CounterReset seen. Currently only used for histogram samples.
+ notCounterResetSeen bool // Counter reset hint NotCounterReset seen. Currently only used for histogram samples.
+ dropName bool // True if any sample in this group has DropName set.
}
// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
@@ -3345,6 +3349,11 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
group.dropName = true
}
+ var (
+ nhcbBoundsReconciled bool
+ err error
+ )
+
switch op {
case parser.SUM:
if h != nil {
@@ -3356,7 +3365,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case histogram.NotCounterReset:
group.notCounterResetSeen = true
}
- _, _, nhcbBoundsReconciled, err := group.histogramValue.Add(h)
+ group.histogramKahanC, _, nhcbBoundsReconciled, err = group.histogramValue.KahanAdd(h, group.histogramKahanC)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
@@ -3370,18 +3379,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
- group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC)
+ group.floatValue, group.floatKahanC = kahansum.Inc(f, group.floatValue, group.floatKahanC)
}
case parser.AVG:
- // For the average calculation of histograms, we use
- // incremental mean calculation without the help of
- // Kahan summation (but this should change, see
- // https://github.com/prometheus/prometheus/issues/14105
- // ). For floats, we improve the accuracy with the help
- // of Kahan summation. For a while, we assumed that
- // incremental mean calculation combined with Kahan
- // summation (see
+ // We improve the accuracy with the help of Kahan summation.
+ // For a while, we assumed that incremental mean calculation
+ // combined with Kahan summation (see
// https://stackoverflow.com/questions/61665473/is-it-beneficial-for-precision-to-calculate-the-incremental-mean-average
// for inspiration) is generally the preferred solution.
// However, it then turned out that direct mean
@@ -3416,20 +3420,37 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case histogram.NotCounterReset:
group.notCounterResetSeen = true
}
- left := h.Copy().Div(group.groupCount)
- right := group.histogramValue.Copy().Div(group.groupCount)
-
- toAdd, _, nhcbBoundsReconciled, err := left.Sub(right)
- if err != nil {
- handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
- group.incompatibleHistograms = true
- continue
+ if !group.histogramIncrementalMean {
+ v := group.histogramValue.Copy()
+ var c *histogram.FloatHistogram
+ if group.histogramKahanC != nil {
+ c = group.histogramKahanC.Copy()
+ }
+ c, _, nhcbBoundsReconciled, err = v.KahanAdd(h, c)
+ if err != nil {
+ handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+ group.incompatibleHistograms = true
+ continue
+ }
+ if nhcbBoundsReconciled {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(e.Expr.PositionRange(), annotations.HistogramAgg))
+ }
+ if !v.HasOverflow() {
+ group.histogramValue, group.histogramKahanC = v, c
+ break
+ }
+ group.histogramIncrementalMean = true
+ group.histogramMean = group.histogramValue.Copy().Div(group.groupCount - 1)
+ if group.histogramKahanC != nil {
+ group.histogramKahanC.Div(group.groupCount - 1)
+ }
}
- if nhcbBoundsReconciled {
- annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(e.Expr.PositionRange(), annotations.HistogramAgg))
+ q := (group.groupCount - 1) / group.groupCount
+ if group.histogramKahanC != nil {
+ group.histogramKahanC.Mul(q)
}
-
- _, _, nhcbBoundsReconciled, err = group.histogramValue.Add(toAdd)
+ toAdd := h.Copy().Div(group.groupCount)
+ group.histogramKahanC, _, nhcbBoundsReconciled, err = group.histogramMean.Mul(q).KahanAdd(toAdd, group.histogramKahanC)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
@@ -3444,8 +3465,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
- if !group.incrementalMean {
- newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC)
+ if !group.floatIncrementalMean {
+ newV, newC := kahansum.Inc(f, group.floatValue, group.floatKahanC)
if !math.IsInf(newV, 0) {
// The sum doesn't overflow, so we propagate it to the
// group struct and continue with the regular
@@ -3456,12 +3477,12 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// If we are here, we know that the sum _would_ overflow. So
// instead of continue to sum up, we revert to incremental
// calculation of the mean value from here on.
- group.incrementalMean = true
+ group.floatIncrementalMean = true
group.floatMean = group.floatValue / (group.groupCount - 1)
group.floatKahanC /= group.groupCount - 1
}
q := (group.groupCount - 1) / group.groupCount
- group.floatMean, group.floatKahanC = kahanSumInc(
+ group.floatMean, group.floatKahanC = kahansum.Inc(
f/group.groupCount,
q*group.floatMean,
q*group.floatKahanC,
@@ -3536,8 +3557,24 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case aggr.incompatibleHistograms:
continue
case aggr.hasHistogram:
+ if aggr.histogramIncrementalMean {
+ if aggr.histogramKahanC != nil {
+ aggr.histogramValue, _, _, _ = aggr.histogramMean.Add(aggr.histogramKahanC)
+ // Add can theoretically return ErrHistogramsIncompatibleSchema, but at
+ // this stage errors should not occur if earlier KahanAdd calls succeeded.
+ } else {
+ aggr.histogramValue = aggr.histogramMean
+ }
+ } else {
+ aggr.histogramValue.Div(aggr.groupCount)
+ if aggr.histogramKahanC != nil {
+ aggr.histogramValue, _, _, _ = aggr.histogramValue.Add(aggr.histogramKahanC.Div(aggr.groupCount))
+ // Add can theoretically return ErrHistogramsIncompatibleSchema, but at
+ // this stage errors should not occur if earlier KahanAdd calls succeeded.
+ }
+ }
aggr.histogramValue = aggr.histogramValue.Compact(0)
- case aggr.incrementalMean:
+ case aggr.floatIncrementalMean:
aggr.floatValue = aggr.floatMean + aggr.floatKahanC
default:
aggr.floatValue = aggr.floatValue/aggr.groupCount + aggr.floatKahanC/aggr.groupCount
@@ -3565,6 +3602,11 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case aggr.incompatibleHistograms:
continue
case aggr.hasHistogram:
+ if aggr.histogramKahanC != nil {
+ aggr.histogramValue, _, _, _ = aggr.histogramValue.Add(aggr.histogramKahanC)
+ // Add can theoretically return ErrHistogramsIncompatibleSchema, but at
+ // this stage errors should not occur if earlier KahanAdd calls succeeded.
+ }
aggr.histogramValue.Compact(0)
default:
aggr.floatValue += aggr.floatKahanC
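The incremental-mean branch above follows the recurrence mean_n = ((n-1)/n) * mean_(n-1) + f_n/n, scaling the Kahan compensation by the same factor q. A standalone float sketch of one step (the helper name is illustrative):

    // incMeanStep folds one more sample f into a running compensated mean.
    // count is the number of samples seen so far, including f.
    func incMeanStep(f, mean, c, count float64) (newMean, newC float64) {
        q := (count - 1) / count
        return kahansum.Inc(f/count, q*mean, q*c)
    }

The histogram branch mirrors this step with FloatHistogram.Mul, Div, and KahanAdd in place of the scalar operations.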
diff --git a/promql/functions.go b/promql/functions.go
index 04a3d55370..f02262ac40 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/util/annotations"
+ "github.com/prometheus/prometheus/util/kahansum"
)
// FunctionCall is the type of a PromQL function implementation
@@ -801,10 +802,7 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
}
- // For the average calculation of histograms, we use incremental mean
- // calculation without the help of Kahan summation (but this should
- // change, see https://github.com/prometheus/prometheus/issues/14105 ).
- // For floats, we improve the accuracy with the help of Kahan summation.
+ // We improve the accuracy with the help of Kahan summation.
// For a while, we assumed that incremental mean calculation combined
// with Kahan summation (see
// https://stackoverflow.com/questions/61665473/is-it-beneficial-for-precision-to-calculate-the-incremental-mean-average
@@ -847,23 +845,47 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
}
}()
- mean := s.Histograms[0].H.Copy()
- trackCounterReset(mean)
+ var (
+ sum = s.Histograms[0].H.Copy()
+ mean, kahanC *histogram.FloatHistogram
+ count float64
+ incrementalMean bool
+ nhcbBoundsReconciled bool
+ err error
+ )
+ trackCounterReset(sum)
for i, h := range s.Histograms[1:] {
trackCounterReset(h.H)
- count := float64(i + 2)
- left := h.H.Copy().Div(count)
- right := mean.Copy().Div(count)
-
- toAdd, _, nhcbBoundsReconciled, err := left.Sub(right)
- if err != nil {
- return mean, err
+ count = float64(i + 2)
+ if !incrementalMean {
+ sumCopy := sum.Copy()
+ var cCopy *histogram.FloatHistogram
+ if kahanC != nil {
+ cCopy = kahanC.Copy()
+ }
+ cCopy, _, nhcbBoundsReconciled, err = sumCopy.KahanAdd(h.H, cCopy)
+ if err != nil {
+ return sumCopy.Div(count), err
+ }
+ if nhcbBoundsReconciled {
+ nhcbBoundsReconciledSeen = true
+ }
+ if !sumCopy.HasOverflow() {
+ sum, kahanC = sumCopy, cCopy
+ continue
+ }
+ incrementalMean = true
+ mean = sum.Copy().Div(count - 1)
+ if kahanC != nil {
+ kahanC.Div(count - 1)
+ }
}
- if nhcbBoundsReconciled {
- nhcbBoundsReconciledSeen = true
+ q := (count - 1) / count
+ if kahanC != nil {
+ kahanC.Mul(q)
}
-
- _, _, nhcbBoundsReconciled, err = mean.Add(toAdd)
+ toAdd := h.H.Copy().Div(count)
+ kahanC, _, nhcbBoundsReconciled, err = mean.Mul(q).KahanAdd(toAdd, kahanC)
if err != nil {
return mean, err
}
@@ -871,7 +893,18 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
nhcbBoundsReconciledSeen = true
}
}
- return mean, nil
+ if incrementalMean {
+ if kahanC != nil {
+ _, _, _, err := mean.Add(kahanC)
+ return mean, err
+ }
+ return mean, nil
+ }
+ if kahanC != nil {
+ _, _, _, err := sum.Div(count).Add(kahanC.Div(count))
+ return sum, err
+ }
+ return sum.Div(count), nil
})
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
@@ -890,7 +923,7 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
for i, f := range s.Floats[1:] {
count = float64(i + 2)
if !incrementalMean {
- newSum, newC := kahanSumInc(f.F, sum, kahanC)
+ newSum, newC := kahansum.Inc(f.F, sum, kahanC)
// Perform regular mean calculation as long as
// the sum doesn't overflow.
if !math.IsInf(newSum, 0) {
@@ -904,7 +937,7 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
kahanC /= (count - 1)
}
q := (count - 1) / count
- mean, kahanC = kahanSumInc(f.F/count, q*mean, q*kahanC)
+ mean, kahanC = kahansum.Inc(f.F/count, q*mean, q*kahanC)
}
if incrementalMean {
return mean + kahanC
@@ -1145,9 +1178,14 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
sum := s.Histograms[0].H.Copy()
trackCounterReset(sum)
+ var (
+ comp *histogram.FloatHistogram
+ nhcbBoundsReconciled bool
+ err error
+ )
for _, h := range s.Histograms[1:] {
trackCounterReset(h.H)
- _, _, nhcbBoundsReconciled, err := sum.Add(h.H)
+ comp, _, nhcbBoundsReconciled, err = sum.KahanAdd(h.H, comp)
if err != nil {
return sum, err
}
@@ -1155,7 +1193,16 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
nhcbBoundsReconciledSeen = true
}
}
- return sum, nil
+ if comp != nil {
+ sum, _, nhcbBoundsReconciled, err = sum.Add(comp)
+ if err != nil {
+ return sum, err
+ }
+ if nhcbBoundsReconciled {
+ nhcbBoundsReconciledSeen = true
+ }
+ }
+ return sum, err
})
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
@@ -1167,7 +1214,7 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
return aggrOverTime(matrixVal, enh, func(s Series) float64 {
var sum, c float64
for _, f := range s.Floats {
- sum, c = kahanSumInc(f.F, sum, c)
+ sum, c = kahansum.Inc(f.F, sum, c)
}
if math.IsInf(sum, 0) {
return sum
@@ -1220,8 +1267,8 @@ func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHe
for _, f := range s.Floats {
count++
delta := f.F - (mean + cMean)
- mean, cMean = kahanSumInc(delta/count, mean, cMean)
- aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
+ mean, cMean = kahansum.Inc(delta/count, mean, cMean)
+ aux, cAux = kahansum.Inc(delta*(f.F-(mean+cMean)), aux, cAux)
}
variance := (aux + cAux) / count
if varianceToResult == nil {
@@ -1434,24 +1481,6 @@ func funcTimestamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *Eva
return enh.Out, nil
}
-// We get incorrect results if this function is inlined; see https://github.com/prometheus/prometheus/issues/16714.
-//
-//go:noinline
-func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
- t := sum + inc
- switch {
- case math.IsInf(t, 0):
- c = 0
-
- // Using Neumaier improvement, swap if next term larger than sum.
- case math.Abs(sum) >= math.Abs(inc):
- c += (sum - t) + inc
- default:
- c += (inc - t) + sum
- }
- return t, c
-}
-
// linearRegression performs a least-square linear regression analysis on the
// provided SamplePairs. It returns the slope, and the intercept value at the
// provided time.
@@ -1474,10 +1503,10 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
}
n += 1.0
x := float64(sample.T-interceptTime) / 1e3
- sumX, cX = kahanSumInc(x, sumX, cX)
- sumY, cY = kahanSumInc(sample.F, sumY, cY)
- sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY)
- sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2)
+ sumX, cX = kahansum.Inc(x, sumX, cX)
+ sumY, cY = kahansum.Inc(sample.F, sumY, cY)
+ sumXY, cXY = kahansum.Inc(x*sample.F, sumXY, cXY)
+ sumX2, cX2 = kahansum.Inc(x*x, sumX2, cX2)
}
if constY {
if math.IsInf(initY, 0) {
@@ -1613,7 +1642,7 @@ func histogramVariance(vectorVals []Vector, enh *EvalNodeHelper, varianceToResul
}
}
delta := val - mean
- variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
+ variance, cVariance = kahansum.Inc(bucket.Count*delta*delta, variance, cVariance)
}
variance += cVariance
variance /= h.Count
diff --git a/promql/functions_internal_test.go b/promql/functions_internal_test.go
index 9efd9c3c2e..cd170823a8 100644
--- a/promql/functions_internal_test.go
+++ b/promql/functions_internal_test.go
@@ -24,6 +24,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser/posrange"
+ "github.com/prometheus/prometheus/util/kahansum"
)
func TestHistogramRateCounterResetHint(t *testing.T) {
@@ -79,7 +80,7 @@ func TestKahanSumInc(t *testing.T) {
runTest := func(t *testing.T, a, b, expected float64) {
t.Run(fmt.Sprintf("%v + %v = %v", a, b, expected), func(t *testing.T) {
- sum, c := kahanSumInc(b, a, 0)
+ sum, c := kahansum.Inc(b, a, 0)
result := sum + c
if math.IsNaN(expected) {
diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test
index 576b36868f..a3dc61dcff 100644
--- a/promql/promqltest/testdata/aggregators.test
+++ b/promql/promqltest/testdata/aggregators.test
@@ -687,6 +687,11 @@ load 10s
eval instant at 1m sum(data{test="ten"})
{} 10
+# Plain addition doesn't use Kahan summation, so operations involving very large magnitudes
+# (±1e+100) lose precision. The smaller values are absorbed, leading to an incorrect result.
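+# For example, in float64 arithmetic 1e100 + 10 rounds back to exactly 1e100,
+# so (1e100 + 10) - 1e100 evaluates to 0 rather than 10.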
+# eval instant at 1m sum(data{test="ten",point="a"}) + sum(data{test="ten",point="b"}) + sum(data{test="ten",point="c"}) + sum(data{test="ten",point="d"})
+# {} 10
+
eval instant at 1m avg(data{test="ten"})
{} 2.5
diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test
index d66400f787..07352eb59a 100644
--- a/promql/promqltest/testdata/native_histograms.test
+++ b/promql/promqltest/testdata/native_histograms.test
@@ -1388,22 +1388,28 @@ clear
# Test native histograms with sum, count, avg.
load 10m
- histogram_sum{idx="0"} {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}}x1
- histogram_sum{idx="1"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
- histogram_sum{idx="2"} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
- histogram_sum{idx="3"} {{schema:1 count:0}}x1
+ histogram_sum{idx="0"} {{schema:0 count:25 sum:3.1 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}}x1
+ histogram_sum{idx="1"} {{schema:0 count:41 sum:1e100 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
+ histogram_sum{idx="2"} {{schema:0 count:41 sum:-1e100 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
+ histogram_sum{idx="3"} {{schema:1 count:0 sum:1.3 z_bucket:3 z_bucket_w:0.001 buckets:[2 4 2 3 2 2] n_buckets:[1 2 5 3 8 1 1 1 1 6 3]}}x1
histogram_sum_float{idx="0"} 42.0x1
eval instant at 10m sum(histogram_sum)
expect no_warn
- {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
+ {} {{schema:0 count:107 sum:4.4 z_bucket:17 z_bucket_w:0.001 buckets:[5 14 7 7 3 2 2] n_buckets:[3 13 19 6 17 18 0 0 0 10 10 4]}}
eval instant at 10m sum({idx="0"})
expect warn
-eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"})
+eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="3"})
expect no_warn
- {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
+ {} {{schema:0 count:25 sum:4.4 z_bucket:7 z_bucket_w:0.001 buckets:[3 8 5 3 1] n_buckets:[3 11 11 2 3 18]}}
+
+# Plain addition doesn't use Kahan summation, so operations involving very large magnitudes
+# (±1e+100) lose precision. The smaller values are absorbed, leading to an incorrect result.
+# eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"})
+# expect no_warn
+# {} {{schema:0 count:107 sum:4.4 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
eval instant at 10m count(histogram_sum)
expect no_warn
@@ -1411,13 +1417,63 @@ eval instant at 10m count(histogram_sum)
eval instant at 10m avg(histogram_sum)
expect no_warn
- {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}
+ {} {{schema:0 count:26.75 sum:1.1 z_bucket:4.25 z_bucket_w:0.001 buckets:[1.25 3.5 1.75 1.75 0.75 0.5 0.5] n_buckets:[0.75 3.25 4.75 1.5 4.25 4.5 0 0 0 2.5 2.5 1]}}
+
+clear
+
+# Test native histograms with incremental avg calculation.
+# The very large floats involved trigger the incremental avg calculation, as direct summation would overflow float64.
+load 10m
+ histogram_avg_incremental{idx="0"} {{schema:0 count:1.7976931348623157e+308 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}}x1
+ histogram_avg_incremental{idx="1"} {{schema:0 count:1e308 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}}x1
+ histogram_avg_incremental{idx="2"} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}}x1
+ histogram_avg_incremental{idx="3"} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}}x1
+ histogram_avg_incremental{idx="4"} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}}x1
+ histogram_avg_incremental{idx="5"} {{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}}x1
+ histogram_avg_incremental{idx="6"} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}}x1
+ histogram_avg_incremental{idx="7"} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}x1
+# This test fails due to float64 rounding in the incremental average calculation.
+# For large intermediate means (e.g. ~1e99), multiplying by a fractional weight like (n-1)/n
+# produces values such as 2.0000000000000002e99 instead of the mathematically exact 2e99.
+# While the relative error is tiny, subtracting nearly equal high-magnitude values later
+# results in a large absolute error. The outcome also depends on the (effectively random)
+# order in which input series are processed, which makes the test flaky.
+# histogram_avg_incremental_2{idx="0"} {{schema:0 count:1.7976931348623157e+308 sum:5.3 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}}x1
+# histogram_avg_incremental_2{idx="1"} {{schema:0 count:1e308 sum:1e100 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}}x1
+# histogram_avg_incremental_2{idx="2"} {{schema:0 count:1e-6 sum:1 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}}x1
+# histogram_avg_incremental_2{idx="3"} {{schema:0 count:1e-6 sum:-1e100 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}}x1
+# histogram_avg_incremental_2{idx="4"} {{schema:0 count:1e-6 sum:1 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}}x1
+# histogram_avg_incremental_2{idx="5"} {{schema:0 count:1e-6 sum:1 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}}x1
+# histogram_avg_incremental_2{idx="6"} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}}x1
+# histogram_avg_incremental_2{idx="7"} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}x1
+
+eval instant at 10m avg(histogram_avg_incremental)
+ {} {{schema:0 count:3.497116418577895e+307 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}}
+
+# This test doesn't work, see the load section above for reasoning.
+# eval instant at 10m avg(histogram_avg_incremental_2)
+# {} {{schema:0 count:3.497116418577895e+307 sum:1.0375 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}}
clear
# Test native histograms with sum_over_time, avg_over_time.
load 1m
histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}}
+ histogram_sum_over_time_2 {{schema:0 count:1e10 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}} {{schema:0 count:1e-6 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}} {{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}
+ histogram_sum_over_time_3 {{schema:0 count:1 sum:1}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:-1e100}}
+ histogram_sum_over_time_4 {{schema:0 count:1 sum:5.3}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:-1e100}} {{schema:0 count:5 sum:2}} {{schema:0 count:6 sum:1e50}} {{schema:0 count:7 sum:-1e50}}
+ histogram_sum_over_time_incremental {{schema:0 count:1.7976931348623157e+308 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}} {{schema:0 count:1e308 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}} {{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}
+ histogram_sum_over_time_incremental_2 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:2}} {{schema:0 count:1e-6 sum:0}} {{schema:0 count:1e-6 sum:0}}
+ histogram_sum_over_time_incremental_3 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:0}}
+ histogram_sum_over_time_incremental_4 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:1e50}} {{schema:0 count:1e-6 sum:-1e50}} {{schema:0 count:1e-6 sum:0}}
+ histogram_sum_over_time_incremental_6 {{schema:0 count:1.7976931348623157e+308 sum:1}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}}
+# Kahan summation only compensates reliably across two magnitude scales. In the following inputs,
+# the series contains three distinct magnitude groups (≈1, ≈1e50, and ≈1e100). When these magnitudes
+# are interleaved, rounding error can't be fully compensated, causing smaller values to be lost.
+# However, when values are ordered so that cancellation within one magnitude group
+# occurs first, followed by cancellation of the next group, the outcome remains accurate.
+# histogram_sum_over_time_5 {{schema:0 count:1 sum:5.3}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:1e50}} {{schema:0 count:5 sum:2}} {{schema:0 count:6 sum:-1e100}} {{schema:0 count:7 sum:-1e50}}
+# histogram_sum_over_time_incremental_5 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1e50}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:-1e50}} {{schema:0 count:1e-6 sum:0}}
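+# For example, feeding [1e100, 1, 1e50, -1e100, -1e50] through compensated
+# summation absorbs the +1 into the compensation term when 1e50 is added to it,
+# so the final sum + compensation comes out as 0 instead of 1.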
eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
@@ -1425,6 +1481,68 @@ eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m])
eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}
+eval instant at 7m sum_over_time(histogram_sum_over_time_2[8m:1m])
+ {} {{schema:0 count:10000000000.000008 sum:10.03120467492675 z_bucket:3.56528e+50 z_bucket_w:0.001 buckets:[2.258e+220 2.2580178264e+220 2.6169037689e+220 261827.54331269444] n_buckets:[4004.342521030831 6080.675675179582 451745.57986202446 2035.3483135107433 1444.171911278132]}}
+
+eval instant at 7m avg_over_time(histogram_sum_over_time_2[8m:1m])
+ {} {{schema:0 count:1250000000.000001 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}}
+
+eval instant at 3m sum_over_time(histogram_sum_over_time_3[4m:1m])
+ {} {{schema:0 count:10 sum:2}}
+
+eval instant at 3m avg_over_time(histogram_sum_over_time_3[4m:1m])
+ {} {{schema:0 count:2.5 sum:0.5}}
+
+eval instant at 6m sum_over_time(histogram_sum_over_time_4[7m:1m])
+ {} {{schema:0 count:28 sum:8.3}}
+
+eval instant at 6m avg_over_time(histogram_sum_over_time_4[7m:1m])
+ {} {{schema:0 count:4 sum:1.1857142857142857}}
+
+# These tests don't work, see the load section above for reasoning.
+# eval instant at 6m sum_over_time(histogram_sum_over_time_5[7m:1m])
+# {} {{schema:0 count:28 sum:8.3}}
+#
+# eval instant at 6m avg_over_time(histogram_sum_over_time_5[7m:1m])
+# {} {{schema:0 count:4 sum:1.1857142857142857}}
+
+eval instant at 7m sum_over_time(histogram_sum_over_time_incremental[8m:1m])
+ {} {{schema:0 count:Inf sum:10.03120467492675 z_bucket:3.56528e+50 z_bucket_w:0.001 buckets:[2.258e+220 2.2580178264e+220 2.6169037689e+220 261827.54331269444] n_buckets:[4004.342521030831 6080.675675179582 451745.57986202446 2035.3483135107433 1444.171911278132]}}
+
+eval instant at 7m avg_over_time(histogram_sum_over_time_incremental[8m:1m])
+ {} {{schema:0 count:3.497116418577895e+307 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}}
+
+eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_2[7m:1m])
+ {} {{schema:0 count:Inf sum:8.3}}
+
+eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_2[7m:1m])
+ {} {{schema:0 count:3.9967044783747367e+307 sum:1.1857142857142857}}
+
+eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_3[7m:1m])
+ {} {{schema:0 count:Inf sum:6.3}}
+
+eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_3[7m:1m])
+ {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}}
+
+eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_4[7m:1m])
+ {} {{schema:0 count:Inf sum:6.3}}
+
+eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_4[7m:1m])
+ {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}}
+
+# These tests don't work, see the load section above for reasoning.
+# eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_5[7m:1m])
+# {} {{schema:0 count:Inf sum:6.3}}
+#
+# eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_5[7m:1m])
+# {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}}
+
+eval instant at 3m sum_over_time(histogram_sum_over_time_incremental_6[4m:1m])
+ {} {{schema:0 count:Inf sum:2}}
+
+eval instant at 3m avg_over_time(histogram_sum_over_time_incremental_6[4m:1m])
+ {} {{schema:0 count:6.99423283715579e+307 sum:0.5}}
+
clear
# Test native histograms with sub operator.
diff --git a/util/kahansum/kahansum.go b/util/kahansum/kahansum.go
new file mode 100644
index 0000000000..d55defcb29
--- /dev/null
+++ b/util/kahansum/kahansum.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kahansum
+
+import "math"
+
+// Inc adds inc to the running sum, updating the compensation value c, using Kahan (Neumaier) summation.
+// We get incorrect results if this function is inlined; see https://github.com/prometheus/prometheus/issues/16714.
+//
+//go:noinline
+func Inc(inc, sum, c float64) (newSum, newC float64) {
+ t := sum + inc
+ switch {
+ case math.IsInf(t, 0):
+ c = 0
+
+ // Using Neumaier improvement, swap if next term larger than sum.
+ case math.Abs(sum) >= math.Abs(inc):
+ c += (sum - t) + inc
+ default:
+ c += (inc - t) + sum
+ }
+ return t, c
+}
+
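+// Dec subtracts dec from the running sum by feeding its negation through Inc.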
+func Dec(dec, sum, c float64) (newSum, newC float64) {
+ return Inc(-dec, sum, c)
+}
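As a quick sanity check of the extracted package, a self-contained usage sketch (the import path is the one introduced by this patch):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/util/kahansum"
    )

    func main() {
        big, small := 1e100, 1.0
        // Naive float64 summation absorbs the small terms entirely.
        naive := big + small + small - big // 0
        // Compensated summation carries them in c and recovers them at the end.
        var sum, c float64
        for _, v := range []float64{big, small, small, -big} {
            sum, c = kahansum.Inc(v, sum, c)
        }
        fmt.Println(naive, sum+c) // prints: 0 2
    }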
From a19a5314a36245a6698d7e5a01e0ae296f61c0e3 Mon Sep 17 00:00:00 2001
From: Mohammad Abbasi
Date: Sun, 8 Feb 2026 19:02:56 +0330
Subject: [PATCH 129/165] fix(docs): typo in prometheus_agent.md doc
Signed-off-by: Mohammad Abbasi
---
docs/prometheus_agent.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/prometheus_agent.md b/docs/prometheus_agent.md
index 468b5565d1..0d8c3fa94a 100644
--- a/docs/prometheus_agent.md
+++ b/docs/prometheus_agent.md
@@ -20,8 +20,8 @@ In essence, it looks like this:
### Benefits of agent mode
-- Improved efficency. The customized Agent TSDB WAL removes the data immediately after successful writes. If it cannot reach the remote endpoint, it persists the data temporarily on the disk until the remote endpoint is back online. This is currently limited to a two-hour buffer only, similar to non-agent Prometheus. This means that there is no need to build chunks of data in memory or maintain a full index for querying purposes. Essentially the Agent mode uses a fraction of the resources that a normal Prometheus server would use in a similar situation.
-- Agent mode eables easier [horizontal scalability for ingestion](https://prometheus.io/blog/2021/11/16/agent/#the-dream-auto-scalable-metric-ingestion).
+- Improved efficiency. The customized Agent TSDB WAL removes the data immediately after successful writes. If it cannot reach the remote endpoint, it persists the data temporarily on the disk until the remote endpoint is back online. This is currently limited to a two-hour buffer only, similar to non-agent Prometheus. This means that there is no need to build chunks of data in memory or maintain a full index for querying purposes. Essentially the Agent mode uses a fraction of the resources that a normal Prometheus server would use in a similar situation.
+- Agent mode enables easier [horizontal scalability for ingestion](https://prometheus.io/blog/2021/11/16/agent/#the-dream-auto-scalable-metric-ingestion).
### Downsides of agent mode
From 5329355ffb552a027fe5ddd0c002f890d4d345cf Mon Sep 17 00:00:00 2001
From: matt-gp
Date: Fri, 6 Feb 2026 10:12:48 +0000
Subject: [PATCH 130/165] AWS SD: ECS Standalone Tasks
The current ECS role in AWS SD assumes that a task is part of a service.
This means that tasks that are started as part of AWS Batch will get
missed and not be discovered. This change fixes this so that standalone
tasks can be discovered as well.
Signed-off-by: matt-gp
---
discovery/aws/ecs.go | 993 ++++++++++++++++++++------------------
discovery/aws/ecs_test.go | 836 ++++++++++++++++++++++++--------
2 files changed, 1162 insertions(+), 667 deletions(-)
diff --git a/discovery/aws/ecs.go b/discovery/aws/ecs.go
index 1d5ff366de..9ecfcc44fe 100644
--- a/discovery/aws/ecs.go
+++ b/discovery/aws/ecs.go
@@ -19,7 +19,9 @@ import (
"fmt"
"log/slog"
"net"
+ "slices"
"strconv"
+ "strings"
"sync"
"time"
@@ -273,7 +275,6 @@ func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
// listClusterARNs returns a slice of cluster arns.
// This method does not use concurrency as it's a simple paginated call.
-// AWS ECS Cluster read actions have burst=50, sustained=20 req/sec limits.
func (d *ECSDiscovery) listClusterARNs(ctx context.Context) ([]string, error) {
var (
clusterARNs []string
@@ -281,7 +282,8 @@ func (d *ECSDiscovery) listClusterARNs(ctx context.Context) ([]string, error) {
)
for {
resp, err := d.ecs.ListClusters(ctx, &ecs.ListClustersInput{
- NextToken: nextToken,
+ NextToken: nextToken,
+ MaxResults: aws.Int32(100),
})
if err != nil {
return nil, fmt.Errorf("could not list clusters: %w", err)
@@ -299,56 +301,61 @@ func (d *ECSDiscovery) listClusterARNs(ctx context.Context) ([]string, error) {
}
// describeClusters returns a map of cluster ARN to a slice of clusters.
-// This method processes clusters in batches without concurrency as it's typically
-// a single call handling up to 100 clusters. AWS ECS Cluster read actions have
-// burst=50, sustained=20 req/sec limits.
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// Clusters are described in batches of 100 to respect AWS API limits (DescribeClusters allows up to 100 clusters per call).
func (d *ECSDiscovery) describeClusters(ctx context.Context, clusters []string) (map[string]types.Cluster, error) {
+ mu := sync.Mutex{}
clusterMap := make(map[string]types.Cluster)
-
- // AWS DescribeClusters can handle up to 100 clusters per call
- batchSize := 100
- for _, batch := range batchSlice(clusters, batchSize) {
- resp, err := d.ecs.DescribeClusters(ctx, &ecs.DescribeClustersInput{
- Clusters: batch,
- Include: []types.ClusterField{"TAGS"},
- })
- if err != nil {
- d.logger.Error("Failed to describe clusters", "clusters", batch, "error", err)
- return nil, fmt.Errorf("could not describe clusters %v: %w", batch, err)
- }
-
- for _, c := range resp.Clusters {
- if c.ClusterArn != nil {
- clusterMap[*c.ClusterArn] = c
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for batch := range slices.Chunk(clusters, 100) {
+ errg.Go(func() error {
+ resp, err := d.ecs.DescribeClusters(ectx, &ecs.DescribeClustersInput{
+ Clusters: batch,
+ Include: []types.ClusterField{"TAGS"},
+ })
+ if err != nil {
+ d.logger.Error("Failed to describe clusters", "clusters", batch, "error", err)
+ return fmt.Errorf("could not describe clusters %v: %w", batch, err)
}
- }
+
+ for _, cluster := range resp.Clusters {
+ if cluster.ClusterArn != nil {
+ mu.Lock()
+ clusterMap[*cluster.ClusterArn] = cluster
+ mu.Unlock()
+ }
+ }
+ return nil
+ })
}
- return clusterMap, nil
+ return clusterMap, errg.Wait()
}
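// Illustrative aside, not part of this patch: the helpers below all repeat the
// same chunked fan-out shape. A hypothetical generic sketch of the pattern:
//
//	func chunkedFanOut[T any](ctx context.Context, items []T, batchSize, limit int,
//		call func(ctx context.Context, batch []T) error,
//	) error {
//		errg, ectx := errgroup.WithContext(ctx)
//		errg.SetLimit(limit)
//		for batch := range slices.Chunk(items, batchSize) {
//			errg.Go(func() error { return call(ectx, batch) })
//		}
//		return errg.Wait()
//	}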
// listServiceARNs returns a map of cluster ARN to a slice of service ARNs.
// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
-// AWS ECS Service read actions have burst=100, sustained=20 req/sec limits.
+// Services are listed in batches of 100 to respect AWS API limits (ListServices allows up to 100 services per call).
func (d *ECSDiscovery) listServiceARNs(ctx context.Context, clusters []string) (map[string][]string, error) {
- serviceARNsMu := sync.Mutex{}
- serviceARNs := make(map[string][]string)
+ mu := sync.Mutex{}
+ services := make(map[string][]string)
errg, ectx := errgroup.WithContext(ctx)
errg.SetLimit(d.cfg.RequestConcurrency)
for _, clusterARN := range clusters {
errg.Go(func() error {
var nextToken *string
- var clusterServiceARNs []string
+ var serviceARNs []string
for {
resp, err := d.ecs.ListServices(ectx, &ecs.ListServicesInput{
- Cluster: aws.String(clusterARN),
- NextToken: nextToken,
+ Cluster: aws.String(clusterARN),
+ NextToken: nextToken,
+ MaxResults: aws.Int32(100),
})
if err != nil {
return fmt.Errorf("could not list services for cluster %q: %w", clusterARN, err)
}
- clusterServiceARNs = append(clusterServiceARNs, resp.ServiceArns...)
+ serviceARNs = append(serviceARNs, resp.ServiceArns...)
if resp.NextToken == nil {
break
@@ -356,75 +363,76 @@ func (d *ECSDiscovery) listServiceARNs(ctx context.Context, clusters []string) (
nextToken = resp.NextToken
}
- serviceARNsMu.Lock()
- serviceARNs[clusterARN] = clusterServiceARNs
- serviceARNsMu.Unlock()
+ mu.Lock()
+ services[clusterARN] = serviceARNs
+ mu.Unlock()
return nil
})
}
- return serviceARNs, errg.Wait()
-}
-
-// describeServices returns a map of cluster ARN to services.
-// Uses concurrent requests with batching (10 services per request) to respect AWS API limits.
-// AWS ECS Service read actions have burst=100, sustained=20 req/sec limits.
-func (d *ECSDiscovery) describeServices(ctx context.Context, clusterServiceARNsMap map[string][]string) (map[string][]types.Service, error) {
- batchSize := 10 // AWS DescribeServices API limit is 10 services per request
- serviceMu := sync.Mutex{}
- services := make(map[string][]types.Service)
- errg, ectx := errgroup.WithContext(ctx)
- errg.SetLimit(d.cfg.RequestConcurrency)
- for clusterARN, serviceARNs := range clusterServiceARNsMap {
- for _, batch := range batchSlice(serviceARNs, batchSize) {
- errg.Go(func() error {
- resp, err := d.ecs.DescribeServices(ectx, &ecs.DescribeServicesInput{
- Services: batch,
- Cluster: aws.String(clusterARN),
- Include: []types.ServiceField{"TAGS"},
- })
- if err != nil {
- d.logger.Error("Failed to describe services", "cluster", clusterARN, "batch", batch, "error", err)
- return fmt.Errorf("could not describe services for cluster %q: %w", clusterARN, err)
- }
-
- serviceMu.Lock()
- services[clusterARN] = append(services[clusterARN], resp.Services...)
- serviceMu.Unlock()
-
- return nil
- })
- }
- }
-
return services, errg.Wait()
}
-// listTaskARNs returns a map of service ARN to a slice of task ARNs.
+// describeServices returns a map of service name to service.
// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
-// AWS ECS Cluster resource read actions have burst=100, sustained=20 req/sec limits.
-func (d *ECSDiscovery) listTaskARNs(ctx context.Context, services []types.Service) (map[string][]string, error) {
- taskARNsMu := sync.Mutex{}
- taskARNs := make(map[string][]string)
+// Services are described in batches of 10 to respect AWS API limits (DescribeServices allows up to 10 services per call).
+func (d *ECSDiscovery) describeServices(ctx context.Context, clusterARN string, serviceARNS []string) (map[string]types.Service, error) {
+ mu := sync.Mutex{}
+ services := make(map[string]types.Service)
errg, ectx := errgroup.WithContext(ctx)
errg.SetLimit(d.cfg.RequestConcurrency)
- for _, service := range services {
+ for batch := range slices.Chunk(serviceARNS, 10) {
errg.Go(func() error {
- serviceArn := aws.ToString(service.ServiceArn)
+ resp, err := d.ecs.DescribeServices(ectx, &ecs.DescribeServicesInput{
+ Cluster: aws.String(clusterARN),
+ Services: batch,
+ Include: []types.ServiceField{"TAGS"},
+ })
+ if err != nil {
+ d.logger.Error("Failed to describe services", "cluster", clusterARN, "batch", batch, "error", err)
+ return fmt.Errorf("could not describe services for cluster %q: batch %v: %w", clusterARN, batch, err)
+ }
- var nextToken *string
- var serviceTaskARNs []string
+ for _, service := range resp.Services {
+ if service.ServiceArn != nil {
+ mu.Lock()
+ services[*service.ServiceName] = service
+ mu.Unlock()
+ }
+ }
+ return nil
+ })
+ }
+
+ return services, errg.Wait()
+}
+
+// listTaskARNs returns a map of cluster ARN to a slice of task ARNs.
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// Tasks are listed in pages of up to 100 to respect AWS API limits (the ListTasks maximum),
+// using pagination to handle clusters with more than 100 tasks.
+func (d *ECSDiscovery) listTaskARNs(ctx context.Context, clusterARNs []string) (map[string][]string, error) {
+ mu := sync.Mutex{}
+ tasks := make(map[string][]string)
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for _, clusterARN := range clusterARNs {
+ errg.Go(func() error {
+ var (
+ nextToken *string
+ taskARNs []string
+ )
for {
resp, err := d.ecs.ListTasks(ectx, &ecs.ListTasksInput{
- Cluster: aws.String(*service.ClusterArn),
- ServiceName: aws.String(*service.ServiceName),
- NextToken: nextToken,
+ Cluster: aws.String(clusterARN),
+ NextToken: nextToken,
+ MaxResults: aws.Int32(100),
})
if err != nil {
- return fmt.Errorf("could not list tasks for service %q: %w", serviceArn, err)
+ return fmt.Errorf("could not list tasks for cluster %q: %w", clusterARN, err)
}
- serviceTaskARNs = append(serviceTaskARNs, resp.TaskArns...)
+ taskARNs = append(taskARNs, resp.TaskArns...)
if resp.NextToken == nil {
break
@@ -432,77 +440,87 @@ func (d *ECSDiscovery) listTaskARNs(ctx context.Context, services []types.Servic
nextToken = resp.NextToken
}
- taskARNsMu.Lock()
- taskARNs[serviceArn] = serviceTaskARNs
- taskARNsMu.Unlock()
+ mu.Lock()
+ tasks[clusterARN] = taskARNs
+ mu.Unlock()
return nil
})
}
- return taskARNs, errg.Wait()
+ return tasks, errg.Wait()
}
-// describeTasks returns a map of task arn to a slice task.
-// Uses concurrent requests with batching (100 tasks per request) to respect AWS API limits.
-// AWS ECS Cluster resource read actions have burst=100, sustained=20 req/sec limits.
-func (d *ECSDiscovery) describeTasks(ctx context.Context, clusterARN string, taskARNsMap map[string][]string) (map[string][]types.Task, error) {
- batchSize := 100 // AWS DescribeTasks API limit is 100 tasks per request
- taskMu := sync.Mutex{}
- tasks := make(map[string][]types.Task)
+// describeTasks returns a slice of tasks.
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// Tasks are described in batches of 100 to respect AWS API limits (DescribeTasks allows up to 100 tasks per call).
+func (d *ECSDiscovery) describeTasks(ctx context.Context, clusterARN string, taskARNs []string) ([]types.Task, error) {
+ mu := sync.Mutex{}
+ var tasks []types.Task
errg, ectx := errgroup.WithContext(ctx)
errg.SetLimit(d.cfg.RequestConcurrency)
- for serviceARN, taskARNs := range taskARNsMap {
- for _, batch := range batchSlice(taskARNs, batchSize) {
- errg.Go(func() error {
- resp, err := d.ecs.DescribeTasks(ectx, &ecs.DescribeTasksInput{
- Cluster: aws.String(clusterARN),
- Tasks: batch,
- Include: []types.TaskField{"TAGS"},
- })
- if err != nil {
- d.logger.Error("Failed to describe tasks", "service", serviceARN, "cluster", clusterARN, "batch", batch, "error", err)
- return fmt.Errorf("could not describe tasks for service %q in cluster %q: %w", serviceARN, clusterARN, err)
- }
-
- taskMu.Lock()
- tasks[serviceARN] = append(tasks[serviceARN], resp.Tasks...)
- taskMu.Unlock()
-
- return nil
+ for batch := range slices.Chunk(taskARNs, 100) {
+ errg.Go(func() error {
+ resp, err := d.ecs.DescribeTasks(ectx, &ecs.DescribeTasksInput{
+ Cluster: aws.String(clusterARN),
+ Tasks: batch,
+ Include: []types.TaskField{"TAGS"},
})
- }
+ if err != nil {
+ d.logger.Error("Failed to describe tasks", "cluster", clusterARN, "batch", batch, "error", err)
+ return fmt.Errorf("could not describe tasks in cluster %q: batch %v: %w", clusterARN, batch, err)
+ }
+
+ mu.Lock()
+ tasks = append(tasks, resp.Tasks...)
+ mu.Unlock()
+ return nil
+ })
}
return tasks, errg.Wait()
}
// describeContainerInstances returns a map of container instance ARN to EC2 instance ID
-// Uses batching to respect AWS API limits (100 container instances per request).
-func (d *ECSDiscovery) describeContainerInstances(ctx context.Context, clusterARN string, containerInstanceARNs []string) (map[string]string, error) {
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// Container instances are described in batches of 100 to respect AWS API limits (DescribeContainerInstances allows up to 100 container instances per call).
+func (d *ECSDiscovery) describeContainerInstances(ctx context.Context, clusterARN string, tasks []types.Task) (map[string]string, error) {
+ containerInstanceARNs := make([]string, 0, len(tasks))
+ for _, task := range tasks {
+ if task.ContainerInstanceArn != nil {
+ containerInstanceARNs = append(containerInstanceARNs, *task.ContainerInstanceArn)
+ }
+ }
+
if len(containerInstanceARNs) == 0 {
return make(map[string]string), nil
}
+ mu := sync.Mutex{}
containerInstToEC2 := make(map[string]string)
- batchSize := 100 // AWS API limit
-
- for _, batch := range batchSlice(containerInstanceARNs, batchSize) {
- resp, err := d.ecs.DescribeContainerInstances(ctx, &ecs.DescribeContainerInstancesInput{
- Cluster: aws.String(clusterARN),
- ContainerInstances: batch,
- })
- if err != nil {
- return nil, fmt.Errorf("could not describe container instances: %w", err)
- }
-
- for _, ci := range resp.ContainerInstances {
- if ci.ContainerInstanceArn != nil && ci.Ec2InstanceId != nil {
- containerInstToEC2[*ci.ContainerInstanceArn] = *ci.Ec2InstanceId
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for batch := range slices.Chunk(containerInstanceARNs, 100) {
+ errg.Go(func() error {
+ resp, err := d.ecs.DescribeContainerInstances(ectx, &ecs.DescribeContainerInstancesInput{
+ Cluster: aws.String(clusterARN),
+ ContainerInstances: batch,
+ })
+ if err != nil {
+ return fmt.Errorf("could not describe container instances: %w", err)
}
- }
+
+ for _, ci := range resp.ContainerInstances {
+ if ci.ContainerInstanceArn != nil && ci.Ec2InstanceId != nil {
+ mu.Lock()
+ containerInstToEC2[*ci.ContainerInstanceArn] = *ci.Ec2InstanceId
+ mu.Unlock()
+ }
+ }
+ return nil
+ })
}
- return containerInstToEC2, nil
+ return containerInstToEC2, errg.Wait()
}
// ec2InstanceInfo holds information retrieved from EC2 DescribeInstances.
@@ -515,83 +533,112 @@ type ec2InstanceInfo struct {
}
// describeEC2Instances returns a map of EC2 instance ID to instance information.
+// This method does not use concurrency: it issues a single paginated DescribeInstances
+// call, following NextToken until all pages have been read.
func (d *ECSDiscovery) describeEC2Instances(ctx context.Context, instanceIDs []string) (map[string]ec2InstanceInfo, error) {
if len(instanceIDs) == 0 {
return make(map[string]ec2InstanceInfo), nil
}
instanceInfo := make(map[string]ec2InstanceInfo)
+ var nextToken *string
- resp, err := d.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
- InstanceIds: instanceIDs,
- })
- if err != nil {
- return nil, fmt.Errorf("could not describe EC2 instances: %w", err)
- }
+ for {
+ resp, err := d.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
+ InstanceIds: instanceIDs,
+ NextToken: nextToken,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not describe EC2 instances: %w", err)
+ }
- for _, reservation := range resp.Reservations {
- for _, instance := range reservation.Instances {
- if instance.InstanceId != nil && instance.PrivateIpAddress != nil {
- info := ec2InstanceInfo{
- privateIP: *instance.PrivateIpAddress,
- tags: make(map[string]string),
- }
- if instance.PublicIpAddress != nil {
- info.publicIP = *instance.PublicIpAddress
- }
- if instance.SubnetId != nil {
- info.subnetID = *instance.SubnetId
- }
- if instance.InstanceType != "" {
- info.instanceType = string(instance.InstanceType)
- }
- // Collect EC2 instance tags
- for _, tag := range instance.Tags {
- if tag.Key != nil && tag.Value != nil {
- info.tags[*tag.Key] = *tag.Value
+ for _, reservation := range resp.Reservations {
+ for _, instance := range reservation.Instances {
+ if instance.InstanceId != nil && instance.PrivateIpAddress != nil {
+ info := ec2InstanceInfo{
+ privateIP: *instance.PrivateIpAddress,
+ tags: make(map[string]string),
}
+ if instance.PublicIpAddress != nil {
+ info.publicIP = *instance.PublicIpAddress
+ }
+ if instance.SubnetId != nil {
+ info.subnetID = *instance.SubnetId
+ }
+ if instance.InstanceType != "" {
+ info.instanceType = string(instance.InstanceType)
+ }
+ // Collect EC2 instance tags
+ for _, tag := range instance.Tags {
+ if tag.Key != nil && tag.Value != nil {
+ info.tags[*tag.Key] = *tag.Value
+ }
+ }
+ instanceInfo[*instance.InstanceId] = info
}
- instanceInfo[*instance.InstanceId] = info
}
}
+
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
}
return instanceInfo, nil
}
// describeNetworkInterfaces returns a map of ENI ID to public IP address.
-func (d *ECSDiscovery) describeNetworkInterfaces(ctx context.Context, eniIDs []string) (map[string]string, error) {
+// This is needed to get the public IP for tasks using awsvpc network mode, as the ENI is what gets the public IP, not the EC2 instance.
+// This method does not use concurrency as it's a simple paginated call.
+func (d *ECSDiscovery) describeNetworkInterfaces(ctx context.Context, tasks []types.Task) (map[string]string, error) {
+ eniIDs := make([]string, 0, len(tasks))
+
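+	// Take the first ElasticNetworkInterface attachment per task; awsvpc tasks
+	// normally have a single ENI, so any further attachments are ignored.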
+ for _, task := range tasks {
+ for _, attachment := range task.Attachments {
+ if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
+ for _, detail := range attachment.Details {
+ if detail.Name != nil && *detail.Name == "networkInterfaceId" && detail.Value != nil {
+ eniIDs = append(eniIDs, *detail.Value)
+ break
+ }
+ }
+ break
+ }
+ }
+ }
+
if len(eniIDs) == 0 {
return make(map[string]string), nil
}
eniToPublicIP := make(map[string]string)
+ var nextToken *string
- resp, err := d.ec2.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{
- NetworkInterfaceIds: eniIDs,
- })
- if err != nil {
- return nil, fmt.Errorf("could not describe network interfaces: %w", err)
- }
-
- for _, eni := range resp.NetworkInterfaces {
- if eni.NetworkInterfaceId != nil && eni.Association != nil && eni.Association.PublicIp != nil {
- eniToPublicIP[*eni.NetworkInterfaceId] = *eni.Association.PublicIp
+ for {
+ resp, err := d.ec2.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{
+ NetworkInterfaceIds: eniIDs,
+ NextToken: nextToken,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not describe network interfaces: %w", err)
}
+
+ for _, eni := range resp.NetworkInterfaces {
+ if eni.NetworkInterfaceId != nil && eni.Association != nil && eni.Association.PublicIp != nil {
+ eniToPublicIP[*eni.NetworkInterfaceId] = *eni.Association.PublicIp
+ }
+ }
+
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
}
return eniToPublicIP, nil
}
-func batchSlice[T any](a []T, size int) [][]T {
- batches := make([][]T, 0, len(a)/size+1)
- for i := 0; i < len(a); i += size {
- end := min(i+size, len(a))
- batches = append(batches, a[i:end])
- }
- return batches
-}
-
func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
err := d.initEcsClient(ctx)
if err != nil {
@@ -620,314 +667,338 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
Source: d.cfg.Region,
}
- clusterARNMap, err := d.describeClusters(ctx, clusters)
- if err != nil {
- return nil, err
- }
+ // Fetch cluster details, service ARNs, and task ARNs in parallel
+ var (
+ clusterMap map[string]types.Cluster
+ serviceMap map[string][]string
+ taskMap map[string][]string
+ )
- clusterServiceARNMap, err := d.listServiceARNs(ctx, clusters)
- if err != nil {
- return nil, err
- }
+ clusterErrg, clusterCtx := errgroup.WithContext(ctx)
+ clusterErrg.Go(func() error {
+ var err error
+ clusterMap, err = d.describeClusters(clusterCtx, clusters)
+ return err
+ })
+ clusterErrg.Go(func() error {
+ var err error
+ serviceMap, err = d.listServiceARNs(clusterCtx, clusters)
+ return err
+ })
+ clusterErrg.Go(func() error {
+ var err error
+ taskMap, err = d.listTaskARNs(clusterCtx, clusters)
+ return err
+ })
- clusterServicesMap, err := d.describeServices(ctx, clusterServiceARNMap)
- if err != nil {
+ if err := clusterErrg.Wait(); err != nil {
return nil, err
}
// Use goroutines to process clusters in parallel
var (
- targetsMu sync.Mutex
- wg sync.WaitGroup
+ clusterWg sync.WaitGroup
+ clusterMu sync.Mutex
+ clusterTargets []model.LabelSet
)
- for clusterArn, clusterServices := range clusterServicesMap {
- if len(clusterServices) == 0 {
+ for clusterARN, taskARNs := range taskMap {
+ if len(taskARNs) == 0 {
continue
}
- wg.Add(1)
- go func(clusterArn string, clusterServices []types.Service) {
- defer wg.Done()
+ clusterWg.Add(1)
- serviceTaskARNMap, err := d.listTaskARNs(ctx, clusterServices)
- if err != nil {
- d.logger.Error("Failed to list task ARNs for cluster", "cluster", clusterArn, "error", err)
- return
- }
+ go func(cluster types.Cluster, serviceARNs, taskARNs []string) {
+ defer clusterWg.Done()
- serviceTaskMap, err := d.describeTasks(ctx, clusterArn, serviceTaskARNMap)
- if err != nil {
- d.logger.Error("Failed to describe tasks for cluster", "cluster", clusterArn, "error", err)
- return
- }
-
- // Process services within this cluster in parallel
+ // Fetch services and tasks in parallel (they're independent)
var (
- serviceWg sync.WaitGroup
- localTargets []model.LabelSet
- localTargetsMu sync.Mutex
+ services map[string]types.Service
+ tasks []types.Task
)
- for _, clusterService := range clusterServices {
- serviceWg.Add(1)
- go func(clusterService types.Service) {
- defer serviceWg.Done()
+ resourceErrg, resourceCtx := errgroup.WithContext(ctx)
+ resourceErrg.Go(func() error {
+ var err error
+ services, err = d.describeServices(resourceCtx, *cluster.ClusterArn, serviceARNs)
+ if err != nil {
+ d.logger.Error("Failed to describe services for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ }
+ return err
+ })
+ resourceErrg.Go(func() error {
+ var err error
+ tasks, err = d.describeTasks(resourceCtx, *cluster.ClusterArn, taskARNs)
+ if err != nil {
+ d.logger.Error("Failed to describe tasks for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ }
+ return err
+ })
- serviceArn := *clusterService.ServiceArn
-
- if tasks, exists := serviceTaskMap[serviceArn]; exists {
- var serviceTargets []model.LabelSet
-
- // Collect container instance ARNs for all EC2 tasks to get instance type
- var containerInstanceARNs []string
- taskToContainerInstance := make(map[string]string)
- // Collect ENI IDs for awsvpc tasks to get public IPs
- var eniIDs []string
- taskToENI := make(map[string]string)
-
- for _, task := range tasks {
- // Collect container instance ARN for any task running on EC2
- if task.ContainerInstanceArn != nil {
- containerInstanceARNs = append(containerInstanceARNs, *task.ContainerInstanceArn)
- taskToContainerInstance[*task.TaskArn] = *task.ContainerInstanceArn
- }
-
- // Collect ENI IDs from awsvpc tasks
- for _, attachment := range task.Attachments {
- if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
- for _, detail := range attachment.Details {
- if detail.Name != nil && *detail.Name == "networkInterfaceId" && detail.Value != nil {
- eniIDs = append(eniIDs, *detail.Value)
- taskToENI[*task.TaskArn] = *detail.Value
- break
- }
- }
- break
- }
- }
- }
-
- // Batch describe container instances and EC2 instances to get instance type and other metadata
- var containerInstToEC2 map[string]string
- var ec2InstInfo map[string]ec2InstanceInfo
- if len(containerInstanceARNs) > 0 {
- var err error
- containerInstToEC2, err = d.describeContainerInstances(ctx, clusterArn, containerInstanceARNs)
- if err != nil {
- d.logger.Error("Failed to describe container instances", "cluster", clusterArn, "error", err)
- // Continue processing tasks
- } else {
- // Collect unique EC2 instance IDs
- ec2InstanceIDs := make([]string, 0, len(containerInstToEC2))
- for _, ec2ID := range containerInstToEC2 {
- ec2InstanceIDs = append(ec2InstanceIDs, ec2ID)
- }
-
- // Batch describe EC2 instances
- ec2InstInfo, err = d.describeEC2Instances(ctx, ec2InstanceIDs)
- if err != nil {
- d.logger.Error("Failed to describe EC2 instances", "cluster", clusterArn, "error", err)
- }
- }
- }
-
- // Batch describe ENIs to get public IPs for awsvpc tasks
- var eniToPublicIP map[string]string
- if len(eniIDs) > 0 {
- var err error
- eniToPublicIP, err = d.describeNetworkInterfaces(ctx, eniIDs)
- if err != nil {
- d.logger.Error("Failed to describe network interfaces", "cluster", clusterArn, "error", err)
- // Continue processing without ENI public IPs
- }
- }
-
- for _, task := range tasks {
- var ipAddress, subnetID, publicIP string
- var networkMode string
- var ec2InstanceID, ec2InstanceType, ec2InstancePrivateIP, ec2InstancePublicIP string
-
- // Try to get IP from ENI attachment (awsvpc mode)
- var eniAttachment *types.Attachment
- for _, attachment := range task.Attachments {
- if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
- eniAttachment = &attachment
- break
- }
- }
-
- if eniAttachment != nil {
- // awsvpc networking mode - get IP from ENI
- networkMode = "awsvpc"
- for _, detail := range eniAttachment.Details {
- switch *detail.Name {
- case "privateIPv4Address":
- ipAddress = *detail.Value
- case "subnetId":
- subnetID = *detail.Value
- }
- }
- // Get public IP from ENI if available
- if eniID, ok := taskToENI[*task.TaskArn]; ok {
- if eniPublicIP, ok := eniToPublicIP[eniID]; ok {
- publicIP = eniPublicIP
- }
- }
- } else if task.ContainerInstanceArn != nil {
- // bridge/host networking mode - need to get EC2 instance IP and subnet
- networkMode = "bridge"
- containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
- if ok {
- ec2InstanceID, ok = containerInstToEC2[containerInstARN]
- if ok {
- info, ok := ec2InstInfo[ec2InstanceID]
- if ok {
- ipAddress = info.privateIP
- publicIP = info.publicIP
- subnetID = info.subnetID
- ec2InstanceType = info.instanceType
- ec2InstancePrivateIP = info.privateIP
- ec2InstancePublicIP = info.publicIP
- } else {
- d.logger.Debug("EC2 instance info not found", "instance", ec2InstanceID, "task", *task.TaskArn)
- }
- } else {
- d.logger.Debug("Container instance not found in map", "arn", containerInstARN, "task", *task.TaskArn)
- }
- }
- }
-
- // Get EC2 instance metadata for awsvpc tasks running on EC2
- // We want the instance type and the host IPs for advanced use cases
- if networkMode == "awsvpc" && task.ContainerInstanceArn != nil {
- containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
- if ok {
- ec2InstanceID, ok = containerInstToEC2[containerInstARN]
- if ok {
- info, ok := ec2InstInfo[ec2InstanceID]
- if ok {
- ec2InstanceType = info.instanceType
- ec2InstancePrivateIP = info.privateIP
- ec2InstancePublicIP = info.publicIP
- }
- }
- }
- }
-
- if ipAddress == "" {
- continue
- }
-
- labels := model.LabelSet{
- ecsLabelClusterARN: model.LabelValue(*clusterService.ClusterArn),
- ecsLabelService: model.LabelValue(*clusterService.ServiceName),
- ecsLabelServiceARN: model.LabelValue(*clusterService.ServiceArn),
- ecsLabelServiceStatus: model.LabelValue(*clusterService.Status),
- ecsLabelTaskGroup: model.LabelValue(*task.Group),
- ecsLabelTaskARN: model.LabelValue(*task.TaskArn),
- ecsLabelTaskDefinition: model.LabelValue(*task.TaskDefinitionArn),
- ecsLabelIPAddress: model.LabelValue(ipAddress),
- ecsLabelRegion: model.LabelValue(d.cfg.Region),
- ecsLabelLaunchType: model.LabelValue(task.LaunchType),
- ecsLabelAvailabilityZone: model.LabelValue(*task.AvailabilityZone),
- ecsLabelDesiredStatus: model.LabelValue(*task.DesiredStatus),
- ecsLabelLastStatus: model.LabelValue(*task.LastStatus),
- ecsLabelHealthStatus: model.LabelValue(task.HealthStatus),
- ecsLabelNetworkMode: model.LabelValue(networkMode),
- }
-
- // Add subnet ID when available (awsvpc mode from ENI, bridge/host from EC2 instance)
- if subnetID != "" {
- labels[ecsLabelSubnetID] = model.LabelValue(subnetID)
- }
-
- // Add container instance and EC2 instance info for EC2 launch type
- if task.ContainerInstanceArn != nil {
- labels[ecsLabelContainerInstanceARN] = model.LabelValue(*task.ContainerInstanceArn)
- }
- if ec2InstanceID != "" {
- labels[ecsLabelEC2InstanceID] = model.LabelValue(ec2InstanceID)
- }
- if ec2InstanceType != "" {
- labels[ecsLabelEC2InstanceType] = model.LabelValue(ec2InstanceType)
- }
- if ec2InstancePrivateIP != "" {
- labels[ecsLabelEC2InstancePrivateIP] = model.LabelValue(ec2InstancePrivateIP)
- }
- if ec2InstancePublicIP != "" {
- labels[ecsLabelEC2InstancePublicIP] = model.LabelValue(ec2InstancePublicIP)
- }
- if publicIP != "" {
- labels[ecsLabelPublicIP] = model.LabelValue(publicIP)
- }
-
- if task.PlatformFamily != nil {
- labels[ecsLabelPlatformFamily] = model.LabelValue(*task.PlatformFamily)
- }
- if task.PlatformVersion != nil {
- labels[ecsLabelPlatformVersion] = model.LabelValue(*task.PlatformVersion)
- }
-
- labels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(ipAddress, strconv.Itoa(d.cfg.Port)))
-
- // Add cluster tags
- if cluster, exists := clusterARNMap[*clusterService.ClusterArn]; exists {
- if cluster.ClusterName != nil {
- labels[ecsLabelCluster] = model.LabelValue(*cluster.ClusterName)
- }
-
- for _, clusterTag := range cluster.Tags {
- if clusterTag.Key != nil && clusterTag.Value != nil {
- labels[model.LabelName(ecsLabelTagCluster+strutil.SanitizeLabelName(*clusterTag.Key))] = model.LabelValue(*clusterTag.Value)
- }
- }
- }
-
- // Add service tags
- for _, serviceTag := range clusterService.Tags {
- if serviceTag.Key != nil && serviceTag.Value != nil {
- labels[model.LabelName(ecsLabelTagService+strutil.SanitizeLabelName(*serviceTag.Key))] = model.LabelValue(*serviceTag.Value)
- }
- }
-
- // Add task tags
- for _, taskTag := range task.Tags {
- if taskTag.Key != nil && taskTag.Value != nil {
- labels[model.LabelName(ecsLabelTagTask+strutil.SanitizeLabelName(*taskTag.Key))] = model.LabelValue(*taskTag.Value)
- }
- }
-
- // Add EC2 instance tags (if running on EC2)
- if ec2InstanceID != "" {
- if info, ok := ec2InstInfo[ec2InstanceID]; ok {
- for tagKey, tagValue := range info.tags {
- labels[model.LabelName(ecsLabelTagEC2+strutil.SanitizeLabelName(tagKey))] = model.LabelValue(tagValue)
- }
- }
- }
-
- serviceTargets = append(serviceTargets, labels)
- }
-
- // Add service targets to local targets with mutex protection
- localTargetsMu.Lock()
- localTargets = append(localTargets, serviceTargets...)
- localTargetsMu.Unlock()
- }
- }(clusterService)
+ if err := resourceErrg.Wait(); err != nil {
+ return
}
- serviceWg.Wait()
+ // Fetch container instances and network interfaces in parallel (both depend on tasks)
+ var (
+ containerInstances map[string]string
+ eniToPublicIP map[string]string
+ )
- // Add all local targets to main target group with mutex protection
- targetsMu.Lock()
- tg.Targets = append(tg.Targets, localTargets...)
- targetsMu.Unlock()
- }(clusterArn, clusterServices)
+ instanceErrg, instanceCtx := errgroup.WithContext(ctx)
+ instanceErrg.Go(func() error {
+ var err error
+ containerInstances, err = d.describeContainerInstances(instanceCtx, *cluster.ClusterArn, tasks)
+ if err != nil {
+ d.logger.Error("Failed to describe container instances for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ }
+ return err
+ })
+ instanceErrg.Go(func() error {
+ var err error
+ eniToPublicIP, err = d.describeNetworkInterfaces(instanceCtx, tasks)
+ if err != nil {
+ d.logger.Error("Failed to describe network interfaces for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ }
+ return err
+ })
+
+ if err := instanceErrg.Wait(); err != nil {
+ return
+ }
+
+ ec2Instances := make(map[string]ec2InstanceInfo)
+ if len(containerInstances) > 0 {
+ // Deduplicate EC2 instance IDs (multiple tasks can share the same instance)
+ ec2InstanceIDSet := make(map[string]struct{})
+ for _, ec2ID := range containerInstances {
+ ec2InstanceIDSet[ec2ID] = struct{}{}
+ }
+ ec2InstanceIDs := make([]string, 0, len(ec2InstanceIDSet))
+ for ec2ID := range ec2InstanceIDSet {
+ ec2InstanceIDs = append(ec2InstanceIDs, ec2ID)
+ }
+				// Use a goroutine-local err; assigning refresh's outer err here would race across cluster goroutines.
+				var err error
+				ec2Instances, err = d.describeEC2Instances(ctx, ec2InstanceIDs)
+ if err != nil {
+ d.logger.Error("Failed to describe EC2 instances for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ return
+ }
+ }
+
+ var (
+ taskWg sync.WaitGroup
+ taskMu sync.Mutex
+ taskTargets []model.LabelSet
+ )
+
+ for _, task := range tasks {
+ taskWg.Add(1)
+
+ go func(cluster types.Cluster, services map[string]types.Service, task types.Task, containerInstances map[string]string, ec2Instances map[string]ec2InstanceInfo, eniToPublicIP map[string]string) {
+ defer taskWg.Done()
+
+ var (
+ ipAddress, subnetID, publicIP string
+ networkMode string
+ ec2InstanceID, ec2InstanceType, ec2InstancePrivateIP, ec2InstancePublicIP string
+ )
+
+ // Try to get IP from ENI attachment (awsvpc mode)
+ var eniAttachment *types.Attachment
+ for _, attachment := range task.Attachments {
+ if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
+ eniAttachment = &attachment
+ break
+ }
+ }
+
+ if eniAttachment != nil {
+ // awsvpc networking mode - get IP from ENI
+ networkMode = "awsvpc"
+ var eniID string
+ for _, detail := range eniAttachment.Details {
+ switch *detail.Name {
+ case "privateIPv4Address":
+ ipAddress = *detail.Value
+ case "subnetId":
+ subnetID = *detail.Value
+ case "networkInterfaceId":
+ eniID = *detail.Value
+ }
+ }
+ // Get public IP from ENI if available
+ if eniID != "" {
+ if pub, ok := eniToPublicIP[eniID]; ok {
+ publicIP = pub
+ }
+ }
+ } else if task.ContainerInstanceArn != nil {
+ // bridge/host networking mode - need to get EC2 instance IP and subnet
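+						// NOTE: host-mode tasks are also labelled "bridge" here, since
+						// DescribeTasks does not expose the task definition's network mode.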
+ networkMode = "bridge"
+ var ok bool
+ ec2InstanceID, ok = containerInstances[*task.ContainerInstanceArn]
+ if ok {
+ info, ok := ec2Instances[ec2InstanceID]
+ if ok {
+ ipAddress = info.privateIP
+ publicIP = info.publicIP
+ subnetID = info.subnetID
+ ec2InstanceType = info.instanceType
+ ec2InstancePrivateIP = info.privateIP
+ ec2InstancePublicIP = info.publicIP
+ } else {
+ d.logger.Debug("EC2 instance info not found", "instance", ec2InstanceID, "task", *task.TaskArn)
+ }
+ } else {
+ d.logger.Debug("Container instance not found in map", "arn", *task.ContainerInstanceArn, "task", *task.TaskArn)
+ }
+ }
+
+ // Get EC2 instance metadata for awsvpc tasks running on EC2
+ // We want the instance type and the host IPs for advanced use cases
+ if networkMode == "awsvpc" && task.ContainerInstanceArn != nil {
+ var ok bool
+ ec2InstanceID, ok = containerInstances[*task.ContainerInstanceArn]
+ if ok {
+ info, ok := ec2Instances[ec2InstanceID]
+ if ok {
+ ec2InstanceType = info.instanceType
+ ec2InstancePrivateIP = info.privateIP
+ ec2InstancePublicIP = info.publicIP
+ }
+ }
+ }
+
+ if ipAddress == "" {
+ return
+ }
+
+ labels := model.LabelSet{
+ ecsLabelClusterARN: model.LabelValue(*cluster.ClusterArn),
+ ecsLabelCluster: model.LabelValue(*cluster.ClusterName),
+ ecsLabelTaskGroup: model.LabelValue(*task.Group),
+ ecsLabelTaskARN: model.LabelValue(*task.TaskArn),
+ ecsLabelTaskDefinition: model.LabelValue(*task.TaskDefinitionArn),
+ ecsLabelIPAddress: model.LabelValue(ipAddress),
+ ecsLabelRegion: model.LabelValue(d.cfg.Region),
+ ecsLabelLaunchType: model.LabelValue(task.LaunchType),
+ ecsLabelAvailabilityZone: model.LabelValue(*task.AvailabilityZone),
+ ecsLabelDesiredStatus: model.LabelValue(*task.DesiredStatus),
+ ecsLabelLastStatus: model.LabelValue(*task.LastStatus),
+ ecsLabelHealthStatus: model.LabelValue(task.HealthStatus),
+ ecsLabelNetworkMode: model.LabelValue(networkMode),
+ }
+
+ // Add subnet ID when available (awsvpc mode from ENI, bridge/host from EC2 instance)
+ if subnetID != "" {
+ labels[ecsLabelSubnetID] = model.LabelValue(subnetID)
+ }
+
+ // Add container instance and EC2 instance info for EC2 launch type
+ if task.ContainerInstanceArn != nil {
+ labels[ecsLabelContainerInstanceARN] = model.LabelValue(*task.ContainerInstanceArn)
+ }
+ if ec2InstanceID != "" {
+ labels[ecsLabelEC2InstanceID] = model.LabelValue(ec2InstanceID)
+ }
+ if ec2InstanceType != "" {
+ labels[ecsLabelEC2InstanceType] = model.LabelValue(ec2InstanceType)
+ }
+ if ec2InstancePrivateIP != "" {
+ labels[ecsLabelEC2InstancePrivateIP] = model.LabelValue(ec2InstancePrivateIP)
+ }
+ if ec2InstancePublicIP != "" {
+ labels[ecsLabelEC2InstancePublicIP] = model.LabelValue(ec2InstancePublicIP)
+ }
+ if publicIP != "" {
+ labels[ecsLabelPublicIP] = model.LabelValue(publicIP)
+ }
+
+ if task.PlatformFamily != nil {
+ labels[ecsLabelPlatformFamily] = model.LabelValue(*task.PlatformFamily)
+ }
+ if task.PlatformVersion != nil {
+ labels[ecsLabelPlatformVersion] = model.LabelValue(*task.PlatformVersion)
+ }
+
+ labels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(ipAddress, strconv.Itoa(d.cfg.Port)))
+
+ // Add cluster tags
+ for _, clusterTag := range cluster.Tags {
+ if clusterTag.Key != nil && clusterTag.Value != nil {
+ labels[model.LabelName(ecsLabelTagCluster+strutil.SanitizeLabelName(*clusterTag.Key))] = model.LabelValue(*clusterTag.Value)
+ }
+ }
+
+ // If this is not a standalone task, add service information and tags
+ if !isStandaloneTask(task) {
+ service, ok := services[getServiceNameFromTaskGroup(task)]
+ if !ok {
+ d.logger.Debug("Service not found for task", "task", *task.TaskArn, "service", getServiceNameFromTaskGroup(task))
+ }
+ if service.ServiceName != nil {
+ labels[ecsLabelService] = model.LabelValue(*service.ServiceName)
+ }
+ if service.ServiceArn != nil {
+ labels[ecsLabelServiceARN] = model.LabelValue(*service.ServiceArn)
+ }
+ if service.Status != nil {
+ labels[ecsLabelServiceStatus] = model.LabelValue(*service.Status)
+ }
+
+ // Add service tags
+ for _, serviceTag := range service.Tags {
+ if serviceTag.Key != nil && serviceTag.Value != nil {
+ labels[model.LabelName(ecsLabelTagService+strutil.SanitizeLabelName(*serviceTag.Key))] = model.LabelValue(*serviceTag.Value)
+ }
+ }
+ }
+
+ // Add task tags
+ for _, taskTag := range task.Tags {
+ if taskTag.Key != nil && taskTag.Value != nil {
+ labels[model.LabelName(ecsLabelTagTask+strutil.SanitizeLabelName(*taskTag.Key))] = model.LabelValue(*taskTag.Value)
+ }
+ }
+
+ // Add EC2 instance tags (if running on EC2)
+ if ec2InstanceID != "" {
+ if info, ok := ec2Instances[ec2InstanceID]; ok {
+ for tagKey, tagValue := range info.tags {
+ labels[model.LabelName(ecsLabelTagEC2+strutil.SanitizeLabelName(tagKey))] = model.LabelValue(tagValue)
+ }
+ }
+ }
+
+ taskMu.Lock()
+ taskTargets = append(taskTargets, labels)
+ taskMu.Unlock()
+ }(cluster, services, task, containerInstances, ec2Instances, eniToPublicIP)
+ }
+
+ taskWg.Wait()
+
+ // Add this cluster's task targets to the overall collection
+ clusterMu.Lock()
+ clusterTargets = append(clusterTargets, taskTargets...)
+ clusterMu.Unlock()
+ }(clusterMap[clusterARN], serviceMap[clusterARN], taskARNs)
}
- wg.Wait()
+ clusterWg.Wait()
+
+ // Set all targets to the target group
+ tg.Targets = clusterTargets
return []*targetgroup.Group{tg}, nil
}
+
+func isStandaloneTask(task types.Task) bool {
+	// A standalone task has a Group of the form "family:<task-definition-name>";
+	// service-managed tasks use "service:<service-name>".
+ return task.Group != nil && strings.HasPrefix(*task.Group, "family:")
+}
+
+func getServiceNameFromTaskGroup(task types.Task) string {
+	// Group is "service:<name>"; guard nil/malformed groups to avoid panics.
+	if parts := strings.Split(aws.ToString(task.Group), ":"); len(parts) > 1 {
+		return parts[1]
+	}
+	return ""
+}
diff --git a/discovery/aws/ecs_test.go b/discovery/aws/ecs_test.go
index 1cb48b27fa..bb1f96a28e 100644
--- a/discovery/aws/ecs_test.go
+++ b/discovery/aws/ecs_test.go
@@ -214,7 +214,6 @@ func TestECSDiscoveryDescribeClusters(t *testing.T) {
func TestECSDiscoveryListServiceARNs(t *testing.T) {
ctx := context.Background()
- // iterate through the test cases
for _, tt := range []struct {
name string
ecsData *ecsDataStore
@@ -225,33 +224,18 @@ func TestECSDiscoveryListServiceARNs(t *testing.T) {
name: "SingleClusterWithServices",
ecsData: &ecsDataStore{
region: "us-west-2",
- clusters: []ecsTypes.Cluster{
- {
- ClusterName: strptr("test-cluster"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("ACTIVE"),
- },
- },
services: []ecsTypes.Service{
{
ServiceName: strptr("web-service"),
ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
+ Status: strptr("ACTIVE"),
},
{
ServiceName: strptr("api-service"),
ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- },
- {
- // this is to test the old arn format without the cluster name in the service arn
- // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-arn-migration.html
- ServiceName: strptr("old-api-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/old-api-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
+ Status: strptr("ACTIVE"),
},
},
},
@@ -260,70 +244,50 @@ func TestECSDiscoveryListServiceARNs(t *testing.T) {
"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": {
"arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service",
"arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service",
- "arn:aws:ecs:us-west-2:123456789012:service/old-api-service",
},
},
},
{
- name: "MultipleClustesWithServices",
+ name: "MultipleClusters",
ecsData: &ecsDataStore{
- region: "us-east-1",
- clusters: []ecsTypes.Cluster{
- {
- ClusterName: strptr("cluster-1"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"),
- Status: strptr("ACTIVE"),
- },
- {
- ClusterName: strptr("cluster-2"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"),
- Status: strptr("ACTIVE"),
- },
- },
+ region: "us-west-2",
services: []ecsTypes.Service{
{
- ServiceName: strptr("service-1"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"),
- Status: strptr("RUNNING"),
+ ServiceName: strptr("web-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/cluster-1/web-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/cluster-1"),
+ Status: strptr("ACTIVE"),
},
{
- ServiceName: strptr("service-2"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"),
- Status: strptr("RUNNING"),
+ ServiceName: strptr("api-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/cluster-2/api-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/cluster-2"),
+ Status: strptr("ACTIVE"),
},
},
},
clusterARNs: []string{
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1",
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2",
+ "arn:aws:ecs:us-west-2:123456789012:cluster/cluster-1",
+ "arn:aws:ecs:us-west-2:123456789012:cluster/cluster-2",
},
expected: map[string][]string{
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": {
- "arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1",
+ "arn:aws:ecs:us-west-2:123456789012:cluster/cluster-1": {
+ "arn:aws:ecs:us-west-2:123456789012:service/cluster-1/web-service",
},
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": {
- "arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2",
+ "arn:aws:ecs:us-west-2:123456789012:cluster/cluster-2": {
+ "arn:aws:ecs:us-west-2:123456789012:service/cluster-2/api-service",
},
},
},
{
- name: "ClusterWithNoServices",
+ name: "EmptyCluster",
ecsData: &ecsDataStore{
- region: "us-west-2",
- clusters: []ecsTypes.Cluster{
- {
- ClusterName: strptr("empty-cluster"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster"),
- Status: strptr("ACTIVE"),
- },
- },
+ region: "us-west-2",
services: []ecsTypes.Service{},
},
- clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster"},
+ clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"},
expected: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster": nil,
+ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": nil,
},
},
} {
@@ -334,7 +298,7 @@ func TestECSDiscoveryListServiceARNs(t *testing.T) {
ecs: client,
cfg: &ECSSDConfig{
Region: tt.ecsData.region,
- RequestConcurrency: 1,
+ RequestConcurrency: 2,
},
}
@@ -348,113 +312,178 @@ func TestECSDiscoveryListServiceARNs(t *testing.T) {
func TestECSDiscoveryDescribeServices(t *testing.T) {
ctx := context.Background()
- // iterate through the test cases
for _, tt := range []struct {
- name string
- ecsData *ecsDataStore
- clusterServiceARNsMap map[string][]string
- expected map[string][]ecsTypes.Service
+ name string
+ ecsData *ecsDataStore
+ clusterARN string
+ serviceARNs []string
+ expected map[string]ecsTypes.Service
}{
{
- name: "SingleClusterServices",
+ name: "ServicesWithTags",
ecsData: &ecsDataStore{
region: "us-west-2",
services: []ecsTypes.Service{
{
- ServiceName: strptr("web-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"),
+ ServiceName: strptr("web-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
Tags: []ecsTypes.Tag{
{Key: strptr("Environment"), Value: strptr("production")},
+ {Key: strptr("Team"), Value: strptr("platform")},
},
},
{
- ServiceName: strptr("api-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"),
- },
- },
- },
- clusterServiceARNsMap: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": {
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service",
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service",
- },
- },
- expected: map[string][]ecsTypes.Service{
- "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": {
- {
- ServiceName: strptr("web-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"),
+ ServiceName: strptr("api-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
Tags: []ecsTypes.Tag{
- {Key: strptr("Environment"), Value: strptr("production")},
+ {Key: strptr("Environment"), Value: strptr("staging")},
},
},
- {
- ServiceName: strptr("api-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"),
+ },
+ },
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ serviceARNs: []string{
+ "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service",
+ "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service",
+ },
+ expected: map[string]ecsTypes.Service{
+ "web-service": {
+ ServiceName: strptr("web-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
+ Tags: []ecsTypes.Tag{
+ {Key: strptr("Environment"), Value: strptr("production")},
+ {Key: strptr("Team"), Value: strptr("platform")},
+ },
+ },
+ "api-service": {
+ ServiceName: strptr("api-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
+ Tags: []ecsTypes.Tag{
+ {Key: strptr("Environment"), Value: strptr("staging")},
},
},
},
},
{
- name: "MultipleClustersServices",
+ name: "EmptyServiceList",
ecsData: &ecsDataStore{
- region: "us-east-1",
- services: []ecsTypes.Service{
+ region: "us-west-2",
+ services: []ecsTypes.Service{},
+ },
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ serviceARNs: []string{},
+ expected: map[string]ecsTypes.Service{},
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockECSClient(tt.ecsData)
+
+ d := &ECSDiscovery{
+ ecs: client,
+ cfg: &ECSSDConfig{
+ Region: tt.ecsData.region,
+ RequestConcurrency: 2,
+ },
+ }
+
+ services, err := d.describeServices(ctx, tt.clusterARN, tt.serviceARNs)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, services)
+ })
+ }
+}
+
+func TestECSDiscoveryDescribeContainerInstances(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ ecsData *ecsDataStore
+ clusterARN string
+ tasks []ecsTypes.Task
+ expected map[string]string
+ }{
+ {
+ name: "EC2Tasks",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ containerInstances: []ecsTypes.ContainerInstance{
{
- ServiceName: strptr("service-1"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-1:1"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ Ec2InstanceId: strptr("i-1234567890abcdef0"),
},
{
- ServiceName: strptr("service-2"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"),
- Status: strptr("DRAINING"),
- TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-2:1"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/xyz789"),
+ Ec2InstanceId: strptr("i-0987654321fedcba0"),
},
},
},
- clusterServiceARNsMap: map[string][]string{
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": {
- "arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1",
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
},
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": {
- "arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2",
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/xyz789"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
},
},
- expected: map[string][]ecsTypes.Service{
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": {
+ expected: map[string]string{
+ "arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123": "i-1234567890abcdef0",
+ "arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/xyz789": "i-0987654321fedcba0",
+ },
+ },
+ {
+ name: "FargateTasks",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ containerInstances: []ecsTypes.ContainerInstance{},
+ },
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ },
+ },
+ expected: map[string]string{},
+ },
+ {
+ name: "MixedTasks",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ containerInstances: []ecsTypes.ContainerInstance{
{
- ServiceName: strptr("service-1"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-1:1"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ Ec2InstanceId: strptr("i-1234567890abcdef0"),
},
},
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": {
- {
- ServiceName: strptr("service-2"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"),
- Status: strptr("DRAINING"),
- TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-2:1"),
- },
+ },
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-ec2"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
},
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-fargate"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ },
+ },
+ expected: map[string]string{
+ "arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123": "i-1234567890abcdef0",
},
},
} {
@@ -465,13 +494,267 @@ func TestECSDiscoveryDescribeServices(t *testing.T) {
ecs: client,
cfg: &ECSSDConfig{
Region: tt.ecsData.region,
- RequestConcurrency: 1,
+ RequestConcurrency: 2,
},
}
- serviceMap, err := d.describeServices(ctx, tt.clusterServiceARNsMap)
+ containerInstances, err := d.describeContainerInstances(ctx, tt.clusterARN, tt.tasks)
require.NoError(t, err)
- require.Equal(t, tt.expected, serviceMap)
+ require.Equal(t, tt.expected, containerInstances)
+ })
+ }
+}
+
+func TestECSDiscoveryDescribeEC2Instances(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ ecsData *ecsDataStore
+ instanceIDs []string
+ expected map[string]ec2InstanceInfo
+ }{
+ {
+ name: "InstancesWithTags",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ ec2Instances: map[string]ec2InstanceInfo{
+ "i-1234567890abcdef0": {
+ privateIP: "10.0.1.50",
+ publicIP: "54.1.2.3",
+ subnetID: "subnet-12345",
+ instanceType: "t3.medium",
+ tags: map[string]string{
+ "Name": "ecs-host-1",
+ "Environment": "production",
+ },
+ },
+ "i-0987654321fedcba0": {
+ privateIP: "10.0.1.75",
+ publicIP: "54.2.3.4",
+ subnetID: "subnet-67890",
+ instanceType: "t3.large",
+ tags: map[string]string{
+ "Name": "ecs-host-2",
+ "Team": "platform",
+ },
+ },
+ },
+ },
+ instanceIDs: []string{"i-1234567890abcdef0", "i-0987654321fedcba0"},
+ expected: map[string]ec2InstanceInfo{
+ "i-1234567890abcdef0": {
+ privateIP: "10.0.1.50",
+ publicIP: "54.1.2.3",
+ subnetID: "subnet-12345",
+ instanceType: "t3.medium",
+ tags: map[string]string{
+ "Name": "ecs-host-1",
+ "Environment": "production",
+ },
+ },
+ "i-0987654321fedcba0": {
+ privateIP: "10.0.1.75",
+ publicIP: "54.2.3.4",
+ subnetID: "subnet-67890",
+ instanceType: "t3.large",
+ tags: map[string]string{
+ "Name": "ecs-host-2",
+ "Team": "platform",
+ },
+ },
+ },
+ },
+ {
+ name: "EmptyList",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ ec2Instances: map[string]ec2InstanceInfo{},
+ },
+ instanceIDs: []string{},
+ expected: map[string]ec2InstanceInfo{},
+ },
+ {
+ name: "InstanceWithoutPublicIP",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ ec2Instances: map[string]ec2InstanceInfo{
+ "i-privateonly": {
+ privateIP: "10.0.1.100",
+ publicIP: "",
+ subnetID: "subnet-private",
+ instanceType: "t3.micro",
+ tags: map[string]string{},
+ },
+ },
+ },
+ instanceIDs: []string{"i-privateonly"},
+ expected: map[string]ec2InstanceInfo{
+ "i-privateonly": {
+ privateIP: "10.0.1.100",
+ publicIP: "",
+ subnetID: "subnet-private",
+ instanceType: "t3.micro",
+ tags: map[string]string{},
+ },
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ ec2Client := newMockECSEC2Client(tt.ecsData.ec2Instances, nil)
+
+ d := &ECSDiscovery{
+ ec2: ec2Client,
+ cfg: &ECSSDConfig{
+ Region: tt.ecsData.region,
+ RequestConcurrency: 2,
+ },
+ }
+
+ instances, err := d.describeEC2Instances(ctx, tt.instanceIDs)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, instances)
+ })
+ }
+}
+
+func TestECSDiscoveryDescribeNetworkInterfaces(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ ecsData *ecsDataStore
+ tasks []ecsTypes.Task
+ expected map[string]string
+ }{
+ {
+ name: "AwsvpcTasksWithPublicIPs",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ eniPublicIPs: map[string]string{
+ "eni-12345": "52.1.2.3",
+ "eni-67890": "52.2.3.4",
+ },
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-12345")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
+ },
+ },
+ },
+ },
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-67890")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.200")},
+ },
+ },
+ },
+ },
+ },
+ expected: map[string]string{
+ "eni-12345": "52.1.2.3",
+ "eni-67890": "52.2.3.4",
+ },
+ },
+ {
+ name: "AwsvpcTasksWithoutPublicIPs",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ eniPublicIPs: map[string]string{},
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-private")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
+ },
+ },
+ },
+ },
+ },
+ expected: map[string]string{},
+ },
+ {
+ name: "BridgeTasksNoENI",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ eniPublicIPs: map[string]string{},
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
+ // No ENI attachment for bridge networking
+ Attachments: []ecsTypes.Attachment{},
+ },
+ },
+ expected: map[string]string{},
+ },
+ {
+ name: "MixedTasks",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ eniPublicIPs: map[string]string{
+ "eni-fargate": "52.1.2.3",
+ },
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-fargate"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-fargate")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
+ },
+ },
+ },
+ },
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
+ Attachments: []ecsTypes.Attachment{},
+ },
+ },
+ expected: map[string]string{
+ "eni-fargate": "52.1.2.3",
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ ec2Client := newMockECSEC2Client(nil, tt.ecsData.eniPublicIPs)
+
+ d := &ECSDiscovery{
+ ec2: ec2Client,
+ cfg: &ECSSDConfig{
+ Region: tt.ecsData.region,
+ RequestConcurrency: 2,
+ },
+ }
+
+ eniMap, err := d.describeNetworkInterfaces(ctx, tt.tasks)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, eniMap)
})
}
}
@@ -481,13 +764,13 @@ func TestECSDiscoveryListTaskARNs(t *testing.T) {
// iterate through the test cases
for _, tt := range []struct {
- name string
- ecsData *ecsDataStore
- services []ecsTypes.Service
- expected map[string][]string
+ name string
+ ecsData *ecsDataStore
+ clusterARNs []string
+ expected map[string][]string
}{
{
- name: "ServicesWithTasks",
+ name: "TasksInCluster",
ecsData: &ecsDataStore{
region: "us-west-2",
tasks: []ecsTypes.Task{
@@ -511,46 +794,24 @@ func TestECSDiscoveryListTaskARNs(t *testing.T) {
},
},
},
- services: []ecsTypes.Service{
- {
- ServiceName: strptr("web-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- },
- {
- ServiceName: strptr("api-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- },
- },
+ clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"},
expected: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": {
+ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": {
"arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1",
"arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2",
- },
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": {
"arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-3",
},
},
},
{
- name: "ServiceWithNoTasks",
+ name: "EmptyCluster",
ecsData: &ecsDataStore{
region: "us-west-2",
tasks: []ecsTypes.Task{},
},
- services: []ecsTypes.Service{
- {
- ServiceName: strptr("empty-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/empty-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- },
- },
+ clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"},
expected: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/empty-service": nil,
+ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": nil,
},
},
} {
@@ -565,7 +826,7 @@ func TestECSDiscoveryListTaskARNs(t *testing.T) {
},
}
- taskMap, err := d.listTaskARNs(ctx, tt.services)
+ taskMap, err := d.listTaskARNs(ctx, tt.clusterARNs)
require.NoError(t, err)
require.Equal(t, tt.expected, taskMap)
})
@@ -577,11 +838,11 @@ func TestECSDiscoveryDescribeTasks(t *testing.T) {
// iterate through the test cases
for _, tt := range []struct {
- name string
- ecsData *ecsDataStore
- clusterARN string
- taskARNsMap map[string][]string
- expected map[string][]ecsTypes.Task
+ name string
+ ecsData *ecsDataStore
+ clusterARN string
+ taskARNs []string
+ expected []ecsTypes.Task
}{
{
name: "TasksInCluster",
@@ -608,47 +869,39 @@ func TestECSDiscoveryDescribeTasks(t *testing.T) {
},
},
clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
- taskARNsMap: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": {
- "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1",
- },
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": {
- "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2",
- },
+ taskARNs: []string{
+ "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1",
+ "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2",
},
- expected: map[string][]ecsTypes.Task{
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": {
- {
- TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Group: strptr("service:web-service"),
- TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"),
- LastStatus: strptr("RUNNING"),
- Tags: []ecsTypes.Tag{
- {Key: strptr("Environment"), Value: strptr("production")},
- },
+ expected: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Group: strptr("service:web-service"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"),
+ LastStatus: strptr("RUNNING"),
+ Tags: []ecsTypes.Tag{
+ {Key: strptr("Environment"), Value: strptr("production")},
},
},
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": {
- {
- TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Group: strptr("service:api-service"),
- TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"),
- LastStatus: strptr("RUNNING"),
- },
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Group: strptr("service:api-service"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"),
+ LastStatus: strptr("RUNNING"),
},
},
},
{
- name: "EmptyTaskARNsMap",
+ name: "EmptyTaskList",
ecsData: &ecsDataStore{
region: "us-west-2",
tasks: []ecsTypes.Task{},
},
- clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
- taskARNsMap: map[string][]string{},
- expected: map[string][]ecsTypes.Task{},
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ taskARNs: []string{},
+ expected: nil,
},
} {
t.Run(tt.name, func(t *testing.T) {
@@ -662,9 +915,9 @@ func TestECSDiscoveryDescribeTasks(t *testing.T) {
},
}
- taskMap, err := d.describeTasks(ctx, tt.clusterARN, tt.taskARNsMap)
+ tasks, err := d.describeTasks(ctx, tt.clusterARN, tt.taskARNs)
require.NoError(t, err)
- require.Equal(t, tt.expected, taskMap)
+ require.Equal(t, tt.expected, tasks)
})
}
}
@@ -836,6 +1089,75 @@ func TestECSDiscoveryRefresh(t *testing.T) {
},
},
},
+ {
+ name: "StandaloneTaskNoService",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ clusters: []ecsTypes.Cluster{
+ {
+ ClusterName: strptr("standalone-cluster"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/standalone-cluster"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ services: []ecsTypes.Service{},
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/standalone-cluster/task-standalone"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/standalone-cluster"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/standalone-task:1"),
+ Group: strptr("family:standalone-task"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ LastStatus: strptr("RUNNING"),
+ DesiredStatus: strptr("RUNNING"),
+ HealthStatus: ecsTypes.HealthStatusHealthy,
+ AvailabilityZone: strptr("us-west-2a"),
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("subnetId"), Value: strptr("subnet-standalone-1")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.4.10")},
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-standalone-123")},
+ },
+ },
+ },
+ Tags: []ecsTypes.Tag{
+ {Key: strptr("Role"), Value: strptr("batch")},
+ },
+ },
+ },
+ eniPublicIPs: map[string]string{
+ "eni-standalone-123": "52.4.5.6",
+ },
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("10.0.4.10:80"),
+ "__meta_ecs_cluster": model.LabelValue("standalone-cluster"),
+ "__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/standalone-cluster"),
+ "__meta_ecs_task_group": model.LabelValue("family:standalone-task"),
+ "__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/standalone-cluster/task-standalone"),
+ "__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/standalone-task:1"),
+ "__meta_ecs_region": model.LabelValue("us-west-2"),
+ "__meta_ecs_availability_zone": model.LabelValue("us-west-2a"),
+ "__meta_ecs_subnet_id": model.LabelValue("subnet-standalone-1"),
+ "__meta_ecs_ip_address": model.LabelValue("10.0.4.10"),
+ "__meta_ecs_launch_type": model.LabelValue("FARGATE"),
+ "__meta_ecs_desired_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_last_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_health_status": model.LabelValue("HEALTHY"),
+ "__meta_ecs_network_mode": model.LabelValue("awsvpc"),
+ "__meta_ecs_public_ip": model.LabelValue("52.4.5.6"),
+ "__meta_ecs_tag_task_Role": model.LabelValue("batch"),
+ },
+ },
+ },
+ },
+ },
{
name: "TaskWithBridgeNetworking",
ecsData: &ecsDataStore{
@@ -1184,7 +1506,14 @@ func TestECSDiscoveryRefresh(t *testing.T) {
groups, err := d.refresh(ctx)
require.NoError(t, err)
- require.Equal(t, tt.expected, groups)
+ if tt.name == "MixedNetworkingModes" {
+ // Use ElementsMatch for tests with multiple tasks as goroutines can affect order
+ require.Len(t, groups, len(tt.expected))
+ require.Equal(t, tt.expected[0].Source, groups[0].Source)
+ require.ElementsMatch(t, tt.expected[0].Targets, groups[0].Targets)
+ } else {
+ require.Equal(t, tt.expected, groups)
+ }
})
}
}
@@ -1381,3 +1710,98 @@ func (m *mockECSEC2Client) DescribeNetworkInterfaces(_ context.Context, input *e
NetworkInterfaces: networkInterfaces,
}, nil
}
+
+func TestIsStandaloneTask(t *testing.T) {
+ tests := []struct {
+ name string
+ task ecsTypes.Task
+ expected bool
+ }{
+ {
+ name: "StandaloneTask",
+ task: ecsTypes.Task{
+ Group: strptr("family:my-task-definition"),
+ },
+ expected: true,
+ },
+ {
+ name: "ServiceTask",
+ task: ecsTypes.Task{
+ Group: strptr("service:my-service"),
+ },
+ expected: false,
+ },
+ {
+ name: "ServiceTaskWithColon",
+ task: ecsTypes.Task{
+ Group: strptr("service:my:service:name"),
+ },
+ expected: false,
+ },
+ {
+ name: "NilGroup",
+ task: ecsTypes.Task{
+ Group: nil,
+ },
+ expected: false,
+ },
+ {
+ name: "EmptyGroup",
+ task: ecsTypes.Task{
+ Group: strptr(""),
+ },
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := isStandaloneTask(tt.task)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
+
+func TestGetServiceNameFromTaskGroup(t *testing.T) {
+ tests := []struct {
+ name string
+ task ecsTypes.Task
+ expected string
+ }{
+ {
+ name: "SimpleServiceName",
+ task: ecsTypes.Task{
+ Group: strptr("service:my-service"),
+ },
+ expected: "my-service",
+ },
+ {
+ name: "ServiceNameWithHyphens",
+ task: ecsTypes.Task{
+ Group: strptr("service:web-api-service"),
+ },
+ expected: "web-api-service",
+ },
+ {
+ name: "ServiceNameWithColons",
+ task: ecsTypes.Task{
+ Group: strptr("service:my:service:name"),
+ },
+ expected: "my",
+ },
+ {
+ name: "FamilyGroup",
+ task: ecsTypes.Task{
+ Group: strptr("family:my-task-def"),
+ },
+ expected: "my-task-def",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := getServiceNameFromTaskGroup(tt.task)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
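The two helpers exercised by the new tests above, `isStandaloneTask` and `getServiceNameFromTaskGroup`, live in discovery/aws/ecs.go and are not shown in this hunk. A minimal sketch consistent with the table-driven cases (an illustration inferred from the tests, not necessarily the shipped implementation):

```go
import "strings"

// isStandaloneTask reports whether a task was launched outside any ECS
// service; such tasks carry a Group of the form "family:<task-definition>".
func isStandaloneTask(task ecsTypes.Task) bool {
	return task.Group != nil && strings.HasPrefix(*task.Group, "family:")
}

// getServiceNameFromTaskGroup returns the segment between the first and
// second colon of the task group, e.g. "my" for "service:my:service:name"
// and "my-task-def" for "family:my-task-def".
func getServiceNameFromTaskGroup(task ecsTypes.Task) string {
	if task.Group == nil {
		return ""
	}
	parts := strings.Split(*task.Group, ":")
	if len(parts) < 2 {
		return ""
	}
	return parts[1]
}
```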
From 96a87a06ea899a044339ae7cf294f28fea6c0a85 Mon Sep 17 00:00:00 2001
From: matt-gp
Date: Sun, 8 Feb 2026 16:41:54 +0000
Subject: [PATCH 131/165] AWS SD: Optimise MSK Role
Signed-off-by: matt-gp
---
discovery/aws/msk.go | 107 +++++++++++++-----------
discovery/aws/msk_test.go | 170 +++++++++++++++++++++++++++-----------
2 files changed, 180 insertions(+), 97 deletions(-)
diff --git a/discovery/aws/msk.go b/discovery/aws/msk.go
index 2a2b240d49..40bd809645 100644
--- a/discovery/aws/msk.go
+++ b/discovery/aws/msk.go
@@ -35,6 +35,7 @@ import (
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
+ "golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
@@ -88,9 +89,10 @@ const (
// DefaultMSKSDConfig is the default MSK SD configuration.
var DefaultMSKSDConfig = MSKSDConfig{
- Port: 80,
- RefreshInterval: model.Duration(60 * time.Second),
- HTTPClientConfig: config.DefaultHTTPClientConfig,
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ RequestConcurrency: 10,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@@ -109,7 +111,8 @@ type MSKSDConfig struct {
Port int `yaml:"port"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
- HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+ RequestConcurrency int `yaml:"request_concurrency,omitempty"`
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}
// NewDiscovererMetrics implements discovery.Config.
@@ -268,39 +271,33 @@ func (d *MSKDiscovery) initMskClient(ctx context.Context) error {
return nil
}
+// describeClusters describes the clusters with the given ARNs and returns their details.
func (d *MSKDiscovery) describeClusters(ctx context.Context, clusterARNs []string) ([]types.Cluster, error) {
var (
clusters []types.Cluster
- wg sync.WaitGroup
mu sync.Mutex
- errs []error
)
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
for _, clusterARN := range clusterARNs {
- wg.Add(1)
- go func(clusterARN string) {
- defer wg.Done()
- cluster, err := d.msk.DescribeClusterV2(ctx, &kafka.DescribeClusterV2Input{
+ errg.Go(func() error {
+ cluster, err := d.msk.DescribeClusterV2(ectx, &kafka.DescribeClusterV2Input{
ClusterArn: aws.String(clusterARN),
})
if err != nil {
- mu.Lock()
- errs = append(errs, fmt.Errorf("could not describe cluster %v: %w", clusterARN, err))
- mu.Unlock()
- return
+ return fmt.Errorf("could not describe cluster %v: %w", clusterARN, err)
}
mu.Lock()
clusters = append(clusters, *cluster.ClusterInfo)
mu.Unlock()
- }(clusterARN)
- }
- wg.Wait()
- if len(errs) > 0 {
- return nil, fmt.Errorf("errors occurred while describing clusters: %v", errs)
+ return nil
+ })
}
- return clusters, nil
+ return clusters, errg.Wait()
}
+// listClusters lists all MSK clusters in the configured region and returns their details.
func (d *MSKDiscovery) listClusters(ctx context.Context) ([]types.Cluster, error) {
var (
clusters []types.Cluster
@@ -328,29 +325,42 @@ func (d *MSKDiscovery) listClusters(ctx context.Context) ([]types.Cluster, error
return clusters, nil
}
-func (d *MSKDiscovery) listNodes(ctx context.Context, clusterARN string) ([]types.NodeInfo, error) {
- var (
- nodes []types.NodeInfo
- nextToken *string
- )
- for {
- resp, err := d.msk.ListNodes(ctx, &kafka.ListNodesInput{
- ClusterArn: aws.String(clusterARN),
- MaxResults: aws.Int32(100),
- NextToken: nextToken,
- })
- if err != nil {
- return nil, fmt.Errorf("could not list nodes for cluster %v: %w", clusterARN, err)
- }
+// listNodes lists all nodes for the given clusters and returns a map of cluster ARN to its nodes.
+func (d *MSKDiscovery) listNodes(ctx context.Context, clusters []types.Cluster) (map[string][]types.NodeInfo, error) {
+ clusterNodeMap := make(map[string][]types.NodeInfo)
+ mu := sync.Mutex{}
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for _, cluster := range clusters {
+ clusterARN := aws.ToString(cluster.ClusterArn)
+ errg.Go(func() error {
+ var clusterNodes []types.NodeInfo
+ var nextToken *string
+ for {
+ resp, err := d.msk.ListNodes(ectx, &kafka.ListNodesInput{
+ ClusterArn: aws.String(clusterARN),
+ MaxResults: aws.Int32(100),
+ NextToken: nextToken,
+ })
+ if err != nil {
+ return fmt.Errorf("could not list nodes for cluster %v: %w", clusterARN, err)
+ }
- nodes = append(nodes, resp.NodeInfoList...)
- if resp.NextToken == nil {
- break
- }
- nextToken = resp.NextToken
+ clusterNodes = append(clusterNodes, resp.NodeInfoList...)
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
+ }
+
+ mu.Lock()
+ clusterNodeMap[clusterARN] = clusterNodes
+ mu.Unlock()
+ return nil
+ })
}
- return nodes, nil
+ return clusterNodeMap, errg.Wait()
}
func (d *MSKDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
@@ -376,21 +386,20 @@ func (d *MSKDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
}
}
+ clusterNodeMap, err := d.listNodes(ctx, clusters)
+ if err != nil {
+ return nil, err
+ }
+
var (
targetsMu sync.Mutex
wg sync.WaitGroup
)
for _, cluster := range clusters {
wg.Add(1)
- go func(cluster types.Cluster) {
+
+ go func(cluster types.Cluster, nodes []types.NodeInfo) {
defer wg.Done()
-
- nodes, err := d.listNodes(ctx, aws.ToString(cluster.ClusterArn))
- if err != nil {
- d.logger.Error("Failed to list nodes", "cluster", aws.ToString(cluster.ClusterName), "error", err)
- return
- }
-
for _, node := range nodes {
labels := model.LabelSet{
mskLabelClusterName: model.LabelValue(aws.ToString(cluster.ClusterName)),
@@ -446,7 +455,7 @@ func (d *MSKDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
continue
}
}
- }(cluster)
+ }(cluster, clusterNodeMap[aws.ToString(cluster.ClusterArn)])
}
wg.Wait()
diff --git a/discovery/aws/msk_test.go b/discovery/aws/msk_test.go
index 31744221ef..b1d48a7ea6 100644
--- a/discovery/aws/msk_test.go
+++ b/discovery/aws/msk_test.go
@@ -218,7 +218,8 @@ func TestMSKDiscoveryDescribeClusters(t *testing.T) {
d := &MSKDiscovery{
msk: client,
cfg: &MSKSDConfig{
- Region: tt.mskData.region,
+ Region: tt.mskData.region,
+ RequestConcurrency: 10,
},
}
@@ -242,10 +243,10 @@ func TestMSKDiscoveryListNodes(t *testing.T) {
ctx := context.Background()
for _, tt := range []struct {
- name string
- mskData *mskDataStore
- clusterARN string
- expected []types.NodeInfo
+ name string
+ mskData *mskDataStore
+ clusters []types.Cluster
+ expected map[string][]types.NodeInfo
}{
{
name: "ClusterWithBrokers",
@@ -280,30 +281,36 @@ func TestMSKDiscoveryListNodes(t *testing.T) {
},
},
},
- clusterARN: "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123",
- expected: []types.NodeInfo{
+ clusters: []types.Cluster{
{
- NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
- AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
- InstanceType: strptr("kafka.m5.large"),
- BrokerNodeInfo: &types.BrokerNodeInfo{
- BrokerId: aws.Float64(1),
- ClientSubnet: strptr("subnet-12345"),
- ClientVpcIpAddress: strptr("10.0.1.100"),
- Endpoints: []string{"b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
- AttachedENIId: strptr("eni-12345"),
- },
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
},
- {
- NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
- AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
- InstanceType: strptr("kafka.m5.large"),
- BrokerNodeInfo: &types.BrokerNodeInfo{
- BrokerId: aws.Float64(2),
- ClientSubnet: strptr("subnet-67890"),
- ClientVpcIpAddress: strptr("10.0.1.101"),
- Endpoints: []string{"b-2.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
- AttachedENIId: strptr("eni-67890"),
+ },
+ expected: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-12345"),
+ ClientVpcIpAddress: strptr("10.0.1.100"),
+ Endpoints: []string{"b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-12345"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ ClientSubnet: strptr("subnet-67890"),
+ ClientVpcIpAddress: strptr("10.0.1.101"),
+ Endpoints: []string{"b-2.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-67890"),
+ },
},
},
},
@@ -316,8 +323,68 @@ func TestMSKDiscoveryListNodes(t *testing.T) {
"arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789": {},
},
},
- clusterARN: "arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789",
- expected: nil,
+ clusters: []types.Cluster{
+ {
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789"),
+ },
+ },
+ expected: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789": nil,
+ },
+ },
+ {
+ name: "MultipleClusters",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/cluster-1/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ },
+ },
+ },
+ "arn:aws:kafka:us-west-2:123456789012:cluster/cluster-2/def-456": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ InstanceType: strptr("kafka.m5.xlarge"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ },
+ },
+ },
+ },
+ },
+ clusters: []types.Cluster{
+ {
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/cluster-1/abc-123"),
+ },
+ {
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/cluster-2/def-456"),
+ },
+ },
+ expected: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/cluster-1/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ },
+ },
+ },
+ "arn:aws:kafka:us-west-2:123456789012:cluster/cluster-2/def-456": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ InstanceType: strptr("kafka.m5.xlarge"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ },
+ },
+ },
+ },
},
} {
t.Run(tt.name, func(t *testing.T) {
@@ -326,11 +393,12 @@ func TestMSKDiscoveryListNodes(t *testing.T) {
d := &MSKDiscovery{
msk: client,
cfg: &MSKSDConfig{
- Region: tt.mskData.region,
+ Region: tt.mskData.region,
+ RequestConcurrency: 10,
},
}
- nodes, err := d.listNodes(ctx, tt.clusterARN)
+ nodes, err := d.listNodes(ctx, tt.clusters)
require.NoError(t, err)
require.Equal(t, tt.expected, nodes)
})
@@ -398,9 +466,10 @@ func TestMSKDiscoveryRefresh(t *testing.T) {
},
},
config: &MSKSDConfig{
- Region: "us-west-2",
- Port: 80,
- Clusters: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"},
+ Region: "us-west-2",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"},
},
expected: []*targetgroup.Group{
{
@@ -441,9 +510,10 @@ func TestMSKDiscoveryRefresh(t *testing.T) {
clusters: []types.Cluster{},
},
config: &MSKSDConfig{
- Region: "us-east-1",
- Port: 80,
- Clusters: []string{}, // Empty clusters list uses listClusters
+ Region: "us-east-1",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: []string{}, // Empty clusters list uses listClusters
},
expected: []*targetgroup.Group{
{
@@ -499,9 +569,10 @@ func TestMSKDiscoveryRefresh(t *testing.T) {
},
},
config: &MSKSDConfig{
- Region: "us-west-2",
- Port: 80,
- Clusters: nil, // nil clusters list uses listClusters (backward compatibility)
+ Region: "us-west-2",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: nil, // nil clusters list uses listClusters (backward compatibility)
},
expected: []*targetgroup.Group{
{
@@ -612,9 +683,10 @@ func TestMSKDiscoveryRefresh(t *testing.T) {
},
},
config: &MSKSDConfig{
- Region: "us-west-2",
- Port: 80,
- Clusters: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"},
+ Region: "us-west-2",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"},
},
expected: []*targetgroup.Group{
{
@@ -764,9 +836,10 @@ func TestMSKDiscoveryRefresh(t *testing.T) {
},
},
config: &MSKSDConfig{
- Region: "us-east-1",
- Port: 80,
- Clusters: []string{"arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"},
+ Region: "us-east-1",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: []string{"arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"},
},
expected: []*targetgroup.Group{
{
@@ -922,8 +995,9 @@ func TestMSKDiscoveryRefresh(t *testing.T) {
if config == nil {
// Default config for backward compatibility
config = &MSKSDConfig{
- Region: tt.mskData.region,
- Port: 80,
+ Region: tt.mskData.region,
+ Port: 80,
+ RequestConcurrency: 10,
}
}
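The core of this optimisation is replacing the hand-rolled `sync.WaitGroup` plus error-slice fan-out with `golang.org/x/sync/errgroup`, bounded by the new `request_concurrency` setting. A self-contained sketch of that pattern with illustrative names (not the MSK code itself):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	errg, ctx := errgroup.WithContext(context.Background())
	errg.SetLimit(10) // cap in-flight goroutines, as RequestConcurrency does above

	items := []string{"a", "b", "c"}
	results := make([]string, len(items))
	for i, item := range items {
		errg.Go(func() error {
			if err := ctx.Err(); err != nil {
				return err // a sibling goroutine already failed; bail out early
			}
			results[i] = "described " + item // distinct indexes, so no mutex needed
			return nil
		})
	}
	// Wait returns the first non-nil error, replacing the old errs slice
	// and the manual wg.Wait()/len(errs) bookkeeping.
	if err := errg.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(results)
}
```

Note one behavioural difference the diff introduces: the first failing call cancels the shared context so remaining work can stop early, whereas the old code always completed every request and only then reported the accumulated errors.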
From 43834999970a07dc47fca0f91deff43073de5601 Mon Sep 17 00:00:00 2001
From: Ian Kerins
Date: Mon, 9 Feb 2026 02:26:37 -0500
Subject: [PATCH 132/165] promtool: support missing promql syntax features
(#17926)
Namely promql-duration-expr and promql-extended-range-selectors. This
allows promtool to e.g. check rules files using syntax gated by these
features.
Signed-off-by: Ian Kerins
---
cmd/promtool/main.go | 6 +++++-
cmd/promtool/main_test.go | 2 +-
cmd/promtool/testdata/features.yml | 8 ++++++--
docs/command-line/promtool.md | 2 +-
4 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index 16cc40233a..17035bb3b4 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -314,7 +314,7 @@ func main() {
promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()
- featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details").Default("").Strings()
+ featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal, promql-duration-expr, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details").Default("").Strings()
documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
@@ -351,6 +351,10 @@ func main() {
parser.EnableExperimentalFunctions = true
case "promql-delayed-name-removal":
promqlEnableDelayedNameRemoval = true
+ case "promql-duration-expr":
+ parser.ExperimentalDurationExpr = true
+ case "promql-extended-range-selectors":
+ parser.EnableExtendedRangeSelectors = true
case "":
continue
default:
diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go
index 9e6e7268f7..68d145795a 100644
--- a/cmd/promtool/main_test.go
+++ b/cmd/promtool/main_test.go
@@ -655,7 +655,7 @@ func TestCheckRulesWithFeatureFlag(t *testing.T) {
// As opposed to TestCheckRules calling CheckRules directly we run promtool
// so the feature flag parsing can be tested.
- args := []string{"-test.main", "--enable-feature=promql-experimental-functions", "check", "rules", "testdata/features.yml"}
+ args := []string{"-test.main", "--enable-feature=promql-experimental-functions", "--enable-feature=promql-duration-expr", "--enable-feature=promql-extended-range-selectors", "check", "rules", "testdata/features.yml"}
tool := exec.Command(promtoolPath, args...)
err := tool.Run()
require.NoError(t, err)
diff --git a/cmd/promtool/testdata/features.yml b/cmd/promtool/testdata/features.yml
index 769f8362bf..946e07d0d7 100644
--- a/cmd/promtool/testdata/features.yml
+++ b/cmd/promtool/testdata/features.yml
@@ -1,6 +1,10 @@
groups:
- name: features
rules:
- - record: x
- # We don't expect anything from this, just want to check the function parses.
+ # We don't expect anything from these, just want to check the syntax parses.
+ - record: promql-experimental-functions
expr: sort_by_label(up, "instance")
+ - record: promql-duration-expr
+ expr: rate(up[1m * 2])
+ - record: promql-extended-range-selectors
+ expr: rate(up[1m] anchored)
diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md
index f6737bc37f..e8ffa75aaa 100644
--- a/docs/command-line/promtool.md
+++ b/docs/command-line/promtool.md
@@ -12,7 +12,7 @@ Tooling for the Prometheus monitoring system.
| -h, --help | Show context-sensitive help (also try --help-long and --help-man). |
| --version | Show application version. |
| --experimental | Enable experimental commands. |
-| --enable-feature... | Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details |
+| --enable-feature... | Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal, promql-duration-expr, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details |
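With these flags wired up, rule files that use the gated syntax can be checked by mirroring the test invocation above, e.g. `promtool --enable-feature=promql-experimental-functions --enable-feature=promql-duration-expr --enable-feature=promql-extended-range-selectors check rules testdata/features.yml`, which should now accept expressions such as `rate(up[1m * 2])` and `rate(up[1m] anchored)`.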
From ac521337ba19ebd4ef187fe5aea1b4993bba0b81 Mon Sep 17 00:00:00 2001
From: SuperQ
Date: Mon, 9 Feb 2026 15:29:48 +0100
Subject: [PATCH 133/165] Also run CI on release tags
Make sure we also run the main CI workflow on `v*` release tags
so the `publish_release` job is run.
Signed-off-by: SuperQ
---
.github/workflows/ci.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8453110e7f..1553bdba19 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -4,6 +4,7 @@ on:
pull_request:
push:
branches: [main, 'release-*']
+ tags: ['v*']
permissions:
contents: read
From effa3c5c25d1c5a3305240680749c48d10ffcbe2 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 10 Feb 2026 08:24:26 +0000
Subject: [PATCH 134/165] chore(deps): bump github.com/hashicorp/consul/api
from 1.32.0 to 1.33.2 (#17449)
Bumps [github.com/hashicorp/consul/api](https://github.com/hashicorp/consul) from 1.32.0 to 1.33.2.
- [Release notes](https://github.com/hashicorp/consul/releases)
- [Changelog](https://github.com/hashicorp/consul/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/consul/compare/api/v1.32.0...api/v1.33.2)
---
updated-dependencies:
- dependency-name: github.com/hashicorp/consul/api
dependency-version: 1.33.2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Signed-off-by: Arve Knudsen
Co-authored-by: Arve Knudsen
---
documentation/examples/remote_storage/go.mod | 2 +-
go.mod | 4 ++--
go.sum | 8 ++++----
go.work | 2 +-
internal/tools/go.mod | 2 +-
web/ui/mantine-ui/src/promql/tools/go.mod | 2 +-
6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod
index 5f2cd98037..80d22ad13c 100644
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
-go 1.25.0
+go 1.25.5
require (
github.com/alecthomas/kingpin/v2 v2.4.0
diff --git a/go.mod b/go.mod
index dcad44eb9c..4b286fa77c 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus
-go 1.25.0
+go 1.25.5
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
@@ -40,7 +40,7 @@ require (
github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud/v2 v2.9.0
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
- github.com/hashicorp/consul/api v1.32.1
+ github.com/hashicorp/consul/api v1.33.2
github.com/hashicorp/nomad/api v0.0.0-20260106084653-e8f2200c7039
github.com/hetznercloud/hcloud-go/v2 v2.33.0
github.com/ionos-cloud/sdk-go/v6 v6.3.6
diff --git a/go.sum b/go.sum
index 661c8af7c7..aa2eb1b039 100644
--- a/go.sum
+++ b/go.sum
@@ -278,10 +278,10 @@ github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7E
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
-github.com/hashicorp/consul/api v1.32.1 h1:0+osr/3t/aZNAdJX558crU3PEjVrG4x6715aZHRgceE=
-github.com/hashicorp/consul/api v1.32.1/go.mod h1:mXUWLnxftwTmDv4W3lzxYCPD199iNLLUyLfLGFJbtl4=
-github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
-github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
+github.com/hashicorp/consul/api v1.33.2 h1:Q6mE0WZsUTJerlnl9TuXzqrtZ0cKdOCsxcZhj5mKbMs=
+github.com/hashicorp/consul/api v1.33.2/go.mod h1:K3yoL/vnIBcQV/25NeMZVokRvPPERiqp2Udtr4xAfhs=
+github.com/hashicorp/consul/sdk v0.17.1 h1:LumAh8larSXmXw2wvw/lK5ZALkJ2wK8VRwWMLVV5M5c=
+github.com/hashicorp/consul/sdk v0.17.1/go.mod h1:EngiixMhmw9T7wApycq6rDRFXXVUwjjf7HuLiGMH/Sw=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
diff --git a/go.work b/go.work
index c5ba5dfad6..4d53344b16 100644
--- a/go.work
+++ b/go.work
@@ -1,4 +1,4 @@
-go 1.25.0
+go 1.25.5
use (
.
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index 5238fca024..e2e5ce6b8f 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/internal/tools
-go 1.25.0
+go 1.25.5
require (
github.com/bufbuild/buf v1.62.1
diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod
index d3f69a698b..bf12b9c855 100644
--- a/web/ui/mantine-ui/src/promql/tools/go.mod
+++ b/web/ui/mantine-ui/src/promql/tools/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools
-go 1.25.0
+go 1.25.5
require (
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
From e27bcdf03f6c488d987ed99982dde5011b65b760 Mon Sep 17 00:00:00 2001
From: Matt
Date: Tue, 10 Feb 2026 11:02:24 +0000
Subject: [PATCH 135/165] AWS SD: Load Region Fallback (#18019)
* AWS SD: Load Region Fallback
---------
Signed-off-by: matt-gp
---
discovery/aws/aws.go | 54 ++++++++---
discovery/aws/aws_test.go | 178 +++++++++++++++++++++++++++++++++++++
discovery/aws/ec2.go | 30 +------
discovery/aws/ecs.go | 15 +---
discovery/aws/lightsail.go | 28 +-----
discovery/aws/msk.go | 27 +-----
6 files changed, 233 insertions(+), 99 deletions(-)
diff --git a/discovery/aws/aws.go b/discovery/aws/aws.go
index 9db87965bb..69b3b41c06 100644
--- a/discovery/aws/aws.go
+++ b/discovery/aws/aws.go
@@ -14,10 +14,13 @@
package aws
import (
+ "context"
"errors"
"fmt"
"time"
+ awsConfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -100,6 +103,12 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
}
*c = SDConfig(aux)
+ var err error
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
+ }
+
switch c.Role {
case RoleEC2:
if c.EC2SDConfig == nil {
@@ -107,9 +116,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
c.EC2SDConfig = &ec2Config
}
c.EC2SDConfig.HTTPClientConfig = c.HTTPClientConfig
- if c.Region != "" {
- c.EC2SDConfig.Region = c.Region
- }
+ c.EC2SDConfig.Region = c.Region
if c.Endpoint != "" {
c.EC2SDConfig.Endpoint = c.Endpoint
}
@@ -140,9 +147,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
c.ECSSDConfig = &ecsConfig
}
c.ECSSDConfig.HTTPClientConfig = c.HTTPClientConfig
- if c.Region != "" {
- c.ECSSDConfig.Region = c.Region
- }
+ c.ECSSDConfig.Region = c.Region
if c.Endpoint != "" {
c.ECSSDConfig.Endpoint = c.Endpoint
}
@@ -173,9 +178,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
c.LightsailSDConfig = &lightsailConfig
}
c.LightsailSDConfig.HTTPClientConfig = c.HTTPClientConfig
- if c.Region != "" {
- c.LightsailSDConfig.Region = c.Region
- }
+ c.LightsailSDConfig.Region = c.Region
if c.Endpoint != "" {
c.LightsailSDConfig.Endpoint = c.Endpoint
}
@@ -203,9 +206,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
c.MSKSDConfig = &mskConfig
}
c.MSKSDConfig.HTTPClientConfig = c.HTTPClientConfig
- if c.Region != "" {
- c.MSKSDConfig.Region = c.Region
- }
+ c.MSKSDConfig.Region = c.Region
if c.Endpoint != "" {
c.MSKSDConfig.Endpoint = c.Endpoint
}
@@ -268,3 +269,32 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
return nil, fmt.Errorf("unknown AWS SD role %q", c.Role)
}
}
+
+// loadRegion resolves the region in order: explicitly specified region -> AWS config/env vars -> IMDS.
+func loadRegion(ctx context.Context, specifiedRegion string) (string, error) {
+ if specifiedRegion != "" {
+ return specifiedRegion, nil
+ }
+
+ cfg, err := awsConfig.LoadDefaultConfig(ctx)
+ if err != nil {
+ return "", fmt.Errorf("failed to load AWS config: %w", err)
+ }
+
+ if cfg.Region != "" {
+ return cfg.Region, nil
+ }
+
+ // Fallback (may fail in non-AWS environments)
+ imdsClient := imds.NewFromConfig(cfg)
+ region, err := imdsClient.GetRegion(ctx, &imds.GetRegionInput{})
+ if err != nil {
+ return "", fmt.Errorf("failed to get region from IMDS: %w", err)
+ }
+
+ if region.Region == "" {
+ return "", errors.New("region not found in AWS config or IMDS")
+ }
+
+ return region.Region, nil
+}
diff --git a/discovery/aws/aws_test.go b/discovery/aws/aws_test.go
index b47a6cd92c..d1ec7b2282 100644
--- a/discovery/aws/aws_test.go
+++ b/discovery/aws/aws_test.go
@@ -14,7 +14,13 @@
package aws
import (
+ "context"
"errors"
+ "math/rand/v2"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
"testing"
"time"
@@ -309,3 +315,175 @@ func TestMultipleSDConfigsDoNotShareState(t *testing.T) {
})
}
}
+
+// getRandomRegion is a helper to return a pseudo-random AWS region for testing.
+func getRandomRegion() string {
+ regions := []string{
+ "us-east-1",
+ "us-east-2",
+ "us-west-1",
+ "us-west-2",
+ "eu-west-1",
+ "eu-west-2",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ }
+
+ return regions[rand.IntN(len(regions))]
+}
+
+func TestLoadRegion(t *testing.T) {
+ t.Run("with_env_region", func(t *testing.T) {
+ randomRegion := getRandomRegion()
+ t.Setenv("AWS_REGION", randomRegion)
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ t.Setenv("AWS_CONFIG_FILE", "") // Ensure no config file is used
+ t.Setenv("AWS_PROFILE", "") // Ensure no profile file is used
+
+ region, err := loadRegion(context.Background(), "")
+ require.NoError(t, err)
+ require.Equal(t, randomRegion, region)
+ })
+
+ t.Run("with_config_file_default_profile", func(t *testing.T) {
+ randomRegion := getRandomRegion()
+
+ // Create a temporary AWS config file
+ tmpDir := t.TempDir()
+ configFile := filepath.Join(tmpDir, "config")
+
+ configContent := `[default]
+region = ` + randomRegion + `
+`
+
+ err := os.WriteFile(configFile, []byte(configContent), 0o644)
+ require.NoError(t, err)
+ defer os.Remove(configFile)
+
+ // Set up environment to use the config file
+ t.Setenv("AWS_CONFIG_FILE", configFile)
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ // Clear any region environment variables to force config file usage
+ t.Setenv("AWS_REGION", "")
+ t.Setenv("AWS_PROFILE", "") // Ensure no profile file is used
+ t.Setenv("AWS_DEFAULT_REGION", "")
+
+ region, err := loadRegion(context.Background(), "")
+ require.NoError(t, err)
+ require.Equal(t, randomRegion, region)
+ })
+
+ t.Run("with_config_file_named_profile", func(t *testing.T) {
+ randomRegion := getRandomRegion()
+
+ // Create a temporary AWS config file
+ tmpDir := t.TempDir()
+ configFile := filepath.Join(tmpDir, "config")
+
+ configContent := `[default]
+region = ` + getRandomRegion() + `
+
+[profile ` + randomRegion + `-profile]
+region = ` + randomRegion + `
+`
+
+ err := os.WriteFile(configFile, []byte(configContent), 0o644)
+ require.NoError(t, err)
+ defer os.Remove(configFile)
+
+ // Set up environment to use the config file
+ t.Setenv("AWS_CONFIG_FILE", configFile)
+ t.Setenv("AWS_PROFILE", randomRegion+"-profile")
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ // Clear any region environment variables to force config file usage
+ t.Setenv("AWS_REGION", "")
+ t.Setenv("AWS_DEFAULT_REGION", "")
+
+ region, err := loadRegion(context.Background(), "")
+ require.NoError(t, err)
+ require.Equal(t, randomRegion, region)
+ })
+
+ t.Run("with_specified_region", func(t *testing.T) {
+ specifiedRegion := getRandomRegion()
+
+ // Even with environment region set differently, specified region should take precedence
+ t.Setenv("AWS_REGION", getRandomRegion())
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+
+ region, err := loadRegion(context.Background(), specifiedRegion)
+ require.NoError(t, err)
+ require.Equal(t, specifiedRegion, region)
+ })
+
+ t.Run("imds_fallback", func(t *testing.T) {
+ randomRegion := getRandomRegion()
+
+ // Mock IMDS server that returns a region
+ mockIMDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Handle instance identity document (contains region info)
+ if r.URL.Path == "/latest/dynamic/instance-identity/document" {
+ imdsPayload := `{"region": "` + randomRegion + `"}`
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(imdsPayload))
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer mockIMDS.Close()
+
+ // Set up environment with no region but valid credentials
+ // This will force fallback to IMDS
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ // Unset any existing region
+ t.Setenv("AWS_REGION", "")
+ t.Setenv("AWS_DEFAULT_REGION", "")
+ t.Setenv("AWS_CONFIG_FILE", "") // Ensure no config file is used
+ t.Setenv("AWS_PROFILE", "") // Ensure no profile file is used
+ // Point IMDS to our mock server
+ t.Setenv("AWS_EC2_METADATA_SERVICE_ENDPOINT", mockIMDS.URL)
+
+ region, err := loadRegion(context.Background(), "")
+ require.NoError(t, err)
+ require.Equal(t, randomRegion, region)
+ })
+
+ t.Run("imds_empty_region", func(t *testing.T) {
+ // Mock IMDS server that returns empty region
+ mockIMDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Handle instance identity document with empty region
+ if r.URL.Path == "/latest/dynamic/instance-identity/document" {
+ imdsPayload := `{"region": ""}`
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(imdsPayload))
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer mockIMDS.Close()
+
+ // Set up environment with no region but valid credentials
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ // Unset any existing region
+ t.Setenv("AWS_REGION", "")
+ t.Setenv("AWS_DEFAULT_REGION", "")
+ t.Setenv("AWS_CONFIG_FILE", "") // Ensure no config file is used
+ t.Setenv("AWS_PROFILE", "") // Ensure no profile file is used
+ // Point IMDS to our mock server
+ t.Setenv("AWS_EC2_METADATA_SERVICE_ENDPOINT", mockIMDS.URL)
+
+ _, err := loadRegion(context.Background(), "")
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "failed to get region from IMDS")
+ })
+}
diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go
index 19ecebd491..4daff43ecc 100644
--- a/discovery/aws/ec2.go
+++ b/discovery/aws/ec2.go
@@ -27,7 +27,6 @@ import (
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
- "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/ec2"
ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/aws-sdk-go-v2/service/sts"
@@ -125,31 +124,10 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
- if c.Region == "" {
- cfg, err := awsConfig.LoadDefaultConfig(context.Background())
- if err != nil {
- return err
- }
-
- if cfg.Region != "" {
- // If the region is already set in the config, use it.
- // This can happen if the user has set the region in the AWS config file or environment variables.
- c.Region = cfg.Region
- }
-
- if c.Region == "" {
- // Try to get the region from the instance metadata service (IMDS).
- imdsClient := imds.NewFromConfig(cfg)
- region, err := imdsClient.GetRegion(context.Background(), &imds.GetRegionInput{})
- if err != nil {
- return err
- }
- c.Region = region.Region
- }
- }
-
- if c.Region == "" {
- return errors.New("EC2 SD configuration requires a region")
+ // Check if the region is set, if not attempt to load it from the AWS SDK.
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
}
for _, f := range c.Filters {
diff --git a/discovery/aws/ecs.go b/discovery/aws/ecs.go
index 1d5ff366de..e9d578aec3 100644
--- a/discovery/aws/ecs.go
+++ b/discovery/aws/ecs.go
@@ -27,7 +27,6 @@ import (
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
- "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/ec2"
"github.com/aws/aws-sdk-go-v2/service/ecs"
"github.com/aws/aws-sdk-go-v2/service/ecs/types"
@@ -137,17 +136,9 @@ func (c *ECSSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
- if c.Region == "" {
- cfg, err := awsConfig.LoadDefaultConfig(context.TODO())
- if err != nil {
- return err
- }
- client := imds.NewFromConfig(cfg)
- result, err := client.GetRegion(context.Background(), &imds.GetRegionInput{})
- if err != nil {
- return fmt.Errorf("ECS SD configuration requires a region. Tried to fetch it from the instance metadata: %w", err)
- }
- c.Region = result.Region
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
}
return c.HTTPClientConfig.Validate()
diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go
index b13f26cc5f..69a5b6625f 100644
--- a/discovery/aws/lightsail.go
+++ b/discovery/aws/lightsail.go
@@ -26,7 +26,6 @@ import (
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
- "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/lightsail"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/smithy-go"
@@ -106,30 +105,9 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
- if c.Region == "" {
- cfg, err := awsConfig.LoadDefaultConfig(context.Background())
- if err != nil {
- return err
- }
-
- if cfg.Region != "" {
- // Use the region from the AWS config. It will load environment variables and shared config files.
- c.Region = cfg.Region
- }
-
- if c.Region == "" {
- // Try to get the region from the instance metadata service (IMDS).
- imdsClient := imds.NewFromConfig(cfg)
- region, err := imdsClient.GetRegion(context.Background(), &imds.GetRegionInput{})
- if err != nil {
- return err
- }
- c.Region = region.Region
- }
- }
-
- if c.Region == "" {
- return errors.New("lightsail SD configuration requires a region")
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
}
return c.HTTPClientConfig.Validate()
diff --git a/discovery/aws/msk.go b/discovery/aws/msk.go
index 2a2b240d49..a68960066f 100644
--- a/discovery/aws/msk.go
+++ b/discovery/aws/msk.go
@@ -27,7 +27,6 @@ import (
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
- "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/kafka"
"github.com/aws/aws-sdk-go-v2/service/kafka/types"
"github.com/aws/aws-sdk-go-v2/service/sts"
@@ -136,29 +135,9 @@ func (c *MSKSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
- if c.Region == "" {
- cfg, err := awsConfig.LoadDefaultConfig(context.Background())
- if err != nil {
- return err
- }
- if cfg.Region != "" {
- // If the region is already set in the config, use it (env vars).
- c.Region = cfg.Region
- }
-
- if c.Region == "" {
- // Try to get the region from IMDS.
- imdsClient := imds.NewFromConfig(cfg)
- region, err := imdsClient.GetRegion(context.Background(), &imds.GetRegionInput{})
- if err != nil {
- return err
- }
- c.Region = region.Region
- }
- }
-
- if c.Region == "" {
- return errors.New("MSK SD configuration requires a region")
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
}
return c.HTTPClientConfig.Validate()
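The net effect is a single resolution order shared by all four AWS SD roles (EC2, ECS, Lightsail, MSK): an explicitly configured `region` wins, then the AWS SDK's default config (environment variables and shared config files), and finally IMDS; only when all three come up empty does configuration loading fail. This replaces four per-role copies of the same fallback logic with one `loadRegion` helper, and the tests added above exercise each step of the chain.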
From 90166d3ddb0be9986f5d32a35379d7b35a7636c2 Mon Sep 17 00:00:00 2001
From: ffgan
Date: Tue, 10 Feb 2026 19:41:44 +0800
Subject: [PATCH 136/165] Build riscv64 docker image by default (#17508)
* Allow building riscv64 docker image
Co-authored-by: nijincheng@iscas.ac.cn
Signed-off-by: ffgan
* Update Makefile
Co-authored-by: Ben Kochie
Signed-off-by: ffgan
---------
Signed-off-by: ffgan
Co-authored-by: Ben Kochie
---
.dockerignore | 1 +
Makefile | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/.dockerignore b/.dockerignore
index 5eca8e1b80..c528ea1189 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -7,3 +7,4 @@ data/
!.build/linux-arm64/
!.build/linux-ppc64le/
!.build/linux-s390x/
+!.build/linux-riscv64/
diff --git a/Makefile b/Makefile
index 8bc4a3dcaa..ad4b90f020 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@
# limitations under the License.
# Needs to be defined before including Makefile.common to auto-generate targets
-DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x
+DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le riscv64 s390x
UI_PATH = web/ui
UI_NODE_MODULES_PATH = $(UI_PATH)/node_modules
From 7b02d25f6c0ac3fb326956d88e41217ce736d6f5 Mon Sep 17 00:00:00 2001
From: Ben Kochie
Date: Tue, 10 Feb 2026 17:27:02 +0100
Subject: [PATCH 137/165] Increase Renovate speed (#18052)
* Allow Renovate to run all day on the monthly day.
* Increase the max PRs from 10 to 20.
* Increase the PR open rate from 2 to 5.
Signed-off-by: SuperQ
---
renovate.json | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/renovate.json b/renovate.json
index 350cfe2a0d..814193329a 100644
--- a/renovate.json
+++ b/renovate.json
@@ -9,11 +9,13 @@
"gomodTidy",
"gomodUpdateImportPaths"
],
- "schedule": ["* 11 21 * *"],
+ "schedule": ["* * 21 * *"],
"timezone": "UTC",
"github-actions": {
"managerFilePatterns": ["scripts/**"]
},
+ "prConcurrentLimit": 20,
+ "prHourlyLimit": 5,
"packageRules": [
{
"description": "Don't update replace directives",
From 6fec996722600b0b4e6142b4c6931071edf57f73 Mon Sep 17 00:00:00 2001
From: George Krajcsovits
Date: Tue, 10 Feb 2026 17:33:06 +0100
Subject: [PATCH 138/165] docs(api): clarify metadata vs remote protocols
(#17481)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: György Krajcsovits
---
docs/querying/api.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/docs/querying/api.md b/docs/querying/api.md
index 7324669699..78574ec103 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -1029,6 +1029,7 @@ curl http://localhost:9090/api/v1/alerts
## Querying target metadata
The following endpoint returns metadata about metrics currently scraped from targets.
+The endpoint has the limitation that only metadata scraped directly from targets is returned; metadata sent to Prometheus over Remote-Write or OTLP is not included and will not show up in the UI under "Explore Metrics".
This is **experimental** and might change in the future.
```
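(For context, the section being amended documents the `/api/v1/targets/metadata` endpoint, so the limitation can be observed directly by comparing the output of e.g. `curl http://localhost:9090/api/v1/targets/metadata` against metrics that arrived via Remote-Write or OTLP.)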
From d8c24c6bde7c3b3f03f6c3ef2a8c8c8238e8f789 Mon Sep 17 00:00:00 2001
From: Patryk Prus
Date: Tue, 10 Feb 2026 12:44:06 -0500
Subject: [PATCH 139/165] tsdb/index: export sentinel error for symbol table
size exceeded
Signed-off-by: Patryk Prus