diff --git a/.dockerignore b/.dockerignore
index 5eca8e1b80..c528ea1189 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -7,3 +7,4 @@ data/
!.build/linux-arm64/
!.build/linux-ppc64le/
!.build/linux-s390x/
+!.build/linux-riscv64/
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..432caee6f7
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+web/api/v1/testdata/openapi_golden.yaml linguist-generated
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index ec4eef8dae..7873822f26 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -28,6 +28,7 @@ If no, just write "NONE" in the release-notes block below.
Otherwise, please describe what should be mentioned in the CHANGELOG. Use the following prefixes:
[FEATURE] [ENHANCEMENT] [PERF] [BUGFIX] [SECURITY] [CHANGE]
Refer to the existing CHANGELOG for inspiration: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
+A concrete example may look as follows (be sure to leave out the surrounding quotes): "[FEATURE] API: Add /api/v1/features for clients to understand which features are supported".
If you need help formulating your entries, consult the reviewer(s).
-->
```release-notes
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8d25176252..87b6fb90a0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -3,6 +3,8 @@ name: CI
on:
pull_request:
push:
+ branches: [main, 'release-*']
+ tags: ['v*']
permissions:
contents: read
@@ -19,7 +21,7 @@ jobs:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- uses: ./.github/promci/actions/setup_environment
with:
enable_npm: true
@@ -37,7 +39,7 @@ jobs:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
- run: go test --tags=slicelabels -race ./cmd/prometheus ./model/textparse ./prompb/...
@@ -81,7 +83,7 @@ jobs:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- uses: ./.github/promci/actions/setup_environment
with:
enable_go: false
@@ -100,7 +102,7 @@ jobs:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+ - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25.x
- run: |
@@ -146,7 +148,7 @@ jobs:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- uses: ./.github/promci/actions/build
with:
promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -173,7 +175,7 @@ jobs:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- uses: ./.github/promci/actions/build
with:
parallelism: 12
@@ -212,7 +214,7 @@ jobs:
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- uses: ./.github/promci/actions/setup_environment
with:
enable_npm: true
@@ -227,7 +229,7 @@ jobs:
with:
persist-credentials: false
- name: Install Go
- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+ uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25.x
- name: Install snmp_exporter/generator dependencies
@@ -270,7 +272,7 @@ jobs:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- uses: ./.github/promci/actions/publish_main
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -289,7 +291,7 @@ jobs:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- uses: ./.github/promci/actions/publish_release
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -306,13 +308,13 @@ jobs:
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- name: Install nodejs
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
- - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
+ - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml
index 776e0a67c5..0afcbe6f0c 100644
--- a/.github/workflows/fuzzing.yml
+++ b/.github/workflows/fuzzing.yml
@@ -13,11 +13,11 @@ jobs:
fuzz_test: [FuzzParseMetricText, FuzzParseOpenMetric, FuzzParseMetricSelector, FuzzParseExpr]
steps:
- name: Checkout repository
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
with:
persist-credentials: false
- name: Install Go
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+ uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25.x
- name: Run Fuzzing
@@ -29,7 +29,7 @@ jobs:
if: failure()
with:
name: fuzz-artifacts-${{ matrix.fuzz_test }}
- path: promql/testdata/fuzz/${{ matrix.fuzz_test }}
+ path: util/fuzzing/testdata/fuzz/${{ matrix.fuzz_test }}
fuzzing_status:
# This status check aggregates the individual matrix jobs of the fuzzing
# step into a final status. Fails if a single matrix job fails, succeeds if
diff --git a/.golangci.yml b/.golangci.yml
index 0c866611e9..8cb3265f4f 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -102,6 +102,10 @@ linters:
desc: "Use github.com/klauspost/compress instead of zlib"
- pkg: "golang.org/x/exp/slices"
desc: "Use 'slices' instead."
+ - pkg: "gopkg.in/yaml.v2"
+ desc: "Use go.yaml.in/yaml/v2 instead of gopkg.in/yaml.v2"
+ - pkg: "gopkg.in/yaml.v3"
+ desc: "Use go.yaml.in/yaml/v3 instead of gopkg.in/yaml.v3"
errcheck:
exclude-functions:
# Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
@@ -124,6 +128,8 @@ linters:
# Disable this check for now since it introduces too many changes in our existing codebase.
# See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
- omitzero
+ # Disable waitgroup check until we really move to Go 1.25.
+ - waitgroup
perfsprint:
# Optimizes even if it requires an int or uint type cast.
int-conversion: true
diff --git a/.yamllint b/.yamllint
index 8d09c375fd..b329f464fb 100644
--- a/.yamllint
+++ b/.yamllint
@@ -2,6 +2,7 @@
extends: default
ignore: |
**/node_modules
+ web/api/v1/testdata/openapi_*_golden.yaml
rules:
braces:
diff --git a/CODEOWNERS b/CODEOWNERS
index f28cdbf832..2c5dedbffa 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -2,25 +2,28 @@
# Please keep this file in sync with the MAINTAINERS.md file!
#
+# Prometheus team members are members of the "default maintainers" github team.
+# They are code owners by default for the whole repo.
+* @prometheus/default-maintainers
+
# Subsystems.
-/Makefile @simonpasquier @SuperQ
-/cmd/promtool @dgl
-/documentation/prometheus-mixin @metalmatze
-/model/histogram @beorn7 @krajorama
-/web/ui @juliusv
-/web/ui/module @juliusv @nexucis
-/promql @roidelapluie
-/storage/remote @cstyan @bwplotka @tomwilkie @npazosmendez @alexgreenbank
-/storage/remote/otlptranslator @aknuds1 @jesusvazquez @ArthurSens
-/tsdb @jesusvazquez @codesome @bwplotka @krajorama
+/Makefile @prometheus/default-maintainers @simonpasquier @SuperQ
+/cmd/promtool @prometheus/default-maintainers @dgl
+/documentation/prometheus-mixin @prometheus/default-maintainers @metalmatze
+/model/histogram @prometheus/default-maintainers @beorn7 @krajorama
+/web/ui @prometheus/default-maintainers @juliusv
+/web/ui/module @prometheus/default-maintainers @juliusv @nexucis
+/promql @prometheus/default-maintainers @roidelapluie
+/storage/remote @prometheus/default-maintainers @cstyan @bwplotka @tomwilkie @alexgreenbank
+/storage/remote/otlptranslator @prometheus/default-maintainers @aknuds1 @jesusvazquez @ArthurSens
+/tsdb @prometheus/default-maintainers @jesusvazquez @codesome @bwplotka @krajorama
# Service discovery.
-/discovery/kubernetes @brancz
-/discovery/stackit @jkroepke
+/discovery/kubernetes @prometheus/default-maintainers @brancz
+/discovery/stackit @prometheus/default-maintainers @jkroepke
+/discovery/aws/ @prometheus/default-maintainers @matt-gp @sysadmind
# Pending
-# https://github.com/prometheus/prometheus/pull/17105#issuecomment-3248209452
-# /discovery/aws/ @matt-gp @sysadmind
# https://github.com/prometheus/prometheus/pull/15212#issuecomment-3575225179
-# /discovery/aliyun @KeyOfSpectator
+# /discovery/aliyun @prometheus/default-maintainers @KeyOfSpectator
# https://github.com/prometheus/prometheus/pull/14108#issuecomment-2639515421
-# /discovery/nomad @jaloren @jrasell
+# /discovery/nomad @prometheus/default-maintainers @jaloren @jrasell
diff --git a/Dockerfile b/Dockerfile
index 071e7441e3..98712d8f9c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -9,7 +9,8 @@ LABEL org.opencontainers.image.authors="The Prometheus Authors" \
org.opencontainers.image.source="https://github.com/prometheus/prometheus" \
org.opencontainers.image.url="https://github.com/prometheus/prometheus" \
org.opencontainers.image.documentation="https://prometheus.io/docs" \
- org.opencontainers.image.licenses="Apache License 2.0"
+ org.opencontainers.image.licenses="Apache License 2.0" \
+ io.prometheus.image.variant="busybox"
ARG ARCH="amd64"
ARG OS="linux"
diff --git a/Dockerfile.distroless b/Dockerfile.distroless
new file mode 100644
index 0000000000..0ee184a91c
--- /dev/null
+++ b/Dockerfile.distroless
@@ -0,0 +1,29 @@
+ARG DISTROLESS_ARCH="amd64"
+
+# Use DISTROLESS_ARCH for base image selection (handles armv7->arm mapping).
+FROM gcr.io/distroless/static-debian13:nonroot-${DISTROLESS_ARCH}
+# Base image sets USER to 65532:65532 (nonroot user).
+
+ARG ARCH="amd64"
+ARG OS="linux"
+
+LABEL org.opencontainers.image.authors="The Prometheus Authors"
+LABEL org.opencontainers.image.vendor="Prometheus"
+LABEL org.opencontainers.image.title="Prometheus"
+LABEL org.opencontainers.image.description="The Prometheus monitoring system and time series database"
+LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus"
+LABEL org.opencontainers.image.url="https://github.com/prometheus/prometheus"
+LABEL org.opencontainers.image.documentation="https://prometheus.io/docs"
+LABEL org.opencontainers.image.licenses="Apache License 2.0"
+LABEL io.prometheus.image.variant="distroless"
+
+COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml
+COPY LICENSE NOTICE npm_licenses.tar.bz2 /
+COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus
+COPY .build/${OS}-${ARCH}/promtool /bin/promtool
+
+WORKDIR /prometheus
+EXPOSE 9090
+ENTRYPOINT [ "/bin/prometheus" ]
+CMD [ "--config.file=/etc/prometheus/prometheus.yml", \
+ "--storage.tsdb.path=/prometheus" ]
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index f23c7fbd63..ae61059af5 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -6,6 +6,7 @@ General maintainers:
* Bryan Boreham (bjboreham@gmail.com / @bboreham)
* Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
* Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
+* György Krajcsovits ( / @krajorama)
Maintainers for specific parts of the codebase:
* `cmd`
@@ -15,12 +16,10 @@ Maintainers for specific parts of the codebase:
* `stackit`: Jan-Otto Kröpke ( / @jkroepke)
* `documentation`
* `prometheus-mixin`: Matthias Loibl ( / @metalmatze)
-* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7),
-George Krajcsovits ( / @krajorama)
* `storage`
- * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank)
+ * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Alex Greenbank ( / @alexgreenbank)
* `otlptranslator`: Arthur Silva Sens ( / @ArthurSens), Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez)
-* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez), George Krajcsovits ( / @krajorama)
+* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez)
* `web`
* `ui`: Julius Volz ( / @juliusv)
* `module`: Augustin Husson ( / @nexucis)
diff --git a/Makefile b/Makefile
index 8bc4a3dcaa..ad4b90f020 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@
# limitations under the License.
# Needs to be defined before including Makefile.common to auto-generate targets
-DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x
+DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le riscv64 s390x
UI_PATH = web/ui
UI_NODE_MODULES_PATH = $(UI_PATH)/node_modules
diff --git a/Makefile.common b/Makefile.common
index 7beae6e58f..b8c9b3844c 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -82,11 +82,32 @@ endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
-DOCKERFILE_PATH ?= ./Dockerfile
DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO ?= prom
+# Check if deprecated DOCKERFILE_PATH is set
+ifdef DOCKERFILE_PATH
+$(error DOCKERFILE_PATH is deprecated. Use DOCKERFILE_VARIANTS ?= $(DOCKERFILE_PATH) in the Makefile)
+endif
+
DOCKER_ARCHS ?= amd64
+DOCKERFILE_VARIANTS ?= Dockerfile $(wildcard Dockerfile.*)
+
+# Function to extract variant from Dockerfile label.
+# Returns the variant name from io.prometheus.image.variant label, or "default" if not found.
+define dockerfile_variant
+$(strip $(or $(shell sed -n 's/.*io\.prometheus\.image\.variant="\([^"]*\)".*/\1/p' $(1)),default))
+endef
+
+# Check for duplicate variant names (including default for Dockerfiles without labels).
+DOCKERFILE_VARIANT_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)))
+DOCKERFILE_VARIANT_NAMES_SORTED := $(sort $(DOCKERFILE_VARIANT_NAMES))
+ifneq ($(words $(DOCKERFILE_VARIANT_NAMES)),$(words $(DOCKERFILE_VARIANT_NAMES_SORTED)))
+$(error Duplicate variant names found. Each Dockerfile must have a unique io.prometheus.image.variant label, and only one can be without a label (default))
+endif
+
+# Build variant:dockerfile pairs for shell iteration.
+DOCKERFILE_VARIANTS_WITH_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)):$(df))
BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
@@ -226,28 +247,110 @@ common-docker-repo-name:
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
- -f $(DOCKERFILE_PATH) \
- --build-arg ARCH="$*" \
- --build-arg OS="linux" \
- $(DOCKERBUILD_CONTEXT)
+ @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
+ dockerfile=$${variant#*:}; \
+ variant_name=$${variant%%:*}; \
+ distroless_arch="$*"; \
+ if [ "$*" = "armv7" ]; then \
+ distroless_arch="arm"; \
+ fi; \
+ if [ "$$dockerfile" = "Dockerfile" ]; then \
+ echo "Building default variant ($$variant_name) for linux-$* using $$dockerfile"; \
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
+ -f $$dockerfile \
+ --build-arg ARCH="$*" \
+ --build-arg OS="linux" \
+ --build-arg DISTROLESS_ARCH="$$distroless_arch" \
+ $(DOCKERBUILD_CONTEXT); \
+ if [ "$$variant_name" != "default" ]; then \
+ echo "Tagging default variant with $$variant_name suffix"; \
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
+ "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
+ fi; \
+ else \
+ echo "Building $$variant_name variant for linux-$* using $$dockerfile"; \
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" \
+ -f $$dockerfile \
+ --build-arg ARCH="$*" \
+ --build-arg OS="linux" \
+ --build-arg DISTROLESS_ARCH="$$distroless_arch" \
+ $(DOCKERBUILD_CONTEXT); \
+ fi; \
+ done
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
+ @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
+ dockerfile=$${variant#*:}; \
+ variant_name=$${variant%%:*}; \
+ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ echo "Pushing $$variant_name variant for linux-$*"; \
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
+ fi; \
+ if [ "$$dockerfile" = "Dockerfile" ]; then \
+ echo "Pushing default variant ($$variant_name) for linux-$*"; \
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"; \
+ fi; \
+ if [ "$(DOCKER_IMAGE_TAG)" = "latest" ]; then \
+ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ echo "Pushing $$variant_name variant version tags for linux-$*"; \
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \
+ fi; \
+ if [ "$$dockerfile" = "Dockerfile" ]; then \
+ echo "Pushing default variant version tag for linux-$*"; \
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"; \
+ fi; \
+ fi; \
+ done
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+ @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
+ dockerfile=$${variant#*:}; \
+ variant_name=$${variant%%:*}; \
+ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ echo "Tagging $$variant_name variant for linux-$* as latest"; \
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest-$$variant_name"; \
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \
+ fi; \
+ if [ "$$dockerfile" = "Dockerfile" ]; then \
+ echo "Tagging default variant ($$variant_name) for linux-$* as latest"; \
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"; \
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"; \
+ fi; \
+ done
.PHONY: common-docker-manifest
common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
+ @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
+ dockerfile=$${variant#*:}; \
+ variant_name=$${variant%%:*}; \
+ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ echo "Creating manifest for $$variant_name variant"; \
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name); \
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
+ fi; \
+ if [ "$$dockerfile" = "Dockerfile" ]; then \
+ echo "Creating default variant ($$variant_name) manifest"; \
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)); \
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"; \
+ fi; \
+ if [ "$(DOCKER_IMAGE_TAG)" = "latest" ]; then \
+ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ echo "Creating manifest for $$variant_name variant version tag"; \
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name); \
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \
+ fi; \
+ if [ "$$dockerfile" = "Dockerfile" ]; then \
+ echo "Creating default variant version tag manifest"; \
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):v$(DOCKER_MAJOR_VERSION_TAG)); \
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)"; \
+ fi; \
+ fi; \
+ done
.PHONY: promu
promu: $(PROMU)
diff --git a/README.md b/README.md
index 7b04a51cee..030a827952 100644
--- a/README.md
+++ b/README.md
@@ -159,6 +159,15 @@ produce a fully working image when run locally.
## Using Prometheus as a Go Library
+Within the Prometheus project, repositories such as [prometheus/common](https://github.com/prometheus/common) and
+[prometheus/client-golang](https://github.com/prometheus/client-golang) are designed as re-usable libraries.
+
+The [prometheus/prometheus](https://github.com/prometheus/prometheus) repository builds a stand-alone program and is not
+designed for use as a library. We are aware that people do use parts as such,
+and we do not put any deliberate inconvenience in the way, but we want you to be
+aware that no care has been taken to make it work well as a library. For instance,
+you may encounter errors that only surface when used as a library.
+
### Remote Write
We are publishing our Remote Write protobuf independently at
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index b06b6095b3..6bee6dd25d 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -219,6 +219,8 @@ type flagConfig struct {
promqlEnableDelayedNameRemoval bool
+ parserOpts parser.Options
+
promslogConfig promslog.Config
}
@@ -256,23 +258,36 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
c.enableConcurrentRuleEval = true
logger.Info("Experimental concurrent rule evaluation enabled.")
case "promql-experimental-functions":
- parser.EnableExperimentalFunctions = true
+ c.parserOpts.EnableExperimentalFunctions = true
logger.Info("Experimental PromQL functions enabled.")
case "promql-duration-expr":
- parser.ExperimentalDurationExpr = true
+ c.parserOpts.ExperimentalDurationExpr = true
logger.Info("Experimental duration expression parsing enabled.")
case "native-histograms":
logger.Warn("This option for --enable-feature is a no-op. To scrape native histograms, set the scrape_native_histograms scrape config setting to true.", "option", o)
case "ooo-native-histograms":
logger.Warn("This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
case "created-timestamp-zero-ingestion":
+ // NOTE(bwplotka): Once AppendableV1 is removed, there will be only the TSDB and agent flags.
c.scrape.EnableStartTimestampZeroIngestion = true
c.web.STZeroIngestionEnabled = true
+ c.tsdb.EnableSTAsZeroSample = true
c.agent.EnableSTAsZeroSample = true
+
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
+ // This is to widen the ST support surface.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
- logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+ logger.Info("Experimental start timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+ case "st-storage":
+ // TODO(bwplotka): Implement ST Storage as per PROM-60 and document this hidden feature flag.
+ c.tsdb.EnableSTStorage = true
+ c.agent.EnableSTStorage = true
+
+ // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. This is to widen the ST support surface.
+ config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
+ config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
+ logger.Info("Experimental start timestamp storage enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
case "delayed-compaction":
c.tsdb.EnableDelayedCompaction = true
logger.Info("Experimental delayed compaction is enabled.")
@@ -280,8 +295,11 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
c.promqlEnableDelayedNameRemoval = true
logger.Info("Experimental PromQL delayed name removal enabled.")
case "promql-extended-range-selectors":
- parser.EnableExtendedRangeSelectors = true
+ c.parserOpts.EnableExtendedRangeSelectors = true
logger.Info("Experimental PromQL extended range selectors enabled.")
+ case "promql-binop-fill-modifiers":
+ c.parserOpts.EnableBinopFillModifiers = true
+ logger.Info("Experimental PromQL binary operator fill modifiers enabled.")
case "":
continue
case "old-ui":
@@ -581,7 +599,7 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
- a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+ a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors, promql-binop-fill-modifiers. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
@@ -617,6 +635,8 @@ func main() {
os.Exit(1)
}
+ promqlParser := parser.NewParser(cfg.parserOpts)
+
if agentMode && len(serverOnlyFlags) > 0 {
fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
os.Exit(3)
@@ -671,7 +691,7 @@ func main() {
}
// Parse rule files to verify they exist and contain valid rules.
- if err := rules.ParseFiles(cfgFile.RuleFiles, cfgFile.GlobalConfig.MetricNameValidationScheme); err != nil {
+ if err := rules.ParseFiles(cfgFile.RuleFiles, cfgFile.GlobalConfig.MetricNameValidationScheme, promqlParser); err != nil {
absPath, pathErr := filepath.Abs(cfg.configFile)
if pathErr != nil {
absPath = cfg.configFile
@@ -692,6 +712,7 @@ func main() {
}
if cfgFile.StorageConfig.TSDBConfig != nil {
cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
+ cfg.tsdb.StaleSeriesCompactionThreshold = cfgFile.StorageConfig.TSDBConfig.StaleSeriesCompactionThreshold
if cfgFile.StorageConfig.TSDBConfig.Retention != nil {
if cfgFile.StorageConfig.TSDBConfig.Retention.Time > 0 {
cfg.tsdb.RetentionDuration = cfgFile.StorageConfig.TSDBConfig.Retention.Time
@@ -881,7 +902,7 @@ func main() {
&cfg.scrape,
logger.With("component", "scrape manager"),
logging.NewJSONFileLogger,
- fanoutStorage,
+ nil, fanoutStorage,
prometheus.DefaultRegisterer,
)
if err != nil {
@@ -913,6 +934,7 @@ func main() {
EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval,
EnableTypeAndUnitLabels: cfg.scrape.EnableTypeAndUnitLabels,
FeatureRegistry: features.DefaultRegistry,
+ Parser: promqlParser,
}
queryEngine = promql.NewEngine(opts)
@@ -936,6 +958,7 @@ func main() {
return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
},
FeatureRegistry: features.DefaultRegistry,
+ Parser: promqlParser,
})
}
@@ -955,6 +978,7 @@ func main() {
cfg.web.LookbackDelta = time.Duration(cfg.lookbackDelta)
cfg.web.IsAgent = agentMode
cfg.web.AppName = modeAppName
+ cfg.web.Parser = promqlParser
cfg.web.Version = &web.PrometheusVersion{
Version: version.Version,
@@ -1373,6 +1397,8 @@ func main() {
"WALSegmentSize", cfg.tsdb.WALSegmentSize,
"WALCompressionType", cfg.tsdb.WALCompressionType,
"BlockReloadInterval", cfg.tsdb.BlockReloadInterval,
+ "EnableSTAsZeroSample", cfg.tsdb.EnableSTAsZeroSample,
+ "EnableSTStorage", cfg.tsdb.EnableSTStorage,
)
startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
@@ -1430,6 +1456,7 @@ func main() {
"MaxWALTime", cfg.agent.MaxWALTime,
"OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow,
"EnableSTAsZeroSample", cfg.agent.EnableSTAsZeroSample,
+ "EnableSTStorage", cfg.tsdb.EnableSTStorage,
)
localStorage.Set(db, 0)
@@ -1581,7 +1608,7 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg
logger.Error("Failed to apply configuration", "err", err)
failed = true
}
- timingsLogger = timingsLogger.With((rl.name), time.Since(rstart))
+ timingsLogger = timingsLogger.With(rl.name, time.Since(rstart))
}
if failed {
return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
@@ -1755,6 +1782,14 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender {
return notReadyAppender{}
}
+// AppenderV2 implements the Storage interface.
+func (s *readyStorage) AppenderV2(ctx context.Context) storage.AppenderV2 {
+ if x := s.get(); x != nil {
+ return x.AppenderV2(ctx)
+ }
+ return notReadyAppenderV2{}
+}
+
type notReadyAppender struct{}
// SetOptions does nothing in this appender implementation.
@@ -1788,6 +1823,15 @@ func (notReadyAppender) Commit() error { return tsdb.ErrNotReady }
func (notReadyAppender) Rollback() error { return tsdb.ErrNotReady }
+type notReadyAppenderV2 struct{}
+
+func (notReadyAppenderV2) Append(storage.SeriesRef, labels.Labels, int64, int64, float64, *histogram.Histogram, *histogram.FloatHistogram, storage.AOptions) (storage.SeriesRef, error) {
+ return 0, tsdb.ErrNotReady
+}
+func (notReadyAppenderV2) Commit() error { return tsdb.ErrNotReady }
+
+func (notReadyAppenderV2) Rollback() error { return tsdb.ErrNotReady }
+
// Close implements the Storage interface.
func (s *readyStorage) Close() error {
if x := s.get(); x != nil {
@@ -1932,6 +1976,9 @@ type tsdbOptions struct {
UseUncachedIO bool
BlockCompactionExcludeFunc tsdb.BlockExcludeFilterFunc
BlockReloadInterval model.Duration
+ EnableSTAsZeroSample bool
+ EnableSTStorage bool
+ StaleSeriesCompactionThreshold float64
}
func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
@@ -1958,6 +2005,9 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
BlockCompactionExcludeFunc: opts.BlockCompactionExcludeFunc,
BlockReloadInterval: time.Duration(opts.BlockReloadInterval),
FeatureRegistry: features.DefaultRegistry,
+ EnableSTAsZeroSample: opts.EnableSTAsZeroSample,
+ EnableSTStorage: opts.EnableSTStorage,
+ StaleSeriesCompactionThreshold: opts.StaleSeriesCompactionThreshold,
}
}
@@ -1972,6 +2022,7 @@ type agentOptions struct {
NoLockfile bool
OutOfOrderTimeWindow int64 // TODO(bwplotka): Unused option, fix it or remove.
EnableSTAsZeroSample bool
+ EnableSTStorage bool
}
func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
@@ -1988,6 +2039,7 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option
NoLockfile: opts.NoLockfile,
OutOfOrderTimeWindow: outOfOrderTimeWindow,
EnableSTAsZeroSample: opts.EnableSTAsZeroSample,
+ EnableSTStorage: opts.EnableSTStorage,
}
}
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index 6765bae900..38dfd3f2da 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -395,6 +395,7 @@ func TestTimeMetrics(t *testing.T) {
}
func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames ...string) map[string]float64 {
+ t.Helper()
f, err := reg.Gather()
require.NoError(t, err)
@@ -426,7 +427,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
go func() { done <- prom.Wait() }()
select {
case err := <-done:
- t.Logf("prometheus agent should be still running: %v", err)
+ t.Logf("prometheus agent exited early: %v", err)
actualExitStatus = prom.ProcessState.ExitCode()
case <-time.After(startupTime):
prom.Process.Kill()
@@ -571,12 +572,7 @@ func TestDocumentation(t *testing.T) {
var stdout bytes.Buffer
cmd.Stdout = &stdout
- if err := cmd.Run(); err != nil {
- var exitError *exec.ExitError
- if errors.As(err, &exitError) && exitError.ExitCode() != 0 {
- fmt.Println("Command failed with non-zero exit code")
- }
- }
+ require.NoError(t, cmd.Run(), "failed to generate CLI documentation via --write-documentation")
generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promPath), strings.TrimSuffix(filepath.Base(promPath), ".test"))
@@ -753,7 +749,7 @@ global:
configFile := filepath.Join(tmpDir, "prometheus.yml")
port := testutil.RandomUnprivilegedPort(t)
- os.WriteFile(configFile, []byte(tc.config), 0o777)
+ require.NoError(t, os.WriteFile(configFile, []byte(tc.config), 0o777))
prom := prometheusCommandWithLogging(
t,
configFile,
@@ -801,7 +797,7 @@ global:
newConfig := `
runtime:
gogc: 99`
- os.WriteFile(configFile, []byte(newConfig), 0o777)
+ require.NoError(t, os.WriteFile(configFile, []byte(newConfig), 0o777))
reloadPrometheusConfig(t, reloadURL)
ensureGOGCValue(99.0)
})
@@ -834,7 +830,7 @@ scrape_configs:
static_configs:
- targets: ['localhost:%d']
`, port, port)
- os.WriteFile(configFile, []byte(config), 0o777)
+ require.NoError(t, os.WriteFile(configFile, []byte(config), 0o777))
prom := prometheusCommandWithLogging(
t,
@@ -995,7 +991,7 @@ func TestRemoteWrite_ReshardingWithoutDeadlock(t *testing.T) {
config := fmt.Sprintf(`
global:
# Using a smaller interval may cause the scrape to time out.
- scrape_interval: 1s
+ scrape_interval: 1s
scrape_configs:
- job_name: 'self'
static_configs:
diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go
index 5e5a9ac3b7..e410f836a9 100644
--- a/cmd/prometheus/query_log_test.go
+++ b/cmd/prometheus/query_log_test.go
@@ -334,7 +334,8 @@ func (p *queryLogTest) run(t *testing.T) {
p.query(t)
- ql := readQueryLog(t, queryLogFile.Name())
+ // Wait for query log entry to be written (avoid race with file I/O).
+ ql := waitForQueryLog(t, queryLogFile.Name(), 1)
qc := len(ql)
if p.exactQueryCount() {
require.Equal(t, 1, qc)
@@ -361,7 +362,8 @@ func (p *queryLogTest) run(t *testing.T) {
p.query(t)
qc++
- ql = readQueryLog(t, queryLogFile.Name())
+ // Wait for query log entry to be written (avoid race with file I/O).
+ ql = waitForQueryLog(t, queryLogFile.Name(), qc)
if p.exactQueryCount() {
require.Len(t, ql, qc)
} else {
@@ -392,7 +394,8 @@ func (p *queryLogTest) run(t *testing.T) {
qc++
- ql = readQueryLog(t, newFile.Name())
+ // Wait for query log entry to be written (avoid race with file I/O).
+ ql = waitForQueryLog(t, newFile.Name(), qc)
if p.exactQueryCount() {
require.Len(t, ql, qc)
} else {
@@ -404,7 +407,8 @@ func (p *queryLogTest) run(t *testing.T) {
p.query(t)
- ql = readQueryLog(t, queryLogFile.Name())
+ // Wait for query log entry to be written (avoid race with file I/O).
+ ql = waitForQueryLog(t, queryLogFile.Name(), 1)
qc = len(ql)
if p.exactQueryCount() {
require.Equal(t, 1, qc)
@@ -446,6 +450,18 @@ func readQueryLog(t *testing.T, path string) []queryLogLine {
return ql
}
+// waitForQueryLog waits for the query log to contain at least minEntries entries,
+// polling at regular intervals until the timeout is reached.
+func waitForQueryLog(t *testing.T, path string, minEntries int) []queryLogLine {
+ t.Helper()
+ var ql []queryLogLine
+ require.Eventually(t, func() bool {
+ ql = readQueryLog(t, path)
+ return len(ql) >= minEntries
+ }, 5*time.Second, 100*time.Millisecond, "timed out waiting for query log to have at least %d entries, got %d", minEntries, len(ql))
+ return ql
+}
+
func TestQueryLog(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
diff --git a/cmd/prometheus/testdata/features.json b/cmd/prometheus/testdata/features.json
index 145bb04d77..c39f60ab33 100644
--- a/cmd/prometheus/testdata/features.json
+++ b/cmd/prometheus/testdata/features.json
@@ -4,6 +4,8 @@
"exclude_alerts": true,
"label_values_match": true,
"lifecycle": false,
+ "openapi_3.1": true,
+ "openapi_3.2": true,
"otlp_write_receiver": false,
"query_stats": true,
"query_warnings": true,
@@ -28,6 +30,9 @@
"by": true,
"delayed_name_removal": false,
"duration_expr": false,
+ "fill": false,
+ "fill_left": false,
+ "fill_right": false,
"group_left": true,
"group_right": true,
"ignoring": true,
@@ -191,6 +196,7 @@
"lightsail": true,
"linode": true,
"marathon": true,
+ "msk": true,
"nerve": true,
"nomad": true,
"openstack": true,
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index 16cc40233a..183b918ba0 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -61,7 +61,10 @@ import (
"github.com/prometheus/prometheus/util/documentcli"
)
-var promqlEnableDelayedNameRemoval = false
+var (
+ promqlEnableDelayedNameRemoval = false
+ promtoolParserOpts parser.Options
+)
func init() {
// This can be removed when the legacy global mode is fully deprecated.
@@ -314,7 +317,7 @@ func main() {
promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()
- featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details").Default("").Strings()
+ featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal, promql-duration-expr, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details").Default("").Strings()
documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
@@ -348,9 +351,13 @@ func main() {
for o := range strings.SplitSeq(f, ",") {
switch o {
case "promql-experimental-functions":
- parser.EnableExperimentalFunctions = true
+ promtoolParserOpts.EnableExperimentalFunctions = true
case "promql-delayed-name-removal":
promqlEnableDelayedNameRemoval = true
+ case "promql-duration-expr":
+ promtoolParserOpts.ExperimentalDurationExpr = true
+ case "promql-extended-range-selectors":
+ promtoolParserOpts.EnableExtendedRangeSelectors = true
case "":
continue
default:
@@ -358,13 +365,14 @@ func main() {
}
}
}
+ promtoolParser := parser.NewParser(promtoolParserOpts)
switch parsedCmd {
case sdCheckCmd.FullCommand():
os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))
case checkConfigCmd.FullCommand():
- os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, *checkConfigIgnoreUnknownFields, model.UTF8Validation, model.Duration(*checkLookbackDelta)), *configFiles...))
+ os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, *checkConfigIgnoreUnknownFields, model.UTF8Validation, model.Duration(*checkLookbackDelta)), promtoolParser, *configFiles...))
case checkServerHealthCmd.FullCommand():
os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper)))
@@ -376,7 +384,7 @@ func main() {
os.Exit(CheckWebConfig(*webConfigFiles...))
case checkRulesCmd.FullCommand():
- os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields, model.UTF8Validation), *ruleFiles...))
+ os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields, model.UTF8Validation), promtoolParser, *ruleFiles...))
case checkMetricsCmd.FullCommand():
os.Exit(CheckMetrics(*checkMetricsExtended, *checkMetricsLint))
@@ -416,6 +424,7 @@ func main() {
EnableNegativeOffset: true,
EnableDelayedNameRemoval: promqlEnableDelayedNameRemoval,
},
+ promtoolParser,
*testRulesRun,
*testRulesDiff,
*testRulesDebug,
@@ -427,7 +436,7 @@ func main() {
os.Exit(checkErr(benchmarkWrite(*benchWriteOutPath, *benchSamplesFile, *benchWriteNumMetrics, *benchWriteNumScrapes)))
case tsdbAnalyzeCmd.FullCommand():
- os.Exit(checkErr(analyzeBlock(ctx, *analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended, *analyzeMatchers)))
+ os.Exit(checkErr(analyzeBlock(ctx, *analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended, *analyzeMatchers, promtoolParser)))
case tsdbListCmd.FullCommand():
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
@@ -437,10 +446,10 @@ func main() {
if *dumpFormat == "seriesjson" {
format = formatSeriesSetLabelsToJSON
}
- os.Exit(checkErr(dumpTSDBData(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, format)))
+ os.Exit(checkErr(dumpTSDBData(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, format, promtoolParser)))
case tsdbDumpOpenMetricsCmd.FullCommand():
- os.Exit(checkErr(dumpTSDBData(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
+ os.Exit(checkErr(dumpTSDBData(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics, promtoolParser)))
// TODO(aSquare14): Work on adding support for custom block size.
case openMetricsImportCmd.FullCommand():
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels))
@@ -456,15 +465,15 @@ func main() {
case promQLFormatCmd.FullCommand():
checkExperimental(*experimental)
- os.Exit(checkErr(formatPromQL(*promQLFormatQuery)))
+ os.Exit(checkErr(formatPromQL(*promQLFormatQuery, promtoolParser)))
case promQLLabelsSetCmd.FullCommand():
checkExperimental(*experimental)
- os.Exit(checkErr(labelsSetPromQL(*promQLLabelsSetQuery, *promQLLabelsSetType, *promQLLabelsSetName, *promQLLabelsSetValue)))
+ os.Exit(checkErr(labelsSetPromQL(*promQLLabelsSetQuery, *promQLLabelsSetType, *promQLLabelsSetName, *promQLLabelsSetValue, promtoolParser)))
case promQLLabelsDeleteCmd.FullCommand():
checkExperimental(*experimental)
- os.Exit(checkErr(labelsDeletePromQL(*promQLLabelsDeleteQuery, *promQLLabelsDeleteName)))
+ os.Exit(checkErr(labelsDeletePromQL(*promQLLabelsDeleteQuery, *promQLLabelsDeleteName, promtoolParser)))
}
}
@@ -589,7 +598,7 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht
}
// CheckConfig validates configuration files.
-func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig, files ...string) int {
+func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig, p parser.Parser, files ...string) int {
failed := false
hasErrors := false
@@ -610,7 +619,7 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig,
if !checkSyntaxOnly {
scrapeConfigsFailed := lintScrapeConfigs(scrapeConfigs, lintSettings)
failed = failed || scrapeConfigsFailed
- rulesFailed, rulesHaveErrors := checkRules(ruleFiles, lintSettings.rulesLintConfig)
+ rulesFailed, rulesHaveErrors := checkRules(ruleFiles, lintSettings.rulesLintConfig, p)
failed = failed || rulesFailed
hasErrors = hasErrors || rulesHaveErrors
}
@@ -837,13 +846,13 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
}
// CheckRules validates rule files.
-func CheckRules(ls rulesLintConfig, files ...string) int {
+func CheckRules(ls rulesLintConfig, p parser.Parser, files ...string) int {
failed := false
hasErrors := false
if len(files) == 0 {
- failed, hasErrors = checkRulesFromStdin(ls)
+ failed, hasErrors = checkRulesFromStdin(ls, p)
} else {
- failed, hasErrors = checkRules(files, ls)
+ failed, hasErrors = checkRules(files, ls, p)
}
if failed && hasErrors {
@@ -857,7 +866,7 @@ func CheckRules(ls rulesLintConfig, files ...string) int {
}
// checkRulesFromStdin validates rule from stdin.
-func checkRulesFromStdin(ls rulesLintConfig) (bool, bool) {
+func checkRulesFromStdin(ls rulesLintConfig, p parser.Parser) (bool, bool) {
failed := false
hasErrors := false
fmt.Println("Checking standard input")
@@ -866,7 +875,7 @@ func checkRulesFromStdin(ls rulesLintConfig) (bool, bool) {
fmt.Fprintln(os.Stderr, " FAILED:", err)
return true, true
}
- rgs, errs := rulefmt.Parse(data, ls.ignoreUnknownFields, ls.nameValidationScheme)
+ rgs, errs := rulefmt.Parse(data, ls.ignoreUnknownFields, ls.nameValidationScheme, p)
if errs != nil {
failed = true
fmt.Fprintln(os.Stderr, " FAILED:")
@@ -895,12 +904,12 @@ func checkRulesFromStdin(ls rulesLintConfig) (bool, bool) {
}
// checkRules validates rule files.
-func checkRules(files []string, ls rulesLintConfig) (bool, bool) {
+func checkRules(files []string, ls rulesLintConfig, p parser.Parser) (bool, bool) {
failed := false
hasErrors := false
for _, f := range files {
fmt.Println("Checking", f)
- rgs, errs := rulefmt.ParseFile(f, ls.ignoreUnknownFields, ls.nameValidationScheme)
+ rgs, errs := rulefmt.ParseFile(f, ls.ignoreUnknownFields, ls.nameValidationScheme, p)
if errs != nil {
failed = true
fmt.Fprintln(os.Stderr, " FAILED:")
@@ -1341,8 +1350,8 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c
return nil
}
-func formatPromQL(query string) error {
- expr, err := parser.ParseExpr(query)
+func formatPromQL(query string, p parser.Parser) error {
+ expr, err := p.ParseExpr(query)
if err != nil {
return err
}
@@ -1351,8 +1360,8 @@ func formatPromQL(query string) error {
return nil
}
-func labelsSetPromQL(query, labelMatchType, name, value string) error {
- expr, err := parser.ParseExpr(query)
+func labelsSetPromQL(query, labelMatchType, name, value string, p parser.Parser) error {
+ expr, err := p.ParseExpr(query)
if err != nil {
return err
}
@@ -1396,8 +1405,8 @@ func labelsSetPromQL(query, labelMatchType, name, value string) error {
return nil
}
-func labelsDeletePromQL(query, name string) error {
- expr, err := parser.ParseExpr(query)
+func labelsDeletePromQL(query, name string, p parser.Parser) error {
+ expr, err := p.ParseExpr(query)
if err != nil {
return err
}
diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go
index 4f4ca3de71..297dd35d70 100644
--- a/cmd/promtool/main_test.go
+++ b/cmd/promtool/main_test.go
@@ -37,6 +37,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
+ "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
)
@@ -187,7 +188,7 @@ func TestCheckDuplicates(t *testing.T) {
c := test
t.Run(c.name, func(t *testing.T) {
t.Parallel()
- rgs, err := rulefmt.ParseFile(c.ruleFile, false, model.UTF8Validation)
+ rgs, err := rulefmt.ParseFile(c.ruleFile, false, model.UTF8Validation, parser.NewParser(parser.Options{}))
require.Empty(t, err)
dups := checkDuplicates(rgs.Groups)
require.Equal(t, c.expectedDups, dups)
@@ -196,7 +197,7 @@ func TestCheckDuplicates(t *testing.T) {
}
func BenchmarkCheckDuplicates(b *testing.B) {
- rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml", false, model.UTF8Validation)
+ rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml", false, model.UTF8Validation, parser.NewParser(parser.Options{}))
require.Empty(b, err)
for b.Loop() {
@@ -602,7 +603,7 @@ func TestCheckRules(t *testing.T) {
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
- exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false, model.UTF8Validation))
+ exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false, model.UTF8Validation), parser.NewParser(parser.Options{}))
require.Equal(t, successExitCode, exitCode)
})
@@ -624,7 +625,7 @@ func TestCheckRules(t *testing.T) {
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
- exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false, model.UTF8Validation))
+ exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false, model.UTF8Validation), parser.NewParser(parser.Options{}))
require.Equal(t, failureExitCode, exitCode)
})
@@ -646,7 +647,7 @@ func TestCheckRules(t *testing.T) {
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
- exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false, model.UTF8Validation))
+ exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false, model.UTF8Validation), parser.NewParser(parser.Options{}))
require.Equal(t, lintErrExitCode, exitCode)
})
}
@@ -655,7 +656,7 @@ func TestCheckRulesWithFeatureFlag(t *testing.T) {
// As opposed to TestCheckRules calling CheckRules directly we run promtool
// so the feature flag parsing can be tested.
- args := []string{"-test.main", "--enable-feature=promql-experimental-functions", "check", "rules", "testdata/features.yml"}
+ args := []string{"-test.main", "--enable-feature=promql-experimental-functions", "--enable-feature=promql-duration-expr", "--enable-feature=promql-extended-range-selectors", "check", "rules", "testdata/features.yml"}
tool := exec.Command(promtoolPath, args...)
err := tool.Run()
require.NoError(t, err)
@@ -664,19 +665,19 @@ func TestCheckRulesWithFeatureFlag(t *testing.T) {
func TestCheckRulesWithRuleFiles(t *testing.T) {
t.Run("rules-good", func(t *testing.T) {
t.Parallel()
- exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false, model.UTF8Validation), "./testdata/rules.yml")
+ exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false, model.UTF8Validation), parser.NewParser(parser.Options{}), "./testdata/rules.yml")
require.Equal(t, successExitCode, exitCode)
})
t.Run("rules-bad", func(t *testing.T) {
t.Parallel()
- exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false, model.UTF8Validation), "./testdata/rules-bad.yml")
+ exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false, model.UTF8Validation), parser.NewParser(parser.Options{}), "./testdata/rules-bad.yml")
require.Equal(t, failureExitCode, exitCode)
})
t.Run("rules-lint-fatal", func(t *testing.T) {
t.Parallel()
- exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false, model.UTF8Validation), "./testdata/prometheus-rules.lint.yml")
+ exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false, model.UTF8Validation), parser.NewParser(parser.Options{}), "./testdata/prometheus-rules.lint.yml")
require.Equal(t, lintErrExitCode, exitCode)
})
}
@@ -705,20 +706,21 @@ func TestCheckScrapeConfigs(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
// Non-fatal linting.
- code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, false, model.UTF8Validation, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ p := parser.NewParser(parser.Options{})
+ code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, false, model.UTF8Validation, tc.lookbackDelta), p, "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
require.Equal(t, successExitCode, code, "Non-fatal linting should return success")
// Fatal linting.
- code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), p, "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
if tc.expectError {
require.Equal(t, lintErrExitCode, code, "Fatal linting should return error")
} else {
require.Equal(t, successExitCode, code, "Fatal linting should return success when there are no problems")
}
// Check syntax only, no linting.
- code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), p, "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
require.Equal(t, successExitCode, code, "Fatal linting should return success when checking syntax only")
// Lint option "none" should disable linting.
- code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), p, "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
require.Equal(t, successExitCode, code, `Fatal linting should return success when lint option "none" is specified`)
})
}
@@ -734,7 +736,6 @@ func TestTSDBDumpCommand(t *testing.T) {
load 1m
metric{foo="bar"} 1 2 3
`)
- t.Cleanup(func() { storage.Close() })
for _, c := range []struct {
name string
diff --git a/cmd/promtool/testdata/features.yml b/cmd/promtool/testdata/features.yml
index 769f8362bf..946e07d0d7 100644
--- a/cmd/promtool/testdata/features.yml
+++ b/cmd/promtool/testdata/features.yml
@@ -1,6 +1,10 @@
groups:
- name: features
rules:
- - record: x
- # We don't expect anything from this, just want to check the function parses.
+ # We don't expect anything from these, just want to check the syntax parses.
+ - record: promql-experimental-functions
expr: sort_by_label(up, "instance")
+ - record: promql-duration-expr
+ expr: rate(up[1m * 2])
+ - record: promql-extended-range-selectors
+ expr: rate(up[1m] anchored)
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index d0016ec0aa..1aaf87bc42 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -408,13 +408,13 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
return db, b, nil
}
-func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExtended bool, matchers string) error {
+func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExtended bool, matchers string, p parser.Parser) error {
var (
selectors []*labels.Matcher
err error
)
if len(matchers) > 0 {
- selectors, err = parser.ParseMetricSelector(matchers)
+ selectors, err = p.ParseMetricSelector(matchers)
if err != nil {
return err
}
@@ -478,24 +478,24 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
labelpairsCount := map[string]uint64{}
entries := 0
var (
- p index.Postings
- refs []storage.SeriesRef
+ postings index.Postings
+ refs []storage.SeriesRef
)
if len(matchers) > 0 {
- p, err = tsdb.PostingsForMatchers(ctx, ir, selectors...)
+ postings, err = tsdb.PostingsForMatchers(ctx, ir, selectors...)
if err != nil {
return err
}
// Expand refs first and cache in memory.
// So later we don't have to expand again.
- refs, err = index.ExpandPostings(p)
+ refs, err = index.ExpandPostings(postings)
if err != nil {
return err
}
fmt.Printf("Matched series: %d\n", len(refs))
- p = index.NewListPostings(refs)
+ postings = index.NewListPostings(refs)
} else {
- p, err = ir.Postings(ctx, "", "") // The special all key.
+ postings, err = ir.Postings(ctx, "", "") // The special all key.
if err != nil {
return err
}
@@ -503,8 +503,8 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
chks := []chunks.Meta{}
builder := labels.ScratchBuilder{}
- for p.Next() {
- if err = ir.Series(p.At(), &builder, &chks); err != nil {
+ for postings.Next() {
+ if err = ir.Series(postings.At(), &builder, &chks); err != nil {
return err
}
// Amount of the block time range not covered by this series.
@@ -517,8 +517,8 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
entries++
})
}
- if p.Err() != nil {
- return p.Err()
+ if postings.Err() != nil {
+ return postings.Err()
}
fmt.Printf("Postings (unique label pairs): %d\n", len(labelpairsUncovered))
fmt.Printf("Postings entries (total label pairs): %d\n", entries)
@@ -706,7 +706,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
type SeriesSetFormatter func(series storage.SeriesSet) error
-func dumpTSDBData(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
+func dumpTSDBData(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter, p parser.Parser) (err error) {
db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
if err != nil {
return err
@@ -720,7 +720,7 @@ func dumpTSDBData(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt
}
defer q.Close()
- matcherSets, err := parser.ParseMetricSelectors(match)
+ matcherSets, err := p.ParseMetricSelectors(match)
if err != nil {
return err
}
diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go
index 3a2a5aff72..86d7c67d77 100644
--- a/cmd/promtool/tsdb_test.go
+++ b/cmd/promtool/tsdb_test.go
@@ -27,6 +27,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/tsdb"
)
@@ -71,6 +72,7 @@ func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, m
maxt,
match,
formatter,
+ parser.NewParser(parser.Options{}),
)
require.NoError(t, err)
@@ -97,7 +99,6 @@ func TestTSDBDump(t *testing.T) {
heavy_metric{foo="bar"} 5 4 3 2 1
heavy_metric{foo="foo"} 5 4 3 2 1
`)
- t.Cleanup(func() { storage.Close() })
tests := []struct {
name string
@@ -196,7 +197,6 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
my_counter{foo="bar", baz="abc"} 1 2 3 4 5
my_gauge{bar="foo", abc="baz"} 9 8 0 4 7
`)
- t.Cleanup(func() { storage.Close() })
tests := []struct {
name string
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index 105e626eba..c9278d8a46 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -47,11 +47,11 @@ import (
// RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs.
-func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int {
- return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, ignoreUnknownFields, files...)
+func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, p parser.Parser, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int {
+ return RulesUnitTestResult(io.Discard, queryOpts, p, runStrings, diffFlag, debug, ignoreUnknownFields, files...)
}
-func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int {
+func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, p parser.Parser, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int {
failed := false
junit := &junitxml.JUnitXML{}
@@ -61,7 +61,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
}
for _, f := range files {
- if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, ignoreUnknownFields, junit.Suite(f)); errs != nil {
+ if errs := ruleUnitTest(f, queryOpts, p, run, diffFlag, debug, ignoreUnknownFields, junit.Suite(f)); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
@@ -83,7 +83,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
return successExitCode
}
-func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug, ignoreUnknownFields bool, ts *junitxml.TestSuite) []error {
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, p parser.Parser, run *regexp.Regexp, diffFlag, debug, ignoreUnknownFields bool, ts *junitxml.TestSuite) []error {
b, err := os.ReadFile(filename)
if err != nil {
ts.Abort(err)
@@ -132,6 +132,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
if t.Interval == 0 {
t.Interval = unitTestInp.EvaluationInterval
}
+ t.parser = p
ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.FuzzyCompare, unitTestInp.RuleFiles...)
if ers != nil {
for _, e := range ers {
@@ -219,6 +220,8 @@ type testGroup struct {
ExternalURL string `yaml:"external_url,omitempty"`
TestGroupName string `yaml:"name,omitempty"`
StartTimestamp testStartTimestamp `yaml:"start_timestamp,omitempty"`
+
+ parser parser.Parser `yaml:"-"`
}
// test performs the unit tests.
@@ -252,6 +255,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
Context: context.Background(),
NotifyFunc: func(context.Context, string, ...*rules.Alert) {},
Logger: promslog.NewNopLogger(),
+ Parser: tg.parser,
}
m := rules.NewManager(opts)
groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ignoreUnknownFields, ruleFiles...)
@@ -482,10 +486,10 @@ Outer:
var expSamples []parsedSample
for _, s := range testCase.ExpSamples {
- lb, err := parser.ParseMetric(s.Labels)
+ lb, err := tg.parser.ParseMetric(s.Labels)
var hist *histogram.FloatHistogram
if err == nil && s.Histogram != "" {
- _, values, parseErr := parser.ParseSeriesDesc("{} " + s.Histogram)
+ _, values, parseErr := tg.parser.ParseSeriesDesc("{} " + s.Histogram)
switch {
case parseErr != nil:
err = parseErr
diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go
index 32886fc4df..ce317e5e41 100644
--- a/cmd/promtool/unittest_test.go
+++ b/cmd/promtool/unittest_test.go
@@ -21,6 +21,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/util/junitxml"
)
@@ -153,7 +154,7 @@ func TestRulesUnitTest(t *testing.T) {
}
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
- if got := RulesUnitTest(tt.queryOpts, nil, false, false, false, tt.args.files...); got != tt.want {
+ if got := RulesUnitTest(tt.queryOpts, parser.NewParser(parser.Options{}), nil, false, false, false, tt.args.files...); got != tt.want {
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
}
})
@@ -161,7 +162,7 @@ func TestRulesUnitTest(t *testing.T) {
t.Run("Junit xml output ", func(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
- if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, false, reuseFiles...); got != 1 {
+ if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, parser.NewParser(parser.Options{}), nil, false, false, false, reuseFiles...); got != 1 {
t.Errorf("RulesUnitTestResults() = %v, want 1", got)
}
var test junitxml.JUnitXML
@@ -277,7 +278,7 @@ func TestRulesUnitTestRun(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
- got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.ignoreUnknownFields, tt.args.files...)
+ got := RulesUnitTest(tt.queryOpts, parser.NewParser(parser.Options{}), tt.args.run, false, false, tt.ignoreUnknownFields, tt.args.files...)
require.Equal(t, tt.want, got)
})
}
diff --git a/config/config.go b/config/config.go
index 0b9b059ab2..d721d7fb86 100644
--- a/config/config.go
+++ b/config/config.go
@@ -1107,6 +1107,10 @@ type TSDBConfig struct {
// This should not be used directly and must be converted into OutOfOrderTimeWindow.
OutOfOrderTimeWindowFlag model.Duration `yaml:"out_of_order_time_window,omitempty"`
+ // StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in
+ // the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately.
+ StaleSeriesCompactionThreshold float64 `yaml:"stale_series_compaction_threshold,omitempty"`
+
Retention *TSDBRetentionConfig `yaml:"retention,omitempty"`
}
diff --git a/config/config_test.go b/config/config_test.go
index 08aa0b4f06..968b563e1e 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -1733,8 +1733,9 @@ var expectedConf = &Config{
},
StorageConfig: StorageConfig{
TSDBConfig: &TSDBConfig{
- OutOfOrderTimeWindow: 30 * time.Minute.Milliseconds(),
- OutOfOrderTimeWindowFlag: model.Duration(30 * time.Minute),
+ OutOfOrderTimeWindow: 30 * time.Minute.Milliseconds(),
+ OutOfOrderTimeWindowFlag: model.Duration(30 * time.Minute),
+ StaleSeriesCompactionThreshold: 0.5,
Retention: &TSDBRetentionConfig{
Time: model.Duration(24 * time.Hour),
Size: 1 * units.GiB,
diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml
index 7aa53b3b74..96bf9e2b33 100644
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -453,6 +453,7 @@ alerting:
storage:
tsdb:
out_of_order_time_window: 30m
+ stale_series_compaction_threshold: 0.5
retention:
time: 1d
size: 1GB
diff --git a/discovery/aws/aws.go b/discovery/aws/aws.go
index 1ac97b3c9e..69b3b41c06 100644
--- a/discovery/aws/aws.go
+++ b/discovery/aws/aws.go
@@ -14,10 +14,13 @@
package aws
import (
+ "context"
"errors"
"fmt"
"time"
+ awsConfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -43,6 +46,7 @@ const (
RoleEC2 Role = "ec2"
RoleECS Role = "ecs"
RoleLightsail Role = "lightsail"
+ RoleMSK Role = "msk"
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -51,7 +55,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
switch *c {
- case RoleEC2, RoleECS, RoleLightsail:
+ case RoleEC2, RoleECS, RoleLightsail, RoleMSK:
return nil
default:
return fmt.Errorf("unknown AWS SD role %q", *c)
@@ -78,13 +82,14 @@ type SDConfig struct {
// ec2 specific
Filters []*EC2Filter `yaml:"filters,omitempty"`
- // ecs specific
+ // ecs, msk specific
Clusters []string `yaml:"clusters,omitempty"`
// Embedded sub-configs (internal use only, not serialized)
*EC2SDConfig `yaml:"-"`
*ECSSDConfig `yaml:"-"`
*LightsailSDConfig `yaml:"-"`
+ *MSKSDConfig `yaml:"-"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface for SDConfig.
@@ -98,15 +103,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
}
*c = SDConfig(aux)
+ var err error
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
+ }
+
switch c.Role {
case RoleEC2:
if c.EC2SDConfig == nil {
- c.EC2SDConfig = &DefaultEC2SDConfig
+ ec2Config := DefaultEC2SDConfig
+ c.EC2SDConfig = &ec2Config
}
c.EC2SDConfig.HTTPClientConfig = c.HTTPClientConfig
- if c.Region != "" {
- c.EC2SDConfig.Region = c.Region
- }
+ c.EC2SDConfig.Region = c.Region
if c.Endpoint != "" {
c.EC2SDConfig.Endpoint = c.Endpoint
}
@@ -133,12 +143,11 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
}
case RoleECS:
if c.ECSSDConfig == nil {
- c.ECSSDConfig = &DefaultECSSDConfig
+ ecsConfig := DefaultECSSDConfig
+ c.ECSSDConfig = &ecsConfig
}
c.ECSSDConfig.HTTPClientConfig = c.HTTPClientConfig
- if c.Region != "" {
- c.ECSSDConfig.Region = c.Region
- }
+ c.ECSSDConfig.Region = c.Region
if c.Endpoint != "" {
c.ECSSDConfig.Endpoint = c.Endpoint
}
@@ -165,12 +174,11 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
}
case RoleLightsail:
if c.LightsailSDConfig == nil {
- c.LightsailSDConfig = &DefaultLightsailSDConfig
+ lightsailConfig := DefaultLightsailSDConfig
+ c.LightsailSDConfig = &lightsailConfig
}
c.LightsailSDConfig.HTTPClientConfig = c.HTTPClientConfig
- if c.Region != "" {
- c.LightsailSDConfig.Region = c.Region
- }
+ c.LightsailSDConfig.Region = c.Region
if c.Endpoint != "" {
c.LightsailSDConfig.Endpoint = c.Endpoint
}
@@ -192,6 +200,37 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
if c.RefreshInterval != 0 {
c.LightsailSDConfig.RefreshInterval = c.RefreshInterval
}
+ case RoleMSK:
+ if c.MSKSDConfig == nil {
+ mskConfig := DefaultMSKSDConfig
+ c.MSKSDConfig = &mskConfig
+ }
+ c.MSKSDConfig.HTTPClientConfig = c.HTTPClientConfig
+ c.MSKSDConfig.Region = c.Region
+ if c.Endpoint != "" {
+ c.MSKSDConfig.Endpoint = c.Endpoint
+ }
+ if c.AccessKey != "" {
+ c.MSKSDConfig.AccessKey = c.AccessKey
+ }
+ if c.SecretKey != "" {
+ c.MSKSDConfig.SecretKey = c.SecretKey
+ }
+ if c.Profile != "" {
+ c.MSKSDConfig.Profile = c.Profile
+ }
+ if c.RoleARN != "" {
+ c.MSKSDConfig.RoleARN = c.RoleARN
+ }
+ if c.Port != 0 {
+ c.MSKSDConfig.Port = c.Port
+ }
+ if c.RefreshInterval != 0 {
+ c.MSKSDConfig.RefreshInterval = c.RefreshInterval
+ }
+ if c.Clusters != nil {
+ c.MSKSDConfig.Clusters = c.Clusters
+ }
default:
return fmt.Errorf("unknown AWS SD role %q", c.Role)
}
@@ -223,7 +262,39 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
case RoleLightsail:
opts.Metrics = &lightsailMetrics{refreshMetrics: awsMetrics.refreshMetrics}
return NewLightsailDiscovery(c.LightsailSDConfig, opts)
+ case RoleMSK:
+ opts.Metrics = &mskMetrics{refreshMetrics: awsMetrics.refreshMetrics}
+ return NewMSKDiscovery(c.MSKSDConfig, opts)
default:
return nil, fmt.Errorf("unknown AWS SD role %q", c.Role)
}
}
+
+// loadRegion finds the region in order: AWS config/env vars ->IMDS.
+func loadRegion(ctx context.Context, specifiedRegion string) (string, error) {
+ if specifiedRegion != "" {
+ return specifiedRegion, nil
+ }
+
+ cfg, err := awsConfig.LoadDefaultConfig(ctx)
+ if err != nil {
+ return "", fmt.Errorf("failed to load AWS config: %w", err)
+ }
+
+ if cfg.Region != "" {
+ return cfg.Region, nil
+ }
+
+ // Fallback (may fail in non-AWS environments)
+ imdsClient := imds.NewFromConfig(cfg)
+ region, err := imdsClient.GetRegion(ctx, &imds.GetRegionInput{})
+ if err != nil {
+ return "", fmt.Errorf("failed to get region from IMDS: %w", err)
+ }
+
+ if region.Region == "" {
+ return "", errors.New("region not found in AWS config or IMDS")
+ }
+
+ return region.Region, nil
+}
diff --git a/discovery/aws/aws_test.go b/discovery/aws/aws_test.go
index a2f03a8b99..d1ec7b2282 100644
--- a/discovery/aws/aws_test.go
+++ b/discovery/aws/aws_test.go
@@ -14,13 +14,19 @@
package aws
import (
+ "context"
"errors"
+ "math/rand/v2"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
"testing"
"time"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
+ "go.yaml.in/yaml/v3"
)
func TestRoleUnmarshalYAML(t *testing.T) {
@@ -177,3 +183,307 @@ port: 9300`,
})
}
}
+
+// TestMultipleSDConfigsDoNotShareState verifies that multiple AWS SD configs
+// don't share the same underlying configuration object. This was a bug where
+// all configs pointed to the same global default, causing port and other
+// settings from one job to overwrite settings in another job.
+func TestMultipleSDConfigsDoNotShareState(t *testing.T) {
+ tests := []struct {
+ name string
+ yaml string
+ validateFunc func(t *testing.T, cfg1, cfg2 *SDConfig)
+ }{
+ {
+ name: "EC2MultipleJobsDifferentPorts",
+ yaml: `
+- role: ec2
+ region: us-west-2
+ port: 9100
+ filters:
+ - name: tag:Name
+ values: [host-1]
+- role: ec2
+ region: us-west-2
+ port: 9101
+ filters:
+ - name: tag:Name
+ values: [host-2]`,
+ validateFunc: func(t *testing.T, cfg1, cfg2 *SDConfig) {
+ require.Equal(t, RoleEC2, cfg1.Role)
+ require.Equal(t, RoleEC2, cfg2.Role)
+ require.NotNil(t, cfg1.EC2SDConfig)
+ require.NotNil(t, cfg2.EC2SDConfig)
+
+ // Verify ports are different and not shared
+ require.Equal(t, 9100, cfg1.EC2SDConfig.Port)
+ require.Equal(t, 9101, cfg2.EC2SDConfig.Port)
+
+ // Verify filters are different and not shared
+ require.Len(t, cfg1.EC2SDConfig.Filters, 1)
+ require.Len(t, cfg2.EC2SDConfig.Filters, 1)
+ require.Equal(t, []string{"host-1"}, cfg1.EC2SDConfig.Filters[0].Values)
+ require.Equal(t, []string{"host-2"}, cfg2.EC2SDConfig.Filters[0].Values)
+
+ // Most importantly: verify they're not the same pointer
+ require.NotSame(t, cfg1.EC2SDConfig, cfg2.EC2SDConfig,
+ "EC2SDConfig objects should not share the same memory address")
+ },
+ },
+ {
+ name: "ECSMultipleJobsDifferentPorts",
+ yaml: `
+- role: ecs
+ region: us-east-1
+ port: 8080
+ clusters: [cluster-a]
+- role: ecs
+ region: us-east-1
+ port: 8081
+ clusters: [cluster-b]`,
+ validateFunc: func(t *testing.T, cfg1, cfg2 *SDConfig) {
+ require.Equal(t, RoleECS, cfg1.Role)
+ require.Equal(t, RoleECS, cfg2.Role)
+ require.NotNil(t, cfg1.ECSSDConfig)
+ require.NotNil(t, cfg2.ECSSDConfig)
+
+ require.Equal(t, 8080, cfg1.ECSSDConfig.Port)
+ require.Equal(t, 8081, cfg2.ECSSDConfig.Port)
+ require.Equal(t, []string{"cluster-a"}, cfg1.ECSSDConfig.Clusters)
+ require.Equal(t, []string{"cluster-b"}, cfg2.ECSSDConfig.Clusters)
+
+ require.NotSame(t, cfg1.ECSSDConfig, cfg2.ECSSDConfig,
+ "ECSSDConfig objects should not share the same memory address")
+ },
+ },
+ {
+ name: "LightsailMultipleJobsDifferentPorts",
+ yaml: `
+- role: lightsail
+ region: eu-west-1
+ port: 7070
+- role: lightsail
+ region: eu-west-1
+ port: 7071`,
+ validateFunc: func(t *testing.T, cfg1, cfg2 *SDConfig) {
+ require.Equal(t, RoleLightsail, cfg1.Role)
+ require.Equal(t, RoleLightsail, cfg2.Role)
+ require.NotNil(t, cfg1.LightsailSDConfig)
+ require.NotNil(t, cfg2.LightsailSDConfig)
+
+ require.Equal(t, 7070, cfg1.LightsailSDConfig.Port)
+ require.Equal(t, 7071, cfg2.LightsailSDConfig.Port)
+
+ require.NotSame(t, cfg1.LightsailSDConfig, cfg2.LightsailSDConfig,
+ "LightsailSDConfig objects should not share the same memory address")
+ },
+ },
+ {
+ name: "MSKMultipleJobsDifferentPorts",
+ yaml: `
+- role: msk
+ region: ap-south-1
+ port: 6060
+ clusters: ["cluster-1"]
+- role: msk
+ region: ap-south-1
+ port: 6061
+ clusters: ["cluster-2"]`,
+ validateFunc: func(t *testing.T, cfg1, cfg2 *SDConfig) {
+ require.Equal(t, RoleMSK, cfg1.Role)
+ require.Equal(t, RoleMSK, cfg2.Role)
+ require.NotNil(t, cfg1.MSKSDConfig)
+ require.NotNil(t, cfg2.MSKSDConfig)
+
+ require.Equal(t, 6060, cfg1.MSKSDConfig.Port)
+ require.Equal(t, []string{"cluster-1"}, cfg1.MSKSDConfig.Clusters)
+ require.Equal(t, 6061, cfg2.MSKSDConfig.Port)
+ require.Equal(t, []string{"cluster-2"}, cfg2.MSKSDConfig.Clusters)
+
+ require.NotSame(t, cfg1.MSKSDConfig, cfg2.MSKSDConfig,
+ "MSKSDConfig objects should not share the same memory address")
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var configs []SDConfig
+ require.NoError(t, yaml.Unmarshal([]byte(tt.yaml), &configs))
+ require.Len(t, configs, 2)
+ tt.validateFunc(t, &configs[0], &configs[1])
+ })
+ }
+}
+
+// getRandomRegion is a helper to return a pseudo-random AWS region for testing.
+func getRandomRegion() string {
+ regions := []string{
+ "us-east-1",
+ "us-east-2",
+ "us-west-1",
+ "us-west-2",
+ "eu-west-1",
+ "eu-west-2",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ }
+
+ return regions[rand.IntN(len(regions))]
+}
+
+func TestLoadRegion(t *testing.T) {
+ t.Run("with_env_region", func(t *testing.T) {
+ randomRegion := getRandomRegion()
+ t.Setenv("AWS_REGION", randomRegion)
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ t.Setenv("AWS_CONFIG_FILE", "") // Ensure no config file is used
+ t.Setenv("AWS_PROFILE", "") // Ensure no profile file is used
+
+ region, err := loadRegion(context.Background(), "")
+ require.NoError(t, err)
+ require.Equal(t, randomRegion, region)
+ })
+
+ t.Run("with_config_file_default_profile", func(t *testing.T) {
+ randomRegion := getRandomRegion()
+
+ // Create a temporary AWS config file
+ tmpDir := t.TempDir()
+ configFile := filepath.Join(tmpDir, "config")
+
+ configContent := `[default]
+region = ` + randomRegion + `
+`
+
+ err := os.WriteFile(configFile, []byte(configContent), 0o644)
+ require.NoError(t, err)
+ defer os.Remove(configFile)
+
+ // Set up environment to use the config file
+ t.Setenv("AWS_CONFIG_FILE", configFile)
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ // Clear any region environment variables to force config file usage
+ t.Setenv("AWS_REGION", "")
+ t.Setenv("AWS_PROFILE", "") // Ensure no profile file is used
+ t.Setenv("AWS_DEFAULT_REGION", "")
+
+ region, err := loadRegion(context.Background(), "")
+ require.NoError(t, err)
+ require.Equal(t, randomRegion, region)
+ })
+
+ t.Run("with_config_file_named_profile", func(t *testing.T) {
+ randomRegion := getRandomRegion()
+
+ // Create a temporary AWS config file
+ tmpDir := t.TempDir()
+ configFile := filepath.Join(tmpDir, "config")
+
+ configContent := `[default]
+region = ` + getRandomRegion() + `
+
+[profile ` + randomRegion + `-profile]
+region = ` + randomRegion + `
+`
+
+ err := os.WriteFile(configFile, []byte(configContent), 0o644)
+ require.NoError(t, err)
+ defer os.Remove(configFile)
+
+ // Set up environment to use the config file
+ t.Setenv("AWS_CONFIG_FILE", configFile)
+ t.Setenv("AWS_PROFILE", randomRegion+"-profile")
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ // Clear any region environment variables to force config file usage
+ t.Setenv("AWS_REGION", "")
+ t.Setenv("AWS_DEFAULT_REGION", "")
+
+ region, err := loadRegion(context.Background(), "")
+ require.NoError(t, err)
+ require.Equal(t, randomRegion, region)
+ })
+
+ t.Run("with_specified_region", func(t *testing.T) {
+ specifiedRegion := getRandomRegion()
+
+ // Even with environment region set differently, specified region should take precedence
+ t.Setenv("AWS_REGION", getRandomRegion())
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+
+ region, err := loadRegion(context.Background(), specifiedRegion)
+ require.NoError(t, err)
+ require.Equal(t, specifiedRegion, region)
+ })
+
+ t.Run("imds_fallback", func(t *testing.T) {
+ randomRegion := getRandomRegion()
+
+ // Mock IMDS server that returns a region
+ mockIMDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Handle instance identity document (contains region info)
+ if r.URL.Path == "/latest/dynamic/instance-identity/document" {
+ imdsPayload := `{"region": "` + randomRegion + `"}`
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(imdsPayload))
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer mockIMDS.Close()
+
+ // Set up environment with no region but valid credentials
+ // This will force fallback to IMDS
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ // Unset any existing region
+ t.Setenv("AWS_REGION", "")
+ t.Setenv("AWS_DEFAULT_REGION", "")
+ t.Setenv("AWS_CONFIG_FILE", "") // Ensure no config file is used
+ t.Setenv("AWS_PROFILE", "") // Ensure no profile file is used
+ // Point IMDS to our mock server
+ t.Setenv("AWS_EC2_METADATA_SERVICE_ENDPOINT", mockIMDS.URL)
+
+ region, err := loadRegion(context.Background(), "")
+ require.NoError(t, err)
+ require.Equal(t, randomRegion, region)
+ })
+
+ t.Run("imds_empty_region", func(t *testing.T) {
+ // Mock IMDS server that returns empty region
+ mockIMDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Handle instance identity document with empty region
+ if r.URL.Path == "/latest/dynamic/instance-identity/document" {
+ imdsPayload := `{"region": ""}`
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(imdsPayload))
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer mockIMDS.Close()
+
+ // Set up environment with no region but valid credentials
+ t.Setenv("AWS_ACCESS_KEY_ID", "dummy")
+ t.Setenv("AWS_SECRET_ACCESS_KEY", "dummy")
+ // Unset any existing region
+ t.Setenv("AWS_REGION", "")
+ t.Setenv("AWS_DEFAULT_REGION", "")
+ t.Setenv("AWS_CONFIG_FILE", "") // Ensure no config file is used
+ t.Setenv("AWS_PROFILE", "") // Ensure no profile file is used
+ // Point IMDS to our mock server
+ t.Setenv("AWS_EC2_METADATA_SERVICE_ENDPOINT", mockIMDS.URL)
+
+ _, err := loadRegion(context.Background(), "")
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "failed to get region from IMDS")
+ })
+}
diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go
index 19ecebd491..4daff43ecc 100644
--- a/discovery/aws/ec2.go
+++ b/discovery/aws/ec2.go
@@ -27,7 +27,6 @@ import (
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
- "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/ec2"
ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/aws-sdk-go-v2/service/sts"
@@ -125,31 +124,10 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
- if c.Region == "" {
- cfg, err := awsConfig.LoadDefaultConfig(context.Background())
- if err != nil {
- return err
- }
-
- if cfg.Region != "" {
- // If the region is already set in the config, use it.
- // This can happen if the user has set the region in the AWS config file or environment variables.
- c.Region = cfg.Region
- }
-
- if c.Region == "" {
- // Try to get the region from the instance metadata service (IMDS).
- imdsClient := imds.NewFromConfig(cfg)
- region, err := imdsClient.GetRegion(context.Background(), &imds.GetRegionInput{})
- if err != nil {
- return err
- }
- c.Region = region.Region
- }
- }
-
- if c.Region == "" {
- return errors.New("EC2 SD configuration requires a region")
+ // Check if the region is set, if not attempt to load it from the AWS SDK.
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
}
for _, f := range c.Filters {
diff --git a/discovery/aws/ecs.go b/discovery/aws/ecs.go
index 1d5ff366de..18d2746cb6 100644
--- a/discovery/aws/ecs.go
+++ b/discovery/aws/ecs.go
@@ -19,7 +19,9 @@ import (
"fmt"
"log/slog"
"net"
+ "slices"
"strconv"
+ "strings"
"sync"
"time"
@@ -27,7 +29,6 @@ import (
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
- "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/ec2"
"github.com/aws/aws-sdk-go-v2/service/ecs"
"github.com/aws/aws-sdk-go-v2/service/ecs/types"
@@ -137,17 +138,9 @@ func (c *ECSSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
- if c.Region == "" {
- cfg, err := awsConfig.LoadDefaultConfig(context.TODO())
- if err != nil {
- return err
- }
- client := imds.NewFromConfig(cfg)
- result, err := client.GetRegion(context.Background(), &imds.GetRegionInput{})
- if err != nil {
- return fmt.Errorf("ECS SD configuration requires a region. Tried to fetch it from the instance metadata: %w", err)
- }
- c.Region = result.Region
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
}
return c.HTTPClientConfig.Validate()
@@ -273,7 +266,6 @@ func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
// listClusterARNs returns a slice of cluster arns.
// This method does not use concurrency as it's a simple paginated call.
-// AWS ECS Cluster read actions have burst=50, sustained=20 req/sec limits.
func (d *ECSDiscovery) listClusterARNs(ctx context.Context) ([]string, error) {
var (
clusterARNs []string
@@ -281,7 +273,8 @@ func (d *ECSDiscovery) listClusterARNs(ctx context.Context) ([]string, error) {
)
for {
resp, err := d.ecs.ListClusters(ctx, &ecs.ListClustersInput{
- NextToken: nextToken,
+ NextToken: nextToken,
+ MaxResults: aws.Int32(100),
})
if err != nil {
return nil, fmt.Errorf("could not list clusters: %w", err)
@@ -299,56 +292,61 @@ func (d *ECSDiscovery) listClusterARNs(ctx context.Context) ([]string, error) {
}
// describeClusters returns a map of cluster ARN to a slice of clusters.
-// This method processes clusters in batches without concurrency as it's typically
-// a single call handling up to 100 clusters. AWS ECS Cluster read actions have
-// burst=50, sustained=20 req/sec limits.
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// Clusters are described in batches of 100 to respect AWS API limits (DescribeClusters allows up to 100 clusters per call).
func (d *ECSDiscovery) describeClusters(ctx context.Context, clusters []string) (map[string]types.Cluster, error) {
+ mu := sync.Mutex{}
clusterMap := make(map[string]types.Cluster)
-
- // AWS DescribeClusters can handle up to 100 clusters per call
- batchSize := 100
- for _, batch := range batchSlice(clusters, batchSize) {
- resp, err := d.ecs.DescribeClusters(ctx, &ecs.DescribeClustersInput{
- Clusters: batch,
- Include: []types.ClusterField{"TAGS"},
- })
- if err != nil {
- d.logger.Error("Failed to describe clusters", "clusters", batch, "error", err)
- return nil, fmt.Errorf("could not describe clusters %v: %w", batch, err)
- }
-
- for _, c := range resp.Clusters {
- if c.ClusterArn != nil {
- clusterMap[*c.ClusterArn] = c
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for batch := range slices.Chunk(clusters, 100) {
+ errg.Go(func() error {
+ resp, err := d.ecs.DescribeClusters(ectx, &ecs.DescribeClustersInput{
+ Clusters: batch,
+ Include: []types.ClusterField{"TAGS"},
+ })
+ if err != nil {
+ d.logger.Error("Failed to describe clusters", "clusters", batch, "error", err)
+ return fmt.Errorf("could not describe clusters %v: %w", batch, err)
}
- }
+
+ for _, cluster := range resp.Clusters {
+ if cluster.ClusterArn != nil {
+ mu.Lock()
+ clusterMap[*cluster.ClusterArn] = cluster
+ mu.Unlock()
+ }
+ }
+ return nil
+ })
}
- return clusterMap, nil
+ return clusterMap, errg.Wait()
}
// listServiceARNs returns a map of cluster ARN to a slice of service ARNs.
// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
-// AWS ECS Service read actions have burst=100, sustained=20 req/sec limits.
+// Services are listed in batches of 100 to respect AWS API limits (ListServices allows up to 100 services per call).
func (d *ECSDiscovery) listServiceARNs(ctx context.Context, clusters []string) (map[string][]string, error) {
- serviceARNsMu := sync.Mutex{}
- serviceARNs := make(map[string][]string)
+ mu := sync.Mutex{}
+ services := make(map[string][]string)
errg, ectx := errgroup.WithContext(ctx)
errg.SetLimit(d.cfg.RequestConcurrency)
for _, clusterARN := range clusters {
errg.Go(func() error {
var nextToken *string
- var clusterServiceARNs []string
+ var serviceARNs []string
for {
resp, err := d.ecs.ListServices(ectx, &ecs.ListServicesInput{
- Cluster: aws.String(clusterARN),
- NextToken: nextToken,
+ Cluster: aws.String(clusterARN),
+ NextToken: nextToken,
+ MaxResults: aws.Int32(100),
})
if err != nil {
return fmt.Errorf("could not list services for cluster %q: %w", clusterARN, err)
}
- clusterServiceARNs = append(clusterServiceARNs, resp.ServiceArns...)
+ serviceARNs = append(serviceARNs, resp.ServiceArns...)
if resp.NextToken == nil {
break
@@ -356,75 +354,76 @@ func (d *ECSDiscovery) listServiceARNs(ctx context.Context, clusters []string) (
nextToken = resp.NextToken
}
- serviceARNsMu.Lock()
- serviceARNs[clusterARN] = clusterServiceARNs
- serviceARNsMu.Unlock()
+ mu.Lock()
+ services[clusterARN] = serviceARNs
+ mu.Unlock()
return nil
})
}
- return serviceARNs, errg.Wait()
-}
-
-// describeServices returns a map of cluster ARN to services.
-// Uses concurrent requests with batching (10 services per request) to respect AWS API limits.
-// AWS ECS Service read actions have burst=100, sustained=20 req/sec limits.
-func (d *ECSDiscovery) describeServices(ctx context.Context, clusterServiceARNsMap map[string][]string) (map[string][]types.Service, error) {
- batchSize := 10 // AWS DescribeServices API limit is 10 services per request
- serviceMu := sync.Mutex{}
- services := make(map[string][]types.Service)
- errg, ectx := errgroup.WithContext(ctx)
- errg.SetLimit(d.cfg.RequestConcurrency)
- for clusterARN, serviceARNs := range clusterServiceARNsMap {
- for _, batch := range batchSlice(serviceARNs, batchSize) {
- errg.Go(func() error {
- resp, err := d.ecs.DescribeServices(ectx, &ecs.DescribeServicesInput{
- Services: batch,
- Cluster: aws.String(clusterARN),
- Include: []types.ServiceField{"TAGS"},
- })
- if err != nil {
- d.logger.Error("Failed to describe services", "cluster", clusterARN, "batch", batch, "error", err)
- return fmt.Errorf("could not describe services for cluster %q: %w", clusterARN, err)
- }
-
- serviceMu.Lock()
- services[clusterARN] = append(services[clusterARN], resp.Services...)
- serviceMu.Unlock()
-
- return nil
- })
- }
- }
-
return services, errg.Wait()
}
-// listTaskARNs returns a map of service ARN to a slice of task ARNs.
+// describeServices returns a map of service name to service.
// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
-// AWS ECS Cluster resource read actions have burst=100, sustained=20 req/sec limits.
-func (d *ECSDiscovery) listTaskARNs(ctx context.Context, services []types.Service) (map[string][]string, error) {
- taskARNsMu := sync.Mutex{}
- taskARNs := make(map[string][]string)
+// Services are described in batches of 10 to respect AWS API limits (DescribeServices allows up to 10 services per call).
+func (d *ECSDiscovery) describeServices(ctx context.Context, clusterARN string, serviceARNS []string) (map[string]types.Service, error) {
+ mu := sync.Mutex{}
+ services := make(map[string]types.Service)
errg, ectx := errgroup.WithContext(ctx)
errg.SetLimit(d.cfg.RequestConcurrency)
- for _, service := range services {
+ for batch := range slices.Chunk(serviceARNS, 10) {
errg.Go(func() error {
- serviceArn := aws.ToString(service.ServiceArn)
+ resp, err := d.ecs.DescribeServices(ectx, &ecs.DescribeServicesInput{
+ Cluster: aws.String(clusterARN),
+ Services: batch,
+ Include: []types.ServiceField{"TAGS"},
+ })
+ if err != nil {
+ d.logger.Error("Failed to describe services", "cluster", clusterARN, "batch", batch, "error", err)
+ return fmt.Errorf("could not describe services for cluster %q: batch %v: %w", clusterARN, batch, err)
+ }
- var nextToken *string
- var serviceTaskARNs []string
+ for _, service := range resp.Services {
+ if service.ServiceArn != nil {
+ mu.Lock()
+ services[*service.ServiceName] = service
+ mu.Unlock()
+ }
+ }
+ return nil
+ })
+ }
+
+ return services, errg.Wait()
+}
+
+// listTaskARNs returns a map of clustersARN to a slice of task ARNs.
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// Tasks are listed in batches of 100 to respect AWS API limits (ListTasks allows up to 100 tasks per call).
+// This method also uses pagination to handle cases where there are more than 100 tasks in a cluster.
+func (d *ECSDiscovery) listTaskARNs(ctx context.Context, clusterARNs []string) (map[string][]string, error) {
+ mu := sync.Mutex{}
+ tasks := make(map[string][]string)
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for _, clusterARN := range clusterARNs {
+ errg.Go(func() error {
+ var (
+ nextToken *string
+ taskARNs []string
+ )
for {
resp, err := d.ecs.ListTasks(ectx, &ecs.ListTasksInput{
- Cluster: aws.String(*service.ClusterArn),
- ServiceName: aws.String(*service.ServiceName),
- NextToken: nextToken,
+ Cluster: aws.String(clusterARN),
+ NextToken: nextToken,
+ MaxResults: aws.Int32(100),
})
if err != nil {
- return fmt.Errorf("could not list tasks for service %q: %w", serviceArn, err)
+ return fmt.Errorf("could not list tasks for cluster %q: %w", clusterARN, err)
}
- serviceTaskARNs = append(serviceTaskARNs, resp.TaskArns...)
+ taskARNs = append(taskARNs, resp.TaskArns...)
if resp.NextToken == nil {
break
@@ -432,77 +431,87 @@ func (d *ECSDiscovery) listTaskARNs(ctx context.Context, services []types.Servic
nextToken = resp.NextToken
}
- taskARNsMu.Lock()
- taskARNs[serviceArn] = serviceTaskARNs
- taskARNsMu.Unlock()
+ mu.Lock()
+ tasks[clusterARN] = taskARNs
+ mu.Unlock()
return nil
})
}
- return taskARNs, errg.Wait()
+ return tasks, errg.Wait()
}
-// describeTasks returns a map of task arn to a slice task.
-// Uses concurrent requests with batching (100 tasks per request) to respect AWS API limits.
-// AWS ECS Cluster resource read actions have burst=100, sustained=20 req/sec limits.
-func (d *ECSDiscovery) describeTasks(ctx context.Context, clusterARN string, taskARNsMap map[string][]string) (map[string][]types.Task, error) {
- batchSize := 100 // AWS DescribeTasks API limit is 100 tasks per request
- taskMu := sync.Mutex{}
- tasks := make(map[string][]types.Task)
+// describeTasks returns a slice of tasks.
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// Tasks are described in batches of 100 to respect AWS API limits (DescribeTasks allows up to 100 tasks per call).
+func (d *ECSDiscovery) describeTasks(ctx context.Context, clusterARN string, taskARNs []string) ([]types.Task, error) {
+ mu := sync.Mutex{}
+ var tasks []types.Task
errg, ectx := errgroup.WithContext(ctx)
errg.SetLimit(d.cfg.RequestConcurrency)
- for serviceARN, taskARNs := range taskARNsMap {
- for _, batch := range batchSlice(taskARNs, batchSize) {
- errg.Go(func() error {
- resp, err := d.ecs.DescribeTasks(ectx, &ecs.DescribeTasksInput{
- Cluster: aws.String(clusterARN),
- Tasks: batch,
- Include: []types.TaskField{"TAGS"},
- })
- if err != nil {
- d.logger.Error("Failed to describe tasks", "service", serviceARN, "cluster", clusterARN, "batch", batch, "error", err)
- return fmt.Errorf("could not describe tasks for service %q in cluster %q: %w", serviceARN, clusterARN, err)
- }
-
- taskMu.Lock()
- tasks[serviceARN] = append(tasks[serviceARN], resp.Tasks...)
- taskMu.Unlock()
-
- return nil
+ for batch := range slices.Chunk(taskARNs, 100) {
+ errg.Go(func() error {
+ resp, err := d.ecs.DescribeTasks(ectx, &ecs.DescribeTasksInput{
+ Cluster: aws.String(clusterARN),
+ Tasks: batch,
+ Include: []types.TaskField{"TAGS"},
})
- }
+ if err != nil {
+ d.logger.Error("Failed to describe tasks", "cluster", clusterARN, "batch", batch, "error", err)
+ return fmt.Errorf("could not describe tasks in cluster %q: batch %v: %w", clusterARN, batch, err)
+ }
+
+ mu.Lock()
+ tasks = append(tasks, resp.Tasks...)
+ mu.Unlock()
+ return nil
+ })
}
return tasks, errg.Wait()
}
// describeContainerInstances returns a map of container instance ARN to EC2 instance ID
-// Uses batching to respect AWS API limits (100 container instances per request).
-func (d *ECSDiscovery) describeContainerInstances(ctx context.Context, clusterARN string, containerInstanceARNs []string) (map[string]string, error) {
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// Container instances are described in batches of 100 to respect AWS API limits (DescribeContainerInstances allows up to 100 container instances per call).
+func (d *ECSDiscovery) describeContainerInstances(ctx context.Context, clusterARN string, tasks []types.Task) (map[string]string, error) {
+ containerInstanceARNs := make([]string, 0, len(tasks))
+ for _, task := range tasks {
+ if task.ContainerInstanceArn != nil {
+ containerInstanceARNs = append(containerInstanceARNs, *task.ContainerInstanceArn)
+ }
+ }
+
if len(containerInstanceARNs) == 0 {
return make(map[string]string), nil
}
+ mu := sync.Mutex{}
containerInstToEC2 := make(map[string]string)
- batchSize := 100 // AWS API limit
-
- for _, batch := range batchSlice(containerInstanceARNs, batchSize) {
- resp, err := d.ecs.DescribeContainerInstances(ctx, &ecs.DescribeContainerInstancesInput{
- Cluster: aws.String(clusterARN),
- ContainerInstances: batch,
- })
- if err != nil {
- return nil, fmt.Errorf("could not describe container instances: %w", err)
- }
-
- for _, ci := range resp.ContainerInstances {
- if ci.ContainerInstanceArn != nil && ci.Ec2InstanceId != nil {
- containerInstToEC2[*ci.ContainerInstanceArn] = *ci.Ec2InstanceId
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for batch := range slices.Chunk(containerInstanceARNs, 100) {
+ errg.Go(func() error {
+ resp, err := d.ecs.DescribeContainerInstances(ectx, &ecs.DescribeContainerInstancesInput{
+ Cluster: aws.String(clusterARN),
+ ContainerInstances: batch,
+ })
+ if err != nil {
+ return fmt.Errorf("could not describe container instances: %w", err)
}
- }
+
+ for _, ci := range resp.ContainerInstances {
+ if ci.ContainerInstanceArn != nil && ci.Ec2InstanceId != nil {
+ mu.Lock()
+ containerInstToEC2[*ci.ContainerInstanceArn] = *ci.Ec2InstanceId
+ mu.Unlock()
+ }
+ }
+ return nil
+ })
}
- return containerInstToEC2, nil
+ return containerInstToEC2, errg.Wait()
}
// ec2InstanceInfo holds information retrieved from EC2 DescribeInstances.
@@ -515,83 +524,112 @@ type ec2InstanceInfo struct {
}
// describeEC2Instances returns a map of EC2 instance ID to instance information.
+// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
+// This method does not use concurrency as it's a simple paginated call.
func (d *ECSDiscovery) describeEC2Instances(ctx context.Context, instanceIDs []string) (map[string]ec2InstanceInfo, error) {
if len(instanceIDs) == 0 {
return make(map[string]ec2InstanceInfo), nil
}
instanceInfo := make(map[string]ec2InstanceInfo)
+ var nextToken *string
- resp, err := d.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
- InstanceIds: instanceIDs,
- })
- if err != nil {
- return nil, fmt.Errorf("could not describe EC2 instances: %w", err)
- }
+ for {
+ resp, err := d.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
+ InstanceIds: instanceIDs,
+ NextToken: nextToken,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not describe EC2 instances: %w", err)
+ }
- for _, reservation := range resp.Reservations {
- for _, instance := range reservation.Instances {
- if instance.InstanceId != nil && instance.PrivateIpAddress != nil {
- info := ec2InstanceInfo{
- privateIP: *instance.PrivateIpAddress,
- tags: make(map[string]string),
- }
- if instance.PublicIpAddress != nil {
- info.publicIP = *instance.PublicIpAddress
- }
- if instance.SubnetId != nil {
- info.subnetID = *instance.SubnetId
- }
- if instance.InstanceType != "" {
- info.instanceType = string(instance.InstanceType)
- }
- // Collect EC2 instance tags
- for _, tag := range instance.Tags {
- if tag.Key != nil && tag.Value != nil {
- info.tags[*tag.Key] = *tag.Value
+ for _, reservation := range resp.Reservations {
+ for _, instance := range reservation.Instances {
+ if instance.InstanceId != nil && instance.PrivateIpAddress != nil {
+ info := ec2InstanceInfo{
+ privateIP: *instance.PrivateIpAddress,
+ tags: make(map[string]string),
}
+ if instance.PublicIpAddress != nil {
+ info.publicIP = *instance.PublicIpAddress
+ }
+ if instance.SubnetId != nil {
+ info.subnetID = *instance.SubnetId
+ }
+ if instance.InstanceType != "" {
+ info.instanceType = string(instance.InstanceType)
+ }
+ // Collect EC2 instance tags
+ for _, tag := range instance.Tags {
+ if tag.Key != nil && tag.Value != nil {
+ info.tags[*tag.Key] = *tag.Value
+ }
+ }
+ instanceInfo[*instance.InstanceId] = info
}
- instanceInfo[*instance.InstanceId] = info
}
}
+
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
}
return instanceInfo, nil
}
// describeNetworkInterfaces returns a map of ENI ID to public IP address.
-func (d *ECSDiscovery) describeNetworkInterfaces(ctx context.Context, eniIDs []string) (map[string]string, error) {
+// This is needed to get the public IP for tasks using awsvpc network mode, as the ENI is what gets the public IP, not the EC2 instance.
+// This method does not use concurrency as it's a simple paginated call.
+func (d *ECSDiscovery) describeNetworkInterfaces(ctx context.Context, tasks []types.Task) (map[string]string, error) {
+ eniIDs := make([]string, 0, len(tasks))
+
+ for _, task := range tasks {
+ for _, attachment := range task.Attachments {
+ if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
+ for _, detail := range attachment.Details {
+ if detail.Name != nil && *detail.Name == "networkInterfaceId" && detail.Value != nil {
+ eniIDs = append(eniIDs, *detail.Value)
+ break
+ }
+ }
+ break
+ }
+ }
+ }
+
if len(eniIDs) == 0 {
return make(map[string]string), nil
}
eniToPublicIP := make(map[string]string)
+ var nextToken *string
- resp, err := d.ec2.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{
- NetworkInterfaceIds: eniIDs,
- })
- if err != nil {
- return nil, fmt.Errorf("could not describe network interfaces: %w", err)
- }
-
- for _, eni := range resp.NetworkInterfaces {
- if eni.NetworkInterfaceId != nil && eni.Association != nil && eni.Association.PublicIp != nil {
- eniToPublicIP[*eni.NetworkInterfaceId] = *eni.Association.PublicIp
+ for {
+ resp, err := d.ec2.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{
+ NetworkInterfaceIds: eniIDs,
+ NextToken: nextToken,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not describe network interfaces: %w", err)
}
+
+ for _, eni := range resp.NetworkInterfaces {
+ if eni.NetworkInterfaceId != nil && eni.Association != nil && eni.Association.PublicIp != nil {
+ eniToPublicIP[*eni.NetworkInterfaceId] = *eni.Association.PublicIp
+ }
+ }
+
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
}
return eniToPublicIP, nil
}
-func batchSlice[T any](a []T, size int) [][]T {
- batches := make([][]T, 0, len(a)/size+1)
- for i := 0; i < len(a); i += size {
- end := min(i+size, len(a))
- batches = append(batches, a[i:end])
- }
- return batches
-}
-
func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
err := d.initEcsClient(ctx)
if err != nil {
@@ -620,314 +658,338 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
Source: d.cfg.Region,
}
- clusterARNMap, err := d.describeClusters(ctx, clusters)
- if err != nil {
- return nil, err
- }
+ // Fetch cluster details, service ARNs, and task ARNs in parallel
+ var (
+ clusterMap map[string]types.Cluster
+ serviceMap map[string][]string
+ taskMap map[string][]string
+ )
- clusterServiceARNMap, err := d.listServiceARNs(ctx, clusters)
- if err != nil {
- return nil, err
- }
+ clusterErrg, clusterCtx := errgroup.WithContext(ctx)
+ clusterErrg.Go(func() error {
+ var err error
+ clusterMap, err = d.describeClusters(clusterCtx, clusters)
+ return err
+ })
+ clusterErrg.Go(func() error {
+ var err error
+ serviceMap, err = d.listServiceARNs(clusterCtx, clusters)
+ return err
+ })
+ clusterErrg.Go(func() error {
+ var err error
+ taskMap, err = d.listTaskARNs(clusterCtx, clusters)
+ return err
+ })
- clusterServicesMap, err := d.describeServices(ctx, clusterServiceARNMap)
- if err != nil {
+ if err := clusterErrg.Wait(); err != nil {
return nil, err
}
// Use goroutines to process clusters in parallel
var (
- targetsMu sync.Mutex
- wg sync.WaitGroup
+ clusterWg sync.WaitGroup
+ clusterMu sync.Mutex
+ clusterTargets []model.LabelSet
)
- for clusterArn, clusterServices := range clusterServicesMap {
- if len(clusterServices) == 0 {
+ for clusterARN, taskARNs := range taskMap {
+ if len(taskARNs) == 0 {
continue
}
- wg.Add(1)
- go func(clusterArn string, clusterServices []types.Service) {
- defer wg.Done()
+ clusterWg.Add(1)
- serviceTaskARNMap, err := d.listTaskARNs(ctx, clusterServices)
- if err != nil {
- d.logger.Error("Failed to list task ARNs for cluster", "cluster", clusterArn, "error", err)
- return
- }
+ go func(cluster types.Cluster, serviceARNs, taskARNs []string) {
+ defer clusterWg.Done()
- serviceTaskMap, err := d.describeTasks(ctx, clusterArn, serviceTaskARNMap)
- if err != nil {
- d.logger.Error("Failed to describe tasks for cluster", "cluster", clusterArn, "error", err)
- return
- }
-
- // Process services within this cluster in parallel
+ // Fetch services and tasks in parallel (they're independent)
var (
- serviceWg sync.WaitGroup
- localTargets []model.LabelSet
- localTargetsMu sync.Mutex
+ services map[string]types.Service
+ tasks []types.Task
)
- for _, clusterService := range clusterServices {
- serviceWg.Add(1)
- go func(clusterService types.Service) {
- defer serviceWg.Done()
+ resourceErrg, resourceCtx := errgroup.WithContext(ctx)
+ resourceErrg.Go(func() error {
+ var err error
+ services, err = d.describeServices(resourceCtx, *cluster.ClusterArn, serviceARNs)
+ if err != nil {
+ d.logger.Error("Failed to describe services for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ }
+ return err
+ })
+ resourceErrg.Go(func() error {
+ var err error
+ tasks, err = d.describeTasks(resourceCtx, *cluster.ClusterArn, taskARNs)
+ if err != nil {
+ d.logger.Error("Failed to describe tasks for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ }
+ return err
+ })
- serviceArn := *clusterService.ServiceArn
-
- if tasks, exists := serviceTaskMap[serviceArn]; exists {
- var serviceTargets []model.LabelSet
-
- // Collect container instance ARNs for all EC2 tasks to get instance type
- var containerInstanceARNs []string
- taskToContainerInstance := make(map[string]string)
- // Collect ENI IDs for awsvpc tasks to get public IPs
- var eniIDs []string
- taskToENI := make(map[string]string)
-
- for _, task := range tasks {
- // Collect container instance ARN for any task running on EC2
- if task.ContainerInstanceArn != nil {
- containerInstanceARNs = append(containerInstanceARNs, *task.ContainerInstanceArn)
- taskToContainerInstance[*task.TaskArn] = *task.ContainerInstanceArn
- }
-
- // Collect ENI IDs from awsvpc tasks
- for _, attachment := range task.Attachments {
- if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
- for _, detail := range attachment.Details {
- if detail.Name != nil && *detail.Name == "networkInterfaceId" && detail.Value != nil {
- eniIDs = append(eniIDs, *detail.Value)
- taskToENI[*task.TaskArn] = *detail.Value
- break
- }
- }
- break
- }
- }
- }
-
- // Batch describe container instances and EC2 instances to get instance type and other metadata
- var containerInstToEC2 map[string]string
- var ec2InstInfo map[string]ec2InstanceInfo
- if len(containerInstanceARNs) > 0 {
- var err error
- containerInstToEC2, err = d.describeContainerInstances(ctx, clusterArn, containerInstanceARNs)
- if err != nil {
- d.logger.Error("Failed to describe container instances", "cluster", clusterArn, "error", err)
- // Continue processing tasks
- } else {
- // Collect unique EC2 instance IDs
- ec2InstanceIDs := make([]string, 0, len(containerInstToEC2))
- for _, ec2ID := range containerInstToEC2 {
- ec2InstanceIDs = append(ec2InstanceIDs, ec2ID)
- }
-
- // Batch describe EC2 instances
- ec2InstInfo, err = d.describeEC2Instances(ctx, ec2InstanceIDs)
- if err != nil {
- d.logger.Error("Failed to describe EC2 instances", "cluster", clusterArn, "error", err)
- }
- }
- }
-
- // Batch describe ENIs to get public IPs for awsvpc tasks
- var eniToPublicIP map[string]string
- if len(eniIDs) > 0 {
- var err error
- eniToPublicIP, err = d.describeNetworkInterfaces(ctx, eniIDs)
- if err != nil {
- d.logger.Error("Failed to describe network interfaces", "cluster", clusterArn, "error", err)
- // Continue processing without ENI public IPs
- }
- }
-
- for _, task := range tasks {
- var ipAddress, subnetID, publicIP string
- var networkMode string
- var ec2InstanceID, ec2InstanceType, ec2InstancePrivateIP, ec2InstancePublicIP string
-
- // Try to get IP from ENI attachment (awsvpc mode)
- var eniAttachment *types.Attachment
- for _, attachment := range task.Attachments {
- if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
- eniAttachment = &attachment
- break
- }
- }
-
- if eniAttachment != nil {
- // awsvpc networking mode - get IP from ENI
- networkMode = "awsvpc"
- for _, detail := range eniAttachment.Details {
- switch *detail.Name {
- case "privateIPv4Address":
- ipAddress = *detail.Value
- case "subnetId":
- subnetID = *detail.Value
- }
- }
- // Get public IP from ENI if available
- if eniID, ok := taskToENI[*task.TaskArn]; ok {
- if eniPublicIP, ok := eniToPublicIP[eniID]; ok {
- publicIP = eniPublicIP
- }
- }
- } else if task.ContainerInstanceArn != nil {
- // bridge/host networking mode - need to get EC2 instance IP and subnet
- networkMode = "bridge"
- containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
- if ok {
- ec2InstanceID, ok = containerInstToEC2[containerInstARN]
- if ok {
- info, ok := ec2InstInfo[ec2InstanceID]
- if ok {
- ipAddress = info.privateIP
- publicIP = info.publicIP
- subnetID = info.subnetID
- ec2InstanceType = info.instanceType
- ec2InstancePrivateIP = info.privateIP
- ec2InstancePublicIP = info.publicIP
- } else {
- d.logger.Debug("EC2 instance info not found", "instance", ec2InstanceID, "task", *task.TaskArn)
- }
- } else {
- d.logger.Debug("Container instance not found in map", "arn", containerInstARN, "task", *task.TaskArn)
- }
- }
- }
-
- // Get EC2 instance metadata for awsvpc tasks running on EC2
- // We want the instance type and the host IPs for advanced use cases
- if networkMode == "awsvpc" && task.ContainerInstanceArn != nil {
- containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
- if ok {
- ec2InstanceID, ok = containerInstToEC2[containerInstARN]
- if ok {
- info, ok := ec2InstInfo[ec2InstanceID]
- if ok {
- ec2InstanceType = info.instanceType
- ec2InstancePrivateIP = info.privateIP
- ec2InstancePublicIP = info.publicIP
- }
- }
- }
- }
-
- if ipAddress == "" {
- continue
- }
-
- labels := model.LabelSet{
- ecsLabelClusterARN: model.LabelValue(*clusterService.ClusterArn),
- ecsLabelService: model.LabelValue(*clusterService.ServiceName),
- ecsLabelServiceARN: model.LabelValue(*clusterService.ServiceArn),
- ecsLabelServiceStatus: model.LabelValue(*clusterService.Status),
- ecsLabelTaskGroup: model.LabelValue(*task.Group),
- ecsLabelTaskARN: model.LabelValue(*task.TaskArn),
- ecsLabelTaskDefinition: model.LabelValue(*task.TaskDefinitionArn),
- ecsLabelIPAddress: model.LabelValue(ipAddress),
- ecsLabelRegion: model.LabelValue(d.cfg.Region),
- ecsLabelLaunchType: model.LabelValue(task.LaunchType),
- ecsLabelAvailabilityZone: model.LabelValue(*task.AvailabilityZone),
- ecsLabelDesiredStatus: model.LabelValue(*task.DesiredStatus),
- ecsLabelLastStatus: model.LabelValue(*task.LastStatus),
- ecsLabelHealthStatus: model.LabelValue(task.HealthStatus),
- ecsLabelNetworkMode: model.LabelValue(networkMode),
- }
-
- // Add subnet ID when available (awsvpc mode from ENI, bridge/host from EC2 instance)
- if subnetID != "" {
- labels[ecsLabelSubnetID] = model.LabelValue(subnetID)
- }
-
- // Add container instance and EC2 instance info for EC2 launch type
- if task.ContainerInstanceArn != nil {
- labels[ecsLabelContainerInstanceARN] = model.LabelValue(*task.ContainerInstanceArn)
- }
- if ec2InstanceID != "" {
- labels[ecsLabelEC2InstanceID] = model.LabelValue(ec2InstanceID)
- }
- if ec2InstanceType != "" {
- labels[ecsLabelEC2InstanceType] = model.LabelValue(ec2InstanceType)
- }
- if ec2InstancePrivateIP != "" {
- labels[ecsLabelEC2InstancePrivateIP] = model.LabelValue(ec2InstancePrivateIP)
- }
- if ec2InstancePublicIP != "" {
- labels[ecsLabelEC2InstancePublicIP] = model.LabelValue(ec2InstancePublicIP)
- }
- if publicIP != "" {
- labels[ecsLabelPublicIP] = model.LabelValue(publicIP)
- }
-
- if task.PlatformFamily != nil {
- labels[ecsLabelPlatformFamily] = model.LabelValue(*task.PlatformFamily)
- }
- if task.PlatformVersion != nil {
- labels[ecsLabelPlatformVersion] = model.LabelValue(*task.PlatformVersion)
- }
-
- labels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(ipAddress, strconv.Itoa(d.cfg.Port)))
-
- // Add cluster tags
- if cluster, exists := clusterARNMap[*clusterService.ClusterArn]; exists {
- if cluster.ClusterName != nil {
- labels[ecsLabelCluster] = model.LabelValue(*cluster.ClusterName)
- }
-
- for _, clusterTag := range cluster.Tags {
- if clusterTag.Key != nil && clusterTag.Value != nil {
- labels[model.LabelName(ecsLabelTagCluster+strutil.SanitizeLabelName(*clusterTag.Key))] = model.LabelValue(*clusterTag.Value)
- }
- }
- }
-
- // Add service tags
- for _, serviceTag := range clusterService.Tags {
- if serviceTag.Key != nil && serviceTag.Value != nil {
- labels[model.LabelName(ecsLabelTagService+strutil.SanitizeLabelName(*serviceTag.Key))] = model.LabelValue(*serviceTag.Value)
- }
- }
-
- // Add task tags
- for _, taskTag := range task.Tags {
- if taskTag.Key != nil && taskTag.Value != nil {
- labels[model.LabelName(ecsLabelTagTask+strutil.SanitizeLabelName(*taskTag.Key))] = model.LabelValue(*taskTag.Value)
- }
- }
-
- // Add EC2 instance tags (if running on EC2)
- if ec2InstanceID != "" {
- if info, ok := ec2InstInfo[ec2InstanceID]; ok {
- for tagKey, tagValue := range info.tags {
- labels[model.LabelName(ecsLabelTagEC2+strutil.SanitizeLabelName(tagKey))] = model.LabelValue(tagValue)
- }
- }
- }
-
- serviceTargets = append(serviceTargets, labels)
- }
-
- // Add service targets to local targets with mutex protection
- localTargetsMu.Lock()
- localTargets = append(localTargets, serviceTargets...)
- localTargetsMu.Unlock()
- }
- }(clusterService)
+ if err := resourceErrg.Wait(); err != nil {
+ return
}
- serviceWg.Wait()
+ // Fetch container instances and network interfaces in parallel (both depend on tasks)
+ var (
+ containerInstances map[string]string
+ eniToPublicIP map[string]string
+ )
- // Add all local targets to main target group with mutex protection
- targetsMu.Lock()
- tg.Targets = append(tg.Targets, localTargets...)
- targetsMu.Unlock()
- }(clusterArn, clusterServices)
+ instanceErrg, instanceCtx := errgroup.WithContext(ctx)
+ instanceErrg.Go(func() error {
+ var err error
+ containerInstances, err = d.describeContainerInstances(instanceCtx, *cluster.ClusterArn, tasks)
+ if err != nil {
+ d.logger.Error("Failed to describe container instances for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ }
+ return err
+ })
+ instanceErrg.Go(func() error {
+ var err error
+ eniToPublicIP, err = d.describeNetworkInterfaces(instanceCtx, tasks)
+ if err != nil {
+ d.logger.Error("Failed to describe network interfaces for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ }
+ return err
+ })
+
+ if err := instanceErrg.Wait(); err != nil {
+ return
+ }
+
+ ec2Instances := make(map[string]ec2InstanceInfo)
+ if len(containerInstances) > 0 {
+ // Deduplicate EC2 instance IDs (multiple tasks can share the same instance)
+ ec2InstanceIDSet := make(map[string]struct{})
+ for _, ec2ID := range containerInstances {
+ ec2InstanceIDSet[ec2ID] = struct{}{}
+ }
+ ec2InstanceIDs := make([]string, 0, len(ec2InstanceIDSet))
+ for ec2ID := range ec2InstanceIDSet {
+ ec2InstanceIDs = append(ec2InstanceIDs, ec2ID)
+ }
+ ec2Instances, err = d.describeEC2Instances(ctx, ec2InstanceIDs)
+ if err != nil {
+ d.logger.Error("Failed to describe EC2 instances for cluster", "cluster", *cluster.ClusterArn, "error", err)
+ return
+ }
+ }
+
+ var (
+ taskWg sync.WaitGroup
+ taskMu sync.Mutex
+ taskTargets []model.LabelSet
+ )
+
+ for _, task := range tasks {
+ taskWg.Add(1)
+
+ go func(cluster types.Cluster, services map[string]types.Service, task types.Task, containerInstances map[string]string, ec2Instances map[string]ec2InstanceInfo, eniToPublicIP map[string]string) {
+ defer taskWg.Done()
+
+ var (
+ ipAddress, subnetID, publicIP string
+ networkMode string
+ ec2InstanceID, ec2InstanceType, ec2InstancePrivateIP, ec2InstancePublicIP string
+ )
+
+ // Try to get IP from ENI attachment (awsvpc mode)
+ var eniAttachment *types.Attachment
+ for _, attachment := range task.Attachments {
+ if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
+ eniAttachment = &attachment
+ break
+ }
+ }
+
+ if eniAttachment != nil {
+ // awsvpc networking mode - get IP from ENI
+ networkMode = "awsvpc"
+ var eniID string
+ for _, detail := range eniAttachment.Details {
+ switch *detail.Name {
+ case "privateIPv4Address":
+ ipAddress = *detail.Value
+ case "subnetId":
+ subnetID = *detail.Value
+ case "networkInterfaceId":
+ eniID = *detail.Value
+ }
+ }
+ // Get public IP from ENI if available
+ if eniID != "" {
+ if pub, ok := eniToPublicIP[eniID]; ok {
+ publicIP = pub
+ }
+ }
+ } else if task.ContainerInstanceArn != nil {
+ // bridge/host networking mode - need to get EC2 instance IP and subnet
+ networkMode = "bridge"
+ var ok bool
+ ec2InstanceID, ok = containerInstances[*task.ContainerInstanceArn]
+ if ok {
+ info, ok := ec2Instances[ec2InstanceID]
+ if ok {
+ ipAddress = info.privateIP
+ publicIP = info.publicIP
+ subnetID = info.subnetID
+ ec2InstanceType = info.instanceType
+ ec2InstancePrivateIP = info.privateIP
+ ec2InstancePublicIP = info.publicIP
+ } else {
+ d.logger.Debug("EC2 instance info not found", "instance", ec2InstanceID, "task", *task.TaskArn)
+ }
+ } else {
+ d.logger.Debug("Container instance not found in map", "arn", *task.ContainerInstanceArn, "task", *task.TaskArn)
+ }
+ }
+
+ // Get EC2 instance metadata for awsvpc tasks running on EC2
+ // We want the instance type and the host IPs for advanced use cases
+ if networkMode == "awsvpc" && task.ContainerInstanceArn != nil {
+ var ok bool
+ ec2InstanceID, ok = containerInstances[*task.ContainerInstanceArn]
+ if ok {
+ info, ok := ec2Instances[ec2InstanceID]
+ if ok {
+ ec2InstanceType = info.instanceType
+ ec2InstancePrivateIP = info.privateIP
+ ec2InstancePublicIP = info.publicIP
+ }
+ }
+ }
+
+ if ipAddress == "" {
+ return
+ }
+
+ labels := model.LabelSet{
+ ecsLabelClusterARN: model.LabelValue(*cluster.ClusterArn),
+ ecsLabelCluster: model.LabelValue(*cluster.ClusterName),
+ ecsLabelTaskGroup: model.LabelValue(*task.Group),
+ ecsLabelTaskARN: model.LabelValue(*task.TaskArn),
+ ecsLabelTaskDefinition: model.LabelValue(*task.TaskDefinitionArn),
+ ecsLabelIPAddress: model.LabelValue(ipAddress),
+ ecsLabelRegion: model.LabelValue(d.cfg.Region),
+ ecsLabelLaunchType: model.LabelValue(task.LaunchType),
+ ecsLabelAvailabilityZone: model.LabelValue(*task.AvailabilityZone),
+ ecsLabelDesiredStatus: model.LabelValue(*task.DesiredStatus),
+ ecsLabelLastStatus: model.LabelValue(*task.LastStatus),
+ ecsLabelHealthStatus: model.LabelValue(task.HealthStatus),
+ ecsLabelNetworkMode: model.LabelValue(networkMode),
+ }
+
+ // Add subnet ID when available (awsvpc mode from ENI, bridge/host from EC2 instance)
+ if subnetID != "" {
+ labels[ecsLabelSubnetID] = model.LabelValue(subnetID)
+ }
+
+ // Add container instance and EC2 instance info for EC2 launch type
+ if task.ContainerInstanceArn != nil {
+ labels[ecsLabelContainerInstanceARN] = model.LabelValue(*task.ContainerInstanceArn)
+ }
+ if ec2InstanceID != "" {
+ labels[ecsLabelEC2InstanceID] = model.LabelValue(ec2InstanceID)
+ }
+ if ec2InstanceType != "" {
+ labels[ecsLabelEC2InstanceType] = model.LabelValue(ec2InstanceType)
+ }
+ if ec2InstancePrivateIP != "" {
+ labels[ecsLabelEC2InstancePrivateIP] = model.LabelValue(ec2InstancePrivateIP)
+ }
+ if ec2InstancePublicIP != "" {
+ labels[ecsLabelEC2InstancePublicIP] = model.LabelValue(ec2InstancePublicIP)
+ }
+ if publicIP != "" {
+ labels[ecsLabelPublicIP] = model.LabelValue(publicIP)
+ }
+
+ if task.PlatformFamily != nil {
+ labels[ecsLabelPlatformFamily] = model.LabelValue(*task.PlatformFamily)
+ }
+ if task.PlatformVersion != nil {
+ labels[ecsLabelPlatformVersion] = model.LabelValue(*task.PlatformVersion)
+ }
+
+ labels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(ipAddress, strconv.Itoa(d.cfg.Port)))
+
+ // Add cluster tags
+ for _, clusterTag := range cluster.Tags {
+ if clusterTag.Key != nil && clusterTag.Value != nil {
+ labels[model.LabelName(ecsLabelTagCluster+strutil.SanitizeLabelName(*clusterTag.Key))] = model.LabelValue(*clusterTag.Value)
+ }
+ }
+
+ // If this is not a standalone task, add service information and tags
+ if !isStandaloneTask(task) {
+ service, ok := services[getServiceNameFromTaskGroup(task)]
+ if !ok {
+ d.logger.Debug("Service not found for task", "task", *task.TaskArn, "service", getServiceNameFromTaskGroup(task))
+ }
+ if service.ServiceName != nil {
+ labels[ecsLabelService] = model.LabelValue(*service.ServiceName)
+ }
+ if service.ServiceArn != nil {
+ labels[ecsLabelServiceARN] = model.LabelValue(*service.ServiceArn)
+ }
+ if service.Status != nil {
+ labels[ecsLabelServiceStatus] = model.LabelValue(*service.Status)
+ }
+
+ // Add service tags
+ for _, serviceTag := range service.Tags {
+ if serviceTag.Key != nil && serviceTag.Value != nil {
+ labels[model.LabelName(ecsLabelTagService+strutil.SanitizeLabelName(*serviceTag.Key))] = model.LabelValue(*serviceTag.Value)
+ }
+ }
+ }
+
+ // Add task tags
+ for _, taskTag := range task.Tags {
+ if taskTag.Key != nil && taskTag.Value != nil {
+ labels[model.LabelName(ecsLabelTagTask+strutil.SanitizeLabelName(*taskTag.Key))] = model.LabelValue(*taskTag.Value)
+ }
+ }
+
+ // Add EC2 instance tags (if running on EC2)
+ if ec2InstanceID != "" {
+ if info, ok := ec2Instances[ec2InstanceID]; ok {
+ for tagKey, tagValue := range info.tags {
+ labels[model.LabelName(ecsLabelTagEC2+strutil.SanitizeLabelName(tagKey))] = model.LabelValue(tagValue)
+ }
+ }
+ }
+
+ taskMu.Lock()
+ taskTargets = append(taskTargets, labels)
+ taskMu.Unlock()
+ }(cluster, services, task, containerInstances, ec2Instances, eniToPublicIP)
+ }
+
+ taskWg.Wait()
+
+ // Add this cluster's task targets to the overall collection
+ clusterMu.Lock()
+ clusterTargets = append(clusterTargets, taskTargets...)
+ clusterMu.Unlock()
+ }(clusterMap[clusterARN], serviceMap[clusterARN], taskARNs)
}
- wg.Wait()
+ clusterWg.Wait()
+
+ // Set all targets to the target group
+ tg.Targets = clusterTargets
return []*targetgroup.Group{tg}, nil
}
+
+func isStandaloneTask(task types.Task) bool {
+ // A standalone task will have a group of "family:task-def-name"
+ return task.Group != nil && strings.HasPrefix(*task.Group, "family:")
+}
+
+func getServiceNameFromTaskGroup(task types.Task) string {
+ return strings.Split(*task.Group, ":")[1]
+}
diff --git a/discovery/aws/ecs_test.go b/discovery/aws/ecs_test.go
index 1cb48b27fa..bb1f96a28e 100644
--- a/discovery/aws/ecs_test.go
+++ b/discovery/aws/ecs_test.go
@@ -214,7 +214,6 @@ func TestECSDiscoveryDescribeClusters(t *testing.T) {
func TestECSDiscoveryListServiceARNs(t *testing.T) {
ctx := context.Background()
- // iterate through the test cases
for _, tt := range []struct {
name string
ecsData *ecsDataStore
@@ -225,33 +224,18 @@ func TestECSDiscoveryListServiceARNs(t *testing.T) {
name: "SingleClusterWithServices",
ecsData: &ecsDataStore{
region: "us-west-2",
- clusters: []ecsTypes.Cluster{
- {
- ClusterName: strptr("test-cluster"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("ACTIVE"),
- },
- },
services: []ecsTypes.Service{
{
ServiceName: strptr("web-service"),
ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
+ Status: strptr("ACTIVE"),
},
{
ServiceName: strptr("api-service"),
ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- },
- {
- // this is to test the old arn format without the cluster name in the service arn
- // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-arn-migration.html
- ServiceName: strptr("old-api-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/old-api-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
+ Status: strptr("ACTIVE"),
},
},
},
@@ -260,70 +244,50 @@ func TestECSDiscoveryListServiceARNs(t *testing.T) {
"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": {
"arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service",
"arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service",
- "arn:aws:ecs:us-west-2:123456789012:service/old-api-service",
},
},
},
{
- name: "MultipleClustesWithServices",
+ name: "MultipleClusters",
ecsData: &ecsDataStore{
- region: "us-east-1",
- clusters: []ecsTypes.Cluster{
- {
- ClusterName: strptr("cluster-1"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"),
- Status: strptr("ACTIVE"),
- },
- {
- ClusterName: strptr("cluster-2"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"),
- Status: strptr("ACTIVE"),
- },
- },
+ region: "us-west-2",
services: []ecsTypes.Service{
{
- ServiceName: strptr("service-1"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"),
- Status: strptr("RUNNING"),
+ ServiceName: strptr("web-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/cluster-1/web-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/cluster-1"),
+ Status: strptr("ACTIVE"),
},
{
- ServiceName: strptr("service-2"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"),
- Status: strptr("RUNNING"),
+ ServiceName: strptr("api-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/cluster-2/api-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/cluster-2"),
+ Status: strptr("ACTIVE"),
},
},
},
clusterARNs: []string{
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1",
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2",
+ "arn:aws:ecs:us-west-2:123456789012:cluster/cluster-1",
+ "arn:aws:ecs:us-west-2:123456789012:cluster/cluster-2",
},
expected: map[string][]string{
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": {
- "arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1",
+ "arn:aws:ecs:us-west-2:123456789012:cluster/cluster-1": {
+ "arn:aws:ecs:us-west-2:123456789012:service/cluster-1/web-service",
},
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": {
- "arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2",
+ "arn:aws:ecs:us-west-2:123456789012:cluster/cluster-2": {
+ "arn:aws:ecs:us-west-2:123456789012:service/cluster-2/api-service",
},
},
},
{
- name: "ClusterWithNoServices",
+ name: "EmptyCluster",
ecsData: &ecsDataStore{
- region: "us-west-2",
- clusters: []ecsTypes.Cluster{
- {
- ClusterName: strptr("empty-cluster"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster"),
- Status: strptr("ACTIVE"),
- },
- },
+ region: "us-west-2",
services: []ecsTypes.Service{},
},
- clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster"},
+ clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"},
expected: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster": nil,
+ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": nil,
},
},
} {
@@ -334,7 +298,7 @@ func TestECSDiscoveryListServiceARNs(t *testing.T) {
ecs: client,
cfg: &ECSSDConfig{
Region: tt.ecsData.region,
- RequestConcurrency: 1,
+ RequestConcurrency: 2,
},
}
@@ -348,113 +312,178 @@ func TestECSDiscoveryListServiceARNs(t *testing.T) {
func TestECSDiscoveryDescribeServices(t *testing.T) {
ctx := context.Background()
- // iterate through the test cases
for _, tt := range []struct {
- name string
- ecsData *ecsDataStore
- clusterServiceARNsMap map[string][]string
- expected map[string][]ecsTypes.Service
+ name string
+ ecsData *ecsDataStore
+ clusterARN string
+ serviceARNs []string
+ expected map[string]ecsTypes.Service
}{
{
- name: "SingleClusterServices",
+ name: "ServicesWithTags",
ecsData: &ecsDataStore{
region: "us-west-2",
services: []ecsTypes.Service{
{
- ServiceName: strptr("web-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"),
+ ServiceName: strptr("web-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
Tags: []ecsTypes.Tag{
{Key: strptr("Environment"), Value: strptr("production")},
+ {Key: strptr("Team"), Value: strptr("platform")},
},
},
{
- ServiceName: strptr("api-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"),
- },
- },
- },
- clusterServiceARNsMap: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": {
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service",
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service",
- },
- },
- expected: map[string][]ecsTypes.Service{
- "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": {
- {
- ServiceName: strptr("web-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"),
+ ServiceName: strptr("api-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
Tags: []ecsTypes.Tag{
- {Key: strptr("Environment"), Value: strptr("production")},
+ {Key: strptr("Environment"), Value: strptr("staging")},
},
},
- {
- ServiceName: strptr("api-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"),
+ },
+ },
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ serviceARNs: []string{
+ "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service",
+ "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service",
+ },
+ expected: map[string]ecsTypes.Service{
+ "web-service": {
+ ServiceName: strptr("web-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
+ Tags: []ecsTypes.Tag{
+ {Key: strptr("Environment"), Value: strptr("production")},
+ {Key: strptr("Team"), Value: strptr("platform")},
+ },
+ },
+ "api-service": {
+ ServiceName: strptr("api-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
+ Tags: []ecsTypes.Tag{
+ {Key: strptr("Environment"), Value: strptr("staging")},
},
},
},
},
{
- name: "MultipleClustersServices",
+ name: "EmptyServiceList",
ecsData: &ecsDataStore{
- region: "us-east-1",
- services: []ecsTypes.Service{
+ region: "us-west-2",
+ services: []ecsTypes.Service{},
+ },
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ serviceARNs: []string{},
+ expected: map[string]ecsTypes.Service{},
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockECSClient(tt.ecsData)
+
+ d := &ECSDiscovery{
+ ecs: client,
+ cfg: &ECSSDConfig{
+ Region: tt.ecsData.region,
+ RequestConcurrency: 2,
+ },
+ }
+
+ services, err := d.describeServices(ctx, tt.clusterARN, tt.serviceARNs)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, services)
+ })
+ }
+}
+
+func TestECSDiscoveryDescribeContainerInstances(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ ecsData *ecsDataStore
+ clusterARN string
+ tasks []ecsTypes.Task
+ expected map[string]string
+ }{
+ {
+ name: "EC2Tasks",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ containerInstances: []ecsTypes.ContainerInstance{
{
- ServiceName: strptr("service-1"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-1:1"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ Ec2InstanceId: strptr("i-1234567890abcdef0"),
},
{
- ServiceName: strptr("service-2"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"),
- Status: strptr("DRAINING"),
- TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-2:1"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/xyz789"),
+ Ec2InstanceId: strptr("i-0987654321fedcba0"),
},
},
},
- clusterServiceARNsMap: map[string][]string{
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": {
- "arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1",
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
},
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": {
- "arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2",
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/xyz789"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
},
},
- expected: map[string][]ecsTypes.Service{
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": {
+ expected: map[string]string{
+ "arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123": "i-1234567890abcdef0",
+ "arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/xyz789": "i-0987654321fedcba0",
+ },
+ },
+ {
+ name: "FargateTasks",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ containerInstances: []ecsTypes.ContainerInstance{},
+ },
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ },
+ },
+ expected: map[string]string{},
+ },
+ {
+ name: "MixedTasks",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ containerInstances: []ecsTypes.ContainerInstance{
{
- ServiceName: strptr("service-1"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"),
- Status: strptr("RUNNING"),
- TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-1:1"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ Ec2InstanceId: strptr("i-1234567890abcdef0"),
},
},
- "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": {
- {
- ServiceName: strptr("service-2"),
- ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"),
- ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"),
- Status: strptr("DRAINING"),
- TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-2:1"),
- },
+ },
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-ec2"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
},
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-fargate"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ },
+ },
+ expected: map[string]string{
+ "arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123": "i-1234567890abcdef0",
},
},
} {
@@ -465,13 +494,267 @@ func TestECSDiscoveryDescribeServices(t *testing.T) {
ecs: client,
cfg: &ECSSDConfig{
Region: tt.ecsData.region,
- RequestConcurrency: 1,
+ RequestConcurrency: 2,
},
}
- serviceMap, err := d.describeServices(ctx, tt.clusterServiceARNsMap)
+ containerInstances, err := d.describeContainerInstances(ctx, tt.clusterARN, tt.tasks)
require.NoError(t, err)
- require.Equal(t, tt.expected, serviceMap)
+ require.Equal(t, tt.expected, containerInstances)
+ })
+ }
+}
+
+func TestECSDiscoveryDescribeEC2Instances(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ ecsData *ecsDataStore
+ instanceIDs []string
+ expected map[string]ec2InstanceInfo
+ }{
+ {
+ name: "InstancesWithTags",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ ec2Instances: map[string]ec2InstanceInfo{
+ "i-1234567890abcdef0": {
+ privateIP: "10.0.1.50",
+ publicIP: "54.1.2.3",
+ subnetID: "subnet-12345",
+ instanceType: "t3.medium",
+ tags: map[string]string{
+ "Name": "ecs-host-1",
+ "Environment": "production",
+ },
+ },
+ "i-0987654321fedcba0": {
+ privateIP: "10.0.1.75",
+ publicIP: "54.2.3.4",
+ subnetID: "subnet-67890",
+ instanceType: "t3.large",
+ tags: map[string]string{
+ "Name": "ecs-host-2",
+ "Team": "platform",
+ },
+ },
+ },
+ },
+ instanceIDs: []string{"i-1234567890abcdef0", "i-0987654321fedcba0"},
+ expected: map[string]ec2InstanceInfo{
+ "i-1234567890abcdef0": {
+ privateIP: "10.0.1.50",
+ publicIP: "54.1.2.3",
+ subnetID: "subnet-12345",
+ instanceType: "t3.medium",
+ tags: map[string]string{
+ "Name": "ecs-host-1",
+ "Environment": "production",
+ },
+ },
+ "i-0987654321fedcba0": {
+ privateIP: "10.0.1.75",
+ publicIP: "54.2.3.4",
+ subnetID: "subnet-67890",
+ instanceType: "t3.large",
+ tags: map[string]string{
+ "Name": "ecs-host-2",
+ "Team": "platform",
+ },
+ },
+ },
+ },
+ {
+ name: "EmptyList",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ ec2Instances: map[string]ec2InstanceInfo{},
+ },
+ instanceIDs: []string{},
+ expected: map[string]ec2InstanceInfo{},
+ },
+ {
+ name: "InstanceWithoutPublicIP",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ ec2Instances: map[string]ec2InstanceInfo{
+ "i-privateonly": {
+ privateIP: "10.0.1.100",
+ publicIP: "",
+ subnetID: "subnet-private",
+ instanceType: "t3.micro",
+ tags: map[string]string{},
+ },
+ },
+ },
+ instanceIDs: []string{"i-privateonly"},
+ expected: map[string]ec2InstanceInfo{
+ "i-privateonly": {
+ privateIP: "10.0.1.100",
+ publicIP: "",
+ subnetID: "subnet-private",
+ instanceType: "t3.micro",
+ tags: map[string]string{},
+ },
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ ec2Client := newMockECSEC2Client(tt.ecsData.ec2Instances, nil)
+
+ d := &ECSDiscovery{
+ ec2: ec2Client,
+ cfg: &ECSSDConfig{
+ Region: tt.ecsData.region,
+ RequestConcurrency: 2,
+ },
+ }
+
+ instances, err := d.describeEC2Instances(ctx, tt.instanceIDs)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, instances)
+ })
+ }
+}
+
+func TestECSDiscoveryDescribeNetworkInterfaces(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ ecsData *ecsDataStore
+ tasks []ecsTypes.Task
+ expected map[string]string
+ }{
+ {
+ name: "AwsvpcTasksWithPublicIPs",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ eniPublicIPs: map[string]string{
+ "eni-12345": "52.1.2.3",
+ "eni-67890": "52.2.3.4",
+ },
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-12345")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
+ },
+ },
+ },
+ },
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-67890")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.200")},
+ },
+ },
+ },
+ },
+ },
+ expected: map[string]string{
+ "eni-12345": "52.1.2.3",
+ "eni-67890": "52.2.3.4",
+ },
+ },
+ {
+ name: "AwsvpcTasksWithoutPublicIPs",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ eniPublicIPs: map[string]string{},
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-private")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
+ },
+ },
+ },
+ },
+ },
+ expected: map[string]string{},
+ },
+ {
+ name: "BridgeTasksNoENI",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ eniPublicIPs: map[string]string{},
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
+ // No ENI attachment for bridge networking
+ Attachments: []ecsTypes.Attachment{},
+ },
+ },
+ expected: map[string]string{},
+ },
+ {
+ name: "MixedTasks",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ eniPublicIPs: map[string]string{
+ "eni-fargate": "52.1.2.3",
+ },
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-fargate"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-fargate")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
+ },
+ },
+ },
+ },
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
+ Attachments: []ecsTypes.Attachment{},
+ },
+ },
+ expected: map[string]string{
+ "eni-fargate": "52.1.2.3",
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ ec2Client := newMockECSEC2Client(nil, tt.ecsData.eniPublicIPs)
+
+ d := &ECSDiscovery{
+ ec2: ec2Client,
+ cfg: &ECSSDConfig{
+ Region: tt.ecsData.region,
+ RequestConcurrency: 2,
+ },
+ }
+
+ eniMap, err := d.describeNetworkInterfaces(ctx, tt.tasks)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, eniMap)
})
}
}
@@ -481,13 +764,13 @@ func TestECSDiscoveryListTaskARNs(t *testing.T) {
// iterate through the test cases
for _, tt := range []struct {
- name string
- ecsData *ecsDataStore
- services []ecsTypes.Service
- expected map[string][]string
+ name string
+ ecsData *ecsDataStore
+ clusterARNs []string
+ expected map[string][]string
}{
{
- name: "ServicesWithTasks",
+ name: "TasksInCluster",
ecsData: &ecsDataStore{
region: "us-west-2",
tasks: []ecsTypes.Task{
@@ -511,46 +794,24 @@ func TestECSDiscoveryListTaskARNs(t *testing.T) {
},
},
},
- services: []ecsTypes.Service{
- {
- ServiceName: strptr("web-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- },
- {
- ServiceName: strptr("api-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- },
- },
+ clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"},
expected: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": {
+ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": {
"arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1",
"arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2",
- },
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": {
"arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-3",
},
},
},
{
- name: "ServiceWithNoTasks",
+ name: "EmptyCluster",
ecsData: &ecsDataStore{
region: "us-west-2",
tasks: []ecsTypes.Task{},
},
- services: []ecsTypes.Service{
- {
- ServiceName: strptr("empty-service"),
- ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/empty-service"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Status: strptr("RUNNING"),
- },
- },
+ clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"},
expected: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/empty-service": nil,
+ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": nil,
},
},
} {
@@ -565,7 +826,7 @@ func TestECSDiscoveryListTaskARNs(t *testing.T) {
},
}
- taskMap, err := d.listTaskARNs(ctx, tt.services)
+ taskMap, err := d.listTaskARNs(ctx, tt.clusterARNs)
require.NoError(t, err)
require.Equal(t, tt.expected, taskMap)
})
@@ -577,11 +838,11 @@ func TestECSDiscoveryDescribeTasks(t *testing.T) {
// iterate through the test cases
for _, tt := range []struct {
- name string
- ecsData *ecsDataStore
- clusterARN string
- taskARNsMap map[string][]string
- expected map[string][]ecsTypes.Task
+ name string
+ ecsData *ecsDataStore
+ clusterARN string
+ taskARNs []string
+ expected []ecsTypes.Task
}{
{
name: "TasksInCluster",
@@ -608,47 +869,39 @@ func TestECSDiscoveryDescribeTasks(t *testing.T) {
},
},
clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
- taskARNsMap: map[string][]string{
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": {
- "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1",
- },
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": {
- "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2",
- },
+ taskARNs: []string{
+ "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1",
+ "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2",
},
- expected: map[string][]ecsTypes.Task{
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": {
- {
- TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Group: strptr("service:web-service"),
- TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"),
- LastStatus: strptr("RUNNING"),
- Tags: []ecsTypes.Tag{
- {Key: strptr("Environment"), Value: strptr("production")},
- },
+ expected: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Group: strptr("service:web-service"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"),
+ LastStatus: strptr("RUNNING"),
+ Tags: []ecsTypes.Tag{
+ {Key: strptr("Environment"), Value: strptr("production")},
},
},
- "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": {
- {
- TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"),
- ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
- Group: strptr("service:api-service"),
- TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"),
- LastStatus: strptr("RUNNING"),
- },
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Group: strptr("service:api-service"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"),
+ LastStatus: strptr("RUNNING"),
},
},
},
{
- name: "EmptyTaskARNsMap",
+ name: "EmptyTaskList",
ecsData: &ecsDataStore{
region: "us-west-2",
tasks: []ecsTypes.Task{},
},
- clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
- taskARNsMap: map[string][]string{},
- expected: map[string][]ecsTypes.Task{},
+ clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster",
+ taskARNs: []string{},
+ expected: nil,
},
} {
t.Run(tt.name, func(t *testing.T) {
@@ -662,9 +915,9 @@ func TestECSDiscoveryDescribeTasks(t *testing.T) {
},
}
- taskMap, err := d.describeTasks(ctx, tt.clusterARN, tt.taskARNsMap)
+ tasks, err := d.describeTasks(ctx, tt.clusterARN, tt.taskARNs)
require.NoError(t, err)
- require.Equal(t, tt.expected, taskMap)
+ require.Equal(t, tt.expected, tasks)
})
}
}
@@ -836,6 +1089,75 @@ func TestECSDiscoveryRefresh(t *testing.T) {
},
},
},
+ {
+ name: "StandaloneTaskNoService",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ clusters: []ecsTypes.Cluster{
+ {
+ ClusterName: strptr("standalone-cluster"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/standalone-cluster"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ services: []ecsTypes.Service{},
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/standalone-cluster/task-standalone"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/standalone-cluster"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/standalone-task:1"),
+ Group: strptr("family:standalone-task"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ LastStatus: strptr("RUNNING"),
+ DesiredStatus: strptr("RUNNING"),
+ HealthStatus: ecsTypes.HealthStatusHealthy,
+ AvailabilityZone: strptr("us-west-2a"),
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("subnetId"), Value: strptr("subnet-standalone-1")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.4.10")},
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-standalone-123")},
+ },
+ },
+ },
+ Tags: []ecsTypes.Tag{
+ {Key: strptr("Role"), Value: strptr("batch")},
+ },
+ },
+ },
+ eniPublicIPs: map[string]string{
+ "eni-standalone-123": "52.4.5.6",
+ },
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("10.0.4.10:80"),
+ "__meta_ecs_cluster": model.LabelValue("standalone-cluster"),
+ "__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/standalone-cluster"),
+ "__meta_ecs_task_group": model.LabelValue("family:standalone-task"),
+ "__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/standalone-cluster/task-standalone"),
+ "__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/standalone-task:1"),
+ "__meta_ecs_region": model.LabelValue("us-west-2"),
+ "__meta_ecs_availability_zone": model.LabelValue("us-west-2a"),
+ "__meta_ecs_subnet_id": model.LabelValue("subnet-standalone-1"),
+ "__meta_ecs_ip_address": model.LabelValue("10.0.4.10"),
+ "__meta_ecs_launch_type": model.LabelValue("FARGATE"),
+ "__meta_ecs_desired_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_last_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_health_status": model.LabelValue("HEALTHY"),
+ "__meta_ecs_network_mode": model.LabelValue("awsvpc"),
+ "__meta_ecs_public_ip": model.LabelValue("52.4.5.6"),
+ "__meta_ecs_tag_task_Role": model.LabelValue("batch"),
+ },
+ },
+ },
+ },
+ },
{
name: "TaskWithBridgeNetworking",
ecsData: &ecsDataStore{
@@ -1184,7 +1506,14 @@ func TestECSDiscoveryRefresh(t *testing.T) {
groups, err := d.refresh(ctx)
require.NoError(t, err)
- require.Equal(t, tt.expected, groups)
+ if tt.name == "MixedNetworkingModes" {
+ // Use ElementsMatch for tests with multiple tasks as goroutines can affect order
+ require.Len(t, groups, len(tt.expected))
+ require.Equal(t, tt.expected[0].Source, groups[0].Source)
+ require.ElementsMatch(t, tt.expected[0].Targets, groups[0].Targets)
+ } else {
+ require.Equal(t, tt.expected, groups)
+ }
})
}
}
@@ -1381,3 +1710,98 @@ func (m *mockECSEC2Client) DescribeNetworkInterfaces(_ context.Context, input *e
NetworkInterfaces: networkInterfaces,
}, nil
}
+
+func TestIsStandaloneTask(t *testing.T) {
+ tests := []struct {
+ name string
+ task ecsTypes.Task
+ expected bool
+ }{
+ {
+ name: "StandaloneTask",
+ task: ecsTypes.Task{
+ Group: strptr("family:my-task-definition"),
+ },
+ expected: true,
+ },
+ {
+ name: "ServiceTask",
+ task: ecsTypes.Task{
+ Group: strptr("service:my-service"),
+ },
+ expected: false,
+ },
+ {
+ name: "ServiceTaskWithColon",
+ task: ecsTypes.Task{
+ Group: strptr("service:my:service:name"),
+ },
+ expected: false,
+ },
+ {
+ name: "NilGroup",
+ task: ecsTypes.Task{
+ Group: nil,
+ },
+ expected: false,
+ },
+ {
+ name: "EmptyGroup",
+ task: ecsTypes.Task{
+ Group: strptr(""),
+ },
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := isStandaloneTask(tt.task)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
+
+func TestGetServiceNameFromTaskGroup(t *testing.T) {
+ tests := []struct {
+ name string
+ task ecsTypes.Task
+ expected string
+ }{
+ {
+ name: "SimpleServiceName",
+ task: ecsTypes.Task{
+ Group: strptr("service:my-service"),
+ },
+ expected: "my-service",
+ },
+ {
+ name: "ServiceNameWithHyphens",
+ task: ecsTypes.Task{
+ Group: strptr("service:web-api-service"),
+ },
+ expected: "web-api-service",
+ },
+ {
+ name: "ServiceNameWithColons",
+ task: ecsTypes.Task{
+ Group: strptr("service:my:service:name"),
+ },
+ expected: "my",
+ },
+ {
+ name: "FamilyGroup",
+ task: ecsTypes.Task{
+ Group: strptr("family:my-task-def"),
+ },
+ expected: "my-task-def",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := getServiceNameFromTaskGroup(tt.task)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go
index b13f26cc5f..69a5b6625f 100644
--- a/discovery/aws/lightsail.go
+++ b/discovery/aws/lightsail.go
@@ -26,7 +26,6 @@ import (
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
- "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/lightsail"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/smithy-go"
@@ -106,30 +105,9 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
- if c.Region == "" {
- cfg, err := awsConfig.LoadDefaultConfig(context.Background())
- if err != nil {
- return err
- }
-
- if cfg.Region != "" {
- // Use the region from the AWS config. It will load environment variables and shared config files.
- c.Region = cfg.Region
- }
-
- if c.Region == "" {
- // Try to get the region from the instance metadata service (IMDS).
- imdsClient := imds.NewFromConfig(cfg)
- region, err := imdsClient.GetRegion(context.Background(), &imds.GetRegionInput{})
- if err != nil {
- return err
- }
- c.Region = region.Region
- }
- }
-
- if c.Region == "" {
- return errors.New("lightsail SD configuration requires a region")
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
}
return c.HTTPClientConfig.Validate()
diff --git a/discovery/aws/metrics_msk.go b/discovery/aws/metrics_msk.go
new file mode 100644
index 0000000000..fc69f57aa1
--- /dev/null
+++ b/discovery/aws/metrics_msk.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "github.com/prometheus/prometheus/discovery"
+)
+
+type mskMetrics struct {
+ refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+var _ discovery.DiscovererMetrics = (*mskMetrics)(nil)
+
+// Register implements discovery.DiscovererMetrics.
+func (*mskMetrics) Register() error {
+ return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (*mskMetrics) Unregister() {}
diff --git a/discovery/aws/msk.go b/discovery/aws/msk.go
new file mode 100644
index 0000000000..3ecc1e6235
--- /dev/null
+++ b/discovery/aws/msk.go
@@ -0,0 +1,451 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsConfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/service/kafka"
+ "github.com/aws/aws-sdk-go-v2/service/kafka/types"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/util/strutil"
+)
+
+type NodeType string
+
+const (
+ NodeTypeBroker NodeType = "BROKER"
+ NodeTypeController NodeType = "CONTROLLER"
+)
+
+const (
+ mskLabel = model.MetaLabelPrefix + "msk_"
+
+ // Cluster labels.
+ mskLabelCluster = mskLabel + "cluster_"
+ mskLabelClusterName = mskLabelCluster + "name"
+ mskLabelClusterARN = mskLabelCluster + "arn"
+ mskLabelClusterState = mskLabelCluster + "state"
+ mskLabelClusterType = mskLabelCluster + "type"
+ mskLabelClusterVersion = mskLabelCluster + "version"
+ mskLabelClusterJmxExporterEnabled = mskLabelCluster + "jmx_exporter_enabled"
+ mskLabelClusterConfigurationARN = mskLabelCluster + "configuration_arn"
+ mskLabelClusterConfigurationRevision = mskLabelCluster + "configuration_revision"
+ mskLabelClusterKafkaVersion = mskLabelCluster + "kafka_version"
+ mskLabelClusterTags = mskLabelCluster + "tag_"
+
+ // Node labels.
+ mskLabelNode = mskLabel + "node_"
+ mskLabelNodeType = mskLabelNode + "type"
+ mskLabelNodeARN = mskLabelNode + "arn"
+ mskLabelNodeAddedTime = mskLabelNode + "added_time"
+ mskLabelNodeInstanceType = mskLabelNode + "instance_type"
+ mskLabelNodeAttachedENI = mskLabelNode + "attached_eni"
+
+ // Broker labels.
+ mskLabelBroker = mskLabel + "broker_"
+ mskLabelBrokerEndpointIndex = mskLabelBroker + "endpoint_index"
+ mskLabelBrokerID = mskLabelBroker + "id"
+ mskLabelBrokerClientSubnet = mskLabelBroker + "client_subnet"
+ mskLabelBrokerClientVPCIP = mskLabelBroker + "client_vpc_ip"
+ mskLabelBrokerNodeExporterEnabled = mskLabelBroker + "node_exporter_enabled"
+
+ // Controller labels.
+ mskLabelController = mskLabel + "controller_"
+ mskLabelControllerEndpointIndex = mskLabelController + "endpoint_index"
+)
+
+// DefaultMSKSDConfig is the default MSK SD configuration.
+var DefaultMSKSDConfig = MSKSDConfig{
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ RequestConcurrency: 10,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+}
+
+func init() {
+ discovery.RegisterConfig(&MSKSDConfig{})
+}
+
+// MSKSDConfig is the configuration for MSK based service discovery.
+type MSKSDConfig struct {
+ Region string `yaml:"region"`
+ Endpoint string `yaml:"endpoint"`
+ AccessKey string `yaml:"access_key,omitempty"`
+ SecretKey config.Secret `yaml:"secret_key,omitempty"`
+ Profile string `yaml:"profile,omitempty"`
+ RoleARN string `yaml:"role_arn,omitempty"`
+ Clusters []string `yaml:"clusters,omitempty"`
+ Port int `yaml:"port"`
+ RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
+
+ RequestConcurrency int `yaml:"request_concurrency,omitempty"`
+ HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+}
+
+// NewDiscovererMetrics implements discovery.Config.
+func (*MSKSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+ return &mskMetrics{
+ refreshMetrics: rmi,
+ }
+}
+
+// Name returns the name of the MSK Config.
+func (*MSKSDConfig) Name() string { return "msk" }
+
+// NewDiscoverer returns a Discoverer for the MSK Config.
+func (c *MSKSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+ return NewMSKDiscovery(c, opts)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface for the MSK Config.
+func (c *MSKSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
+ *c = DefaultMSKSDConfig
+ type plain MSKSDConfig
+ err := unmarshal((*plain)(c))
+ if err != nil {
+ return err
+ }
+
+ c.Region, err = loadRegion(context.Background(), c.Region)
+ if err != nil {
+ return fmt.Errorf("could not determine AWS region: %w", err)
+ }
+
+ return c.HTTPClientConfig.Validate()
+}
+
+type mskClient interface {
+ DescribeClusterV2(context.Context, *kafka.DescribeClusterV2Input, ...func(*kafka.Options)) (*kafka.DescribeClusterV2Output, error)
+ ListClustersV2(context.Context, *kafka.ListClustersV2Input, ...func(*kafka.Options)) (*kafka.ListClustersV2Output, error)
+ ListNodes(context.Context, *kafka.ListNodesInput, ...func(*kafka.Options)) (*kafka.ListNodesOutput, error)
+}
+
+// MSKDiscovery periodically performs MSK-SD requests. It implements
+// the Discoverer interface.
+type MSKDiscovery struct {
+ *refresh.Discovery
+ logger *slog.Logger
+ cfg *MSKSDConfig
+ msk mskClient
+}
+
+// NewMSKDiscovery returns a new MSKDiscovery which periodically refreshes its targets.
+func NewMSKDiscovery(conf *MSKSDConfig, opts discovery.DiscovererOptions) (*MSKDiscovery, error) {
+ m, ok := opts.Metrics.(*mskMetrics)
+ if !ok {
+ return nil, errors.New("invalid discovery metrics type")
+ }
+
+ if opts.Logger == nil {
+ opts.Logger = promslog.NewNopLogger()
+ }
+ d := &MSKDiscovery{
+ logger: opts.Logger,
+ cfg: conf,
+ }
+ d.Discovery = refresh.NewDiscovery(
+ refresh.Options{
+ Logger: opts.Logger,
+ Mech: "msk",
+ Interval: time.Duration(d.cfg.RefreshInterval),
+ RefreshF: d.refresh,
+ MetricsInstantiator: m.refreshMetrics,
+ },
+ )
+ return d, nil
+}
+
+func (d *MSKDiscovery) initMskClient(ctx context.Context) error {
+ if d.msk != nil {
+ return nil
+ }
+
+ if d.cfg.Region == "" {
+ return errors.New("region must be set for MSK service discovery")
+ }
+
+ // Build the HTTP client from the provided HTTPClientConfig.
+ client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "msk_sd")
+ if err != nil {
+ return err
+ }
+
+ // Build the AWS config with the provided region.
+ var configOptions []func(*awsConfig.LoadOptions) error
+ configOptions = append(configOptions, awsConfig.WithRegion(d.cfg.Region))
+ configOptions = append(configOptions, awsConfig.WithHTTPClient(client))
+
+ // Only set static credentials if both access key and secret key are provided
+ // Otherwise, let AWS SDK use its default credential chain
+ if d.cfg.AccessKey != "" && d.cfg.SecretKey != "" {
+ credProvider := credentials.NewStaticCredentialsProvider(d.cfg.AccessKey, string(d.cfg.SecretKey), "")
+ configOptions = append(configOptions, awsConfig.WithCredentialsProvider(credProvider))
+ }
+
+ if d.cfg.Profile != "" {
+ configOptions = append(configOptions, awsConfig.WithSharedConfigProfile(d.cfg.Profile))
+ }
+
+ cfg, err := awsConfig.LoadDefaultConfig(ctx, configOptions...)
+ if err != nil {
+ d.logger.Error("Failed to create AWS config", "error", err)
+ return fmt.Errorf("could not create aws config: %w", err)
+ }
+
+ // If the role ARN is set, assume the role to get credentials and set the credentials provider in the config.
+ if d.cfg.RoleARN != "" {
+ assumeProvider := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), d.cfg.RoleARN)
+ cfg.Credentials = aws.NewCredentialsCache(assumeProvider)
+ }
+
+ d.msk = kafka.NewFromConfig(cfg, func(options *kafka.Options) {
+ if d.cfg.Endpoint != "" {
+ options.BaseEndpoint = &d.cfg.Endpoint
+ }
+ options.HTTPClient = client
+ })
+
+ // Test credentials by making a simple API call
+ testCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ _, err = d.msk.ListClustersV2(testCtx, &kafka.ListClustersV2Input{})
+ if err != nil {
+ d.logger.Error("Failed to test MSK credentials", "error", err)
+ return fmt.Errorf("MSK credential test failed: %w", err)
+ }
+
+ return nil
+}
+
+// describeClusters describes the clusters with the given ARNs and returns their details.
+func (d *MSKDiscovery) describeClusters(ctx context.Context, clusterARNs []string) ([]types.Cluster, error) {
+ var (
+ clusters []types.Cluster
+ mu sync.Mutex
+ )
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for _, clusterARN := range clusterARNs {
+ errg.Go(func() error {
+ cluster, err := d.msk.DescribeClusterV2(ectx, &kafka.DescribeClusterV2Input{
+ ClusterArn: aws.String(clusterARN),
+ })
+ if err != nil {
+ return fmt.Errorf("could not describe cluster %v: %w", clusterARN, err)
+ }
+ mu.Lock()
+ clusters = append(clusters, *cluster.ClusterInfo)
+ mu.Unlock()
+ return nil
+ })
+ }
+
+ return clusters, errg.Wait()
+}
+
+// listClusters lists all MSK clusters in the configured region and returns their details.
+func (d *MSKDiscovery) listClusters(ctx context.Context) ([]types.Cluster, error) {
+ var (
+ clusters []types.Cluster
+ nextToken *string
+ )
+ for {
+ listClustersInput := kafka.ListClustersV2Input{
+ ClusterTypeFilter: aws.String("PROVISIONED"),
+ MaxResults: aws.Int32(100),
+ NextToken: nextToken,
+ }
+
+ resp, err := d.msk.ListClustersV2(ctx, &listClustersInput)
+ if err != nil {
+ return nil, fmt.Errorf("could not list clusters: %w", err)
+ }
+
+ clusters = append(clusters, resp.ClusterInfoList...)
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
+ }
+
+ return clusters, nil
+}
+
+// listNodes lists all nodes for the given clusters and returns a map of cluster ARN to its nodes.
+func (d *MSKDiscovery) listNodes(ctx context.Context, clusters []types.Cluster) (map[string][]types.NodeInfo, error) {
+ clusterNodeMap := make(map[string][]types.NodeInfo)
+ mu := sync.Mutex{}
+ errg, ectx := errgroup.WithContext(ctx)
+ errg.SetLimit(d.cfg.RequestConcurrency)
+ for _, cluster := range clusters {
+ clusterARN := aws.ToString(cluster.ClusterArn)
+ errg.Go(func() error {
+ var clusterNodes []types.NodeInfo
+ var nextToken *string
+ for {
+ resp, err := d.msk.ListNodes(ectx, &kafka.ListNodesInput{
+ ClusterArn: aws.String(clusterARN),
+ MaxResults: aws.Int32(100),
+ NextToken: nextToken,
+ })
+ if err != nil {
+ return fmt.Errorf("could not list nodes for cluster %v: %w", clusterARN, err)
+ }
+
+ clusterNodes = append(clusterNodes, resp.NodeInfoList...)
+ if resp.NextToken == nil {
+ break
+ }
+ nextToken = resp.NextToken
+ }
+
+ mu.Lock()
+ clusterNodeMap[clusterARN] = clusterNodes
+ mu.Unlock()
+ return nil
+ })
+ }
+
+ return clusterNodeMap, errg.Wait()
+}
+
+func (d *MSKDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+ err := d.initMskClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ tg := &targetgroup.Group{
+ Source: d.cfg.Region,
+ }
+
+ var clusters []types.Cluster
+ if len(d.cfg.Clusters) > 0 {
+ clusters, err = d.describeClusters(ctx, d.cfg.Clusters)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ clusters, err = d.listClusters(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ clusterNodeMap, err := d.listNodes(ctx, clusters)
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ targetsMu sync.Mutex
+ wg sync.WaitGroup
+ )
+ for _, cluster := range clusters {
+ wg.Add(1)
+
+ go func(cluster types.Cluster, nodes []types.NodeInfo) {
+ defer wg.Done()
+ for _, node := range nodes {
+ labels := model.LabelSet{
+ mskLabelClusterName: model.LabelValue(aws.ToString(cluster.ClusterName)),
+ mskLabelClusterARN: model.LabelValue(aws.ToString(cluster.ClusterArn)),
+ mskLabelClusterState: model.LabelValue(string(cluster.State)),
+ mskLabelClusterType: model.LabelValue(string(cluster.ClusterType)),
+ mskLabelClusterVersion: model.LabelValue(aws.ToString(cluster.CurrentVersion)),
+ mskLabelNodeARN: model.LabelValue(aws.ToString(node.NodeARN)),
+ mskLabelNodeAddedTime: model.LabelValue(aws.ToString(node.AddedToClusterTime)),
+ mskLabelNodeInstanceType: model.LabelValue(aws.ToString(node.InstanceType)),
+ mskLabelClusterJmxExporterEnabled: model.LabelValue(strconv.FormatBool(*cluster.Provisioned.OpenMonitoring.Prometheus.JmxExporter.EnabledInBroker)),
+ mskLabelClusterConfigurationARN: model.LabelValue(aws.ToString(cluster.Provisioned.CurrentBrokerSoftwareInfo.ConfigurationArn)),
+ mskLabelClusterConfigurationRevision: model.LabelValue(strconv.FormatInt(*cluster.Provisioned.CurrentBrokerSoftwareInfo.ConfigurationRevision, 10)),
+ mskLabelClusterKafkaVersion: model.LabelValue(aws.ToString(cluster.Provisioned.CurrentBrokerSoftwareInfo.KafkaVersion)),
+ }
+
+ for key, value := range cluster.Tags {
+ labels[model.LabelName(mskLabelClusterTags+strutil.SanitizeLabelName(key))] = model.LabelValue(value)
+ }
+
+ switch nodeType(node) {
+ case NodeTypeBroker:
+ labels[mskLabelNodeType] = model.LabelValue(NodeTypeBroker)
+ labels[mskLabelNodeAttachedENI] = model.LabelValue(aws.ToString(node.BrokerNodeInfo.AttachedENIId))
+ labels[mskLabelBrokerID] = model.LabelValue(fmt.Sprintf("%.0f", aws.ToFloat64(node.BrokerNodeInfo.BrokerId)))
+ labels[mskLabelBrokerClientSubnet] = model.LabelValue(aws.ToString(node.BrokerNodeInfo.ClientSubnet))
+ labels[mskLabelBrokerClientVPCIP] = model.LabelValue(aws.ToString(node.BrokerNodeInfo.ClientVpcIpAddress))
+ labels[mskLabelBrokerNodeExporterEnabled] = model.LabelValue(strconv.FormatBool(*cluster.Provisioned.OpenMonitoring.Prometheus.NodeExporter.EnabledInBroker))
+
+ for idx, endpoint := range node.BrokerNodeInfo.Endpoints {
+ endpointLabels := labels.Clone()
+ endpointLabels[mskLabelBrokerEndpointIndex] = model.LabelValue(strconv.Itoa(idx))
+ endpointLabels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(endpoint, strconv.Itoa(d.cfg.Port)))
+
+ targetsMu.Lock()
+ tg.Targets = append(tg.Targets, endpointLabels)
+ targetsMu.Unlock()
+ }
+
+ case NodeTypeController:
+ labels[mskLabelNodeType] = model.LabelValue(NodeTypeController)
+
+ for idx, endpoint := range node.ControllerNodeInfo.Endpoints {
+ endpointLabels := labels.Clone()
+ endpointLabels[mskLabelControllerEndpointIndex] = model.LabelValue(strconv.Itoa(idx))
+ endpointLabels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(endpoint, strconv.Itoa(d.cfg.Port)))
+
+ targetsMu.Lock()
+ tg.Targets = append(tg.Targets, endpointLabels)
+ targetsMu.Unlock()
+ }
+ default:
+ continue
+ }
+ }
+ }(cluster, clusterNodeMap[aws.ToString(cluster.ClusterArn)])
+ }
+ wg.Wait()
+
+ return []*targetgroup.Group{tg}, nil
+}
+
+func nodeType(node types.NodeInfo) NodeType {
+ if node.BrokerNodeInfo != nil {
+ return NodeTypeBroker
+ } else if node.ControllerNodeInfo != nil {
+ return NodeTypeController
+ }
+ return ""
+}
diff --git a/discovery/aws/msk_test.go b/discovery/aws/msk_test.go
new file mode 100644
index 0000000000..b1d48a7ea6
--- /dev/null
+++ b/discovery/aws/msk_test.go
@@ -0,0 +1,1131 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/kafka"
+ "github.com/aws/aws-sdk-go-v2/service/kafka/types"
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+// Struct for test data.
+type mskDataStore struct {
+ region string
+ clusters []types.Cluster
+ nodes map[string][]types.NodeInfo // keyed by cluster ARN
+}
+
+func TestMSKDiscoveryListClusters(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ mskData *mskDataStore
+ expected []types.Cluster
+ }{
+ {
+ name: "MultipleClusters",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ {
+ ClusterName: strptr("prod-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/prod-cluster/def-456"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ },
+ },
+ expected: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ {
+ ClusterName: strptr("prod-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/prod-cluster/def-456"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ },
+ },
+ {
+ name: "SingleCluster",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("single-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/single-cluster/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ },
+ },
+ expected: []types.Cluster{
+ {
+ ClusterName: strptr("single-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/single-cluster/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ },
+ },
+ {
+ name: "NoClusters",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{},
+ },
+ expected: nil,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockMSKClient(tt.mskData)
+
+ d := &MSKDiscovery{
+ msk: client,
+ cfg: &MSKSDConfig{
+ Region: tt.mskData.region,
+ },
+ }
+
+ clusters, err := d.listClusters(ctx)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, clusters)
+ })
+ }
+}
+
+func TestMSKDiscoveryDescribeClusters(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ mskData *mskDataStore
+ clusterARNs []string
+ expected []types.Cluster
+ }{
+ {
+ name: "SingleCluster",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.2.3"),
+ Tags: map[string]string{
+ "Environment": "production",
+ "Team": "platform",
+ },
+ },
+ },
+ },
+ clusterARNs: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"},
+ expected: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.2.3"),
+ Tags: map[string]string{
+ "Environment": "production",
+ "Team": "platform",
+ },
+ },
+ },
+ },
+ {
+ name: "MultipleClusters",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("cluster-1"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/cluster-1/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ {
+ ClusterName: strptr("cluster-2"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/cluster-2/def-456"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ Tags: map[string]string{
+ "Stage": "prod",
+ },
+ },
+ },
+ },
+ clusterARNs: []string{
+ "arn:aws:kafka:us-east-1:123456789012:cluster/cluster-1/xyz-789",
+ "arn:aws:kafka:us-east-1:123456789012:cluster/cluster-2/def-456",
+ },
+ expected: []types.Cluster{
+ {
+ ClusterName: strptr("cluster-1"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/cluster-1/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ },
+ {
+ ClusterName: strptr("cluster-2"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/cluster-2/def-456"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ Tags: map[string]string{
+ "Stage": "prod",
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockMSKClient(tt.mskData)
+
+ d := &MSKDiscovery{
+ msk: client,
+ cfg: &MSKSDConfig{
+ Region: tt.mskData.region,
+ RequestConcurrency: 10,
+ },
+ }
+
+ clusters, err := d.describeClusters(ctx, tt.clusterARNs)
+ require.NoError(t, err)
+
+ // Sort clusters by ARN to handle non-deterministic ordering from goroutines
+ sort.Slice(clusters, func(i, j int) bool {
+ return aws.ToString(clusters[i].ClusterArn) < aws.ToString(clusters[j].ClusterArn)
+ })
+ sort.Slice(tt.expected, func(i, j int) bool {
+ return aws.ToString(tt.expected[i].ClusterArn) < aws.ToString(tt.expected[j].ClusterArn)
+ })
+
+ require.Equal(t, tt.expected, clusters)
+ })
+ }
+}
+
+func TestMSKDiscoveryListNodes(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ mskData *mskDataStore
+ clusters []types.Cluster
+ expected map[string][]types.NodeInfo
+ }{
+ {
+ name: "ClusterWithBrokers",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-12345"),
+ ClientVpcIpAddress: strptr("10.0.1.100"),
+ Endpoints: []string{"b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-12345"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ ClientSubnet: strptr("subnet-67890"),
+ ClientVpcIpAddress: strptr("10.0.1.101"),
+ Endpoints: []string{"b-2.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-67890"),
+ },
+ },
+ },
+ },
+ },
+ clusters: []types.Cluster{
+ {
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ },
+ },
+ expected: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-12345"),
+ ClientVpcIpAddress: strptr("10.0.1.100"),
+ Endpoints: []string{"b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-12345"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ ClientSubnet: strptr("subnet-67890"),
+ ClientVpcIpAddress: strptr("10.0.1.101"),
+ Endpoints: []string{"b-2.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-67890"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ClusterWithNoNodes",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789": {},
+ },
+ },
+ clusters: []types.Cluster{
+ {
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789"),
+ },
+ },
+ expected: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/empty-cluster/xyz-789": nil,
+ },
+ },
+ {
+ name: "MultipleClusters",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/cluster-1/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ },
+ },
+ },
+ "arn:aws:kafka:us-west-2:123456789012:cluster/cluster-2/def-456": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ InstanceType: strptr("kafka.m5.xlarge"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ },
+ },
+ },
+ },
+ },
+ clusters: []types.Cluster{
+ {
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/cluster-1/abc-123"),
+ },
+ {
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/cluster-2/def-456"),
+ },
+ },
+ expected: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/cluster-1/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ },
+ },
+ },
+ "arn:aws:kafka:us-west-2:123456789012:cluster/cluster-2/def-456": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ InstanceType: strptr("kafka.m5.xlarge"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ },
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockMSKClient(tt.mskData)
+
+ d := &MSKDiscovery{
+ msk: client,
+ cfg: &MSKSDConfig{
+ Region: tt.mskData.region,
+ RequestConcurrency: 10,
+ },
+ }
+
+ nodes, err := d.listNodes(ctx, tt.clusters)
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, nodes)
+ })
+ }
+}
+
+func TestMSKDiscoveryRefresh(t *testing.T) {
+ ctx := context.Background()
+
+ tests := []struct {
+ name string
+ mskData *mskDataStore
+ config *MSKSDConfig
+ expected []*targetgroup.Group
+ }{
+ {
+ name: "ClusterWithBrokersUsingClustersConfig",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.2.3"),
+ Tags: map[string]string{
+ "Environment": "production",
+ "Team": "platform",
+ },
+ Provisioned: &types.Provisioned{
+ CurrentBrokerSoftwareInfo: &types.BrokerSoftwareInfo{
+ ConfigurationArn: strptr("arn:aws:kafka:us-west-2:123456789012:configuration/my-config/abc-123"),
+ ConfigurationRevision: aws.Int64(1),
+ KafkaVersion: strptr("2.8.1"),
+ },
+ OpenMonitoring: &types.OpenMonitoringInfo{
+ Prometheus: &types.PrometheusInfo{
+ JmxExporter: &types.JmxExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ NodeExporter: &types.NodeExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ },
+ },
+ },
+ },
+ },
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-12345"),
+ ClientVpcIpAddress: strptr("10.0.1.100"),
+ Endpoints: []string{"b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-12345"),
+ },
+ },
+ },
+ },
+ },
+ config: &MSKSDConfig{
+ Region: "us-west-2",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"},
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("b-1.test-cluster.abc123.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("test-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/test-cluster/abc-123"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.2.3"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/my-config/abc-123"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("2.8.1"),
+ "__meta_msk_cluster_tag_Environment": model.LabelValue("production"),
+ "__meta_msk_cluster_tag_Team": model.LabelValue("platform"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-01-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-12345"),
+ "__meta_msk_broker_id": model.LabelValue("1"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-12345"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.1.100"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "NoClustersWithEmptyClustersConfig",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{},
+ },
+ config: &MSKSDConfig{
+ Region: "us-east-1",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: []string{}, // Empty clusters list uses listClusters
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-east-1",
+ },
+ },
+ },
+ {
+ name: "ClusterWithBrokersUsingListClusters",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("auto-discovered-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/auto-discovered-cluster/xyz-123"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.0.0"),
+ Provisioned: &types.Provisioned{
+ CurrentBrokerSoftwareInfo: &types.BrokerSoftwareInfo{
+ ConfigurationArn: strptr("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ ConfigurationRevision: aws.Int64(1),
+ KafkaVersion: strptr("3.3.1"),
+ },
+ OpenMonitoring: &types.OpenMonitoringInfo{
+ Prometheus: &types.PrometheusInfo{
+ JmxExporter: &types.JmxExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ NodeExporter: &types.NodeExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ },
+ },
+ },
+ },
+ },
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/auto-discovered-cluster/xyz-123": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-auto"),
+ AddedToClusterTime: strptr("2023-01-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-auto"),
+ ClientVpcIpAddress: strptr("10.0.1.200"),
+ Endpoints: []string{"b-auto.cluster.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-auto"),
+ },
+ },
+ },
+ },
+ },
+ config: &MSKSDConfig{
+ Region: "us-west-2",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: nil, // nil clusters list uses listClusters (backward compatibility)
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("b-auto.cluster.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("auto-discovered-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/auto-discovered-cluster/xyz-123"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/broker-auto"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-01-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-auto"),
+ "__meta_msk_broker_id": model.LabelValue("1"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-auto"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.1.200"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ClusterWithBrokersAndControllersUsingClustersConfig",
+ mskData: &mskDataStore{
+ region: "us-west-2",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("kraft-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("1.0.0"),
+ Tags: map[string]string{
+ "Type": "kraft",
+ },
+ Provisioned: &types.Provisioned{
+ CurrentBrokerSoftwareInfo: &types.BrokerSoftwareInfo{
+ ConfigurationArn: strptr("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ ConfigurationRevision: aws.Int64(2),
+ KafkaVersion: strptr("3.3.1"),
+ },
+ OpenMonitoring: &types.OpenMonitoringInfo{
+ Prometheus: &types.PrometheusInfo{
+ JmxExporter: &types.JmxExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ NodeExporter: &types.NodeExporterInfo{
+ EnabledInBroker: aws.Bool(false),
+ },
+ },
+ },
+ },
+ },
+ },
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ AddedToClusterTime: strptr("2023-06-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(1),
+ ClientSubnet: strptr("subnet-abc123"),
+ ClientVpcIpAddress: strptr("10.0.2.100"),
+ Endpoints: []string{"b-1.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-broker-1"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ AddedToClusterTime: strptr("2023-06-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(2),
+ ClientSubnet: strptr("subnet-abc124"),
+ ClientVpcIpAddress: strptr("10.0.2.101"),
+ Endpoints: []string{"b-2.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com"},
+ AttachedENIId: strptr("eni-broker-2"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/controller-1"),
+ AddedToClusterTime: strptr("2023-06-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ ControllerNodeInfo: &types.ControllerNodeInfo{
+ Endpoints: []string{"c-1.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com"},
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-west-2:123456789012:node/controller-2"),
+ AddedToClusterTime: strptr("2023-06-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ ControllerNodeInfo: &types.ControllerNodeInfo{
+ Endpoints: []string{"c-2.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com"},
+ },
+ },
+ },
+ },
+ },
+ config: &MSKSDConfig{
+ Region: "us-west-2",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: []string{"arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"},
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("b-1.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("kraft-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("2"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_cluster_tag_Type": model.LabelValue("kraft"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/broker-1"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-06-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-broker-1"),
+ "__meta_msk_broker_id": model.LabelValue("1"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-abc123"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.2.100"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("false"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("b-2.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("kraft-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("2"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_cluster_tag_Type": model.LabelValue("kraft"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/broker-2"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-06-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-broker-2"),
+ "__meta_msk_broker_id": model.LabelValue("2"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-abc124"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.2.101"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("false"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-1.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("kraft-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("2"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_cluster_tag_Type": model.LabelValue("kraft"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/controller-1"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-06-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-2.kraft-cluster.xyz789.kafka.us-west-2.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("kraft-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:cluster/kraft-cluster/xyz-789"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("1.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:configuration/config/xyz"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("2"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.3.1"),
+ "__meta_msk_cluster_tag_Type": model.LabelValue("kraft"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-west-2:123456789012:node/controller-2"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-06-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("0"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "NodesWithMultipleEndpointsUsingClustersConfig",
+ mskData: &mskDataStore{
+ region: "us-east-1",
+ clusters: []types.Cluster{
+ {
+ ClusterName: strptr("multi-endpoint-cluster"),
+ ClusterArn: strptr("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ State: types.ClusterStateActive,
+ ClusterType: types.ClusterTypeProvisioned,
+ CurrentVersion: strptr("2.0.0"),
+ Provisioned: &types.Provisioned{
+ CurrentBrokerSoftwareInfo: &types.BrokerSoftwareInfo{
+ ConfigurationArn: strptr("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ ConfigurationRevision: aws.Int64(1),
+ KafkaVersion: strptr("3.4.0"),
+ },
+ OpenMonitoring: &types.OpenMonitoringInfo{
+ Prometheus: &types.PrometheusInfo{
+ JmxExporter: &types.JmxExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ NodeExporter: &types.NodeExporterInfo{
+ EnabledInBroker: aws.Bool(true),
+ },
+ },
+ },
+ },
+ },
+ },
+ nodes: map[string][]types.NodeInfo{
+ "arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999": {
+ {
+ NodeARN: strptr("arn:aws:kafka:us-east-1:123456789012:node/broker-multi"),
+ AddedToClusterTime: strptr("2023-08-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.xlarge"),
+ BrokerNodeInfo: &types.BrokerNodeInfo{
+ BrokerId: aws.Float64(3),
+ ClientSubnet: strptr("subnet-multi-1"),
+ ClientVpcIpAddress: strptr("10.0.3.50"),
+ // Multiple endpoints for this broker
+ Endpoints: []string{"b-3-1.cluster.kafka.us-east-1.amazonaws.com", "b-3-2.cluster.kafka.us-east-1.amazonaws.com", "b-3-3.cluster.kafka.us-east-1.amazonaws.com"},
+ AttachedENIId: strptr("eni-multi-broker"),
+ },
+ },
+ {
+ NodeARN: strptr("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ AddedToClusterTime: strptr("2023-08-01T00:00:00Z"),
+ InstanceType: strptr("kafka.m5.large"),
+ ControllerNodeInfo: &types.ControllerNodeInfo{
+ // Multiple endpoints for this controller
+ Endpoints: []string{"c-1-1.cluster.kafka.us-east-1.amazonaws.com", "c-1-2.cluster.kafka.us-east-1.amazonaws.com", "c-1-3.cluster.kafka.us-east-1.amazonaws.com", "c-1-4.cluster.kafka.us-east-1.amazonaws.com"},
+ },
+ },
+ },
+ },
+ },
+ config: &MSKSDConfig{
+ Region: "us-east-1",
+ Port: 80,
+ RequestConcurrency: 10,
+ Clusters: []string{"arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"},
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-east-1",
+ Targets: []model.LabelSet{
+ // Broker with 3 endpoints - creates 3 targets with different endpoint indices
+ {
+ model.AddressLabel: model.LabelValue("b-3-1.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/broker-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.xlarge"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-multi-broker"),
+ "__meta_msk_broker_id": model.LabelValue("3"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-multi-1"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.3.50"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("b-3-2.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/broker-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.xlarge"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-multi-broker"),
+ "__meta_msk_broker_id": model.LabelValue("3"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-multi-1"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.3.50"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("1"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("b-3-3.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("BROKER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/broker-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.xlarge"),
+ "__meta_msk_node_attached_eni": model.LabelValue("eni-multi-broker"),
+ "__meta_msk_broker_id": model.LabelValue("3"),
+ "__meta_msk_broker_client_subnet": model.LabelValue("subnet-multi-1"),
+ "__meta_msk_broker_client_vpc_ip": model.LabelValue("10.0.3.50"),
+ "__meta_msk_broker_node_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_broker_endpoint_index": model.LabelValue("2"),
+ },
+ // Controller with 4 endpoints - creates 4 targets with different endpoint indices
+ {
+ model.AddressLabel: model.LabelValue("c-1-1.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("0"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-1-2.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("1"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-1-3.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("2"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("c-1-4.cluster.kafka.us-east-1.amazonaws.com:80"),
+ "__meta_msk_cluster_name": model.LabelValue("multi-endpoint-cluster"),
+ "__meta_msk_cluster_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:cluster/multi-endpoint-cluster/abc-999"),
+ "__meta_msk_cluster_state": model.LabelValue("ACTIVE"),
+ "__meta_msk_cluster_type": model.LabelValue("PROVISIONED"),
+ "__meta_msk_cluster_version": model.LabelValue("2.0.0"),
+ "__meta_msk_cluster_jmx_exporter_enabled": model.LabelValue("true"),
+ "__meta_msk_cluster_configuration_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:configuration/config/abc"),
+ "__meta_msk_cluster_configuration_revision": model.LabelValue("1"),
+ "__meta_msk_cluster_kafka_version": model.LabelValue("3.4.0"),
+ "__meta_msk_node_type": model.LabelValue("CONTROLLER"),
+ "__meta_msk_node_arn": model.LabelValue("arn:aws:kafka:us-east-1:123456789012:node/controller-multi"),
+ "__meta_msk_node_added_time": model.LabelValue("2023-08-01T00:00:00Z"),
+ "__meta_msk_node_instance_type": model.LabelValue("kafka.m5.large"),
+ "__meta_msk_controller_endpoint_index": model.LabelValue("3"),
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockMSKClient(tt.mskData)
+
+ config := tt.config
+ if config == nil {
+ // Default config for backward compatibility
+ config = &MSKSDConfig{
+ Region: tt.mskData.region,
+ Port: 80,
+ RequestConcurrency: 10,
+ }
+ }
+
+ d := &MSKDiscovery{
+ msk: client,
+ cfg: config,
+ }
+
+ groups, err := d.refresh(ctx)
+ require.NoError(t, err)
+
+ // Sort targets within each group by address to handle non-deterministic ordering from goroutines
+ for _, group := range groups {
+ if len(group.Targets) > 0 {
+ sort.Slice(group.Targets, func(i, j int) bool {
+ return string(group.Targets[i][model.AddressLabel]) < string(group.Targets[j][model.AddressLabel])
+ })
+ }
+ }
+ for _, group := range tt.expected {
+ if len(group.Targets) > 0 {
+ sort.Slice(group.Targets, func(i, j int) bool {
+ return string(group.Targets[i][model.AddressLabel]) < string(group.Targets[j][model.AddressLabel])
+ })
+ }
+ }
+
+ require.Equal(t, tt.expected, groups)
+ })
+ }
+}
+
+func TestNodeType(t *testing.T) {
+ tests := []struct {
+ name string
+ node types.NodeInfo
+ expected NodeType
+ }{
+ {
+ name: "BrokerNode",
+ node: types.NodeInfo{
+ BrokerNodeInfo: &types.BrokerNodeInfo{},
+ },
+ expected: NodeTypeBroker,
+ },
+ {
+ name: "ControllerNode",
+ node: types.NodeInfo{
+ ControllerNodeInfo: &types.ControllerNodeInfo{},
+ },
+ expected: NodeTypeController,
+ },
+ {
+ name: "UnknownNode",
+ node: types.NodeInfo{},
+ expected: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := nodeType(tt.node)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
+
+// MSK client mock.
+type mockMSKClient struct {
+ mskData mskDataStore
+}
+
+func newMockMSKClient(mskData *mskDataStore) *mockMSKClient {
+ return &mockMSKClient{
+ mskData: *mskData,
+ }
+}
+
+func (m *mockMSKClient) DescribeClusterV2(_ context.Context, input *kafka.DescribeClusterV2Input, _ ...func(*kafka.Options)) (*kafka.DescribeClusterV2Output, error) {
+ inputARN := aws.ToString(input.ClusterArn)
+ for i := range m.mskData.clusters {
+ cluster := &m.mskData.clusters[i]
+ if aws.ToString(cluster.ClusterArn) == inputARN {
+ return &kafka.DescribeClusterV2Output{
+ ClusterInfo: cluster,
+ }, nil
+ }
+ }
+
+ return nil, fmt.Errorf("cluster not found: %s", inputARN)
+}
+
+func (m *mockMSKClient) ListClustersV2(_ context.Context, input *kafka.ListClustersV2Input, _ ...func(*kafka.Options)) (*kafka.ListClustersV2Output, error) {
+ var clusters []types.Cluster
+
+ for _, cluster := range m.mskData.clusters {
+ // Apply cluster name filter if specified
+ if input.ClusterNameFilter != nil && *input.ClusterNameFilter != "" {
+ if cluster.ClusterName != nil && *cluster.ClusterName != *input.ClusterNameFilter {
+ continue
+ }
+ }
+
+ // Apply cluster type filter if specified
+ if input.ClusterTypeFilter != nil && *input.ClusterTypeFilter != "" {
+ if string(cluster.ClusterType) != *input.ClusterTypeFilter {
+ continue
+ }
+ }
+
+ clusters = append(clusters, cluster)
+ }
+
+ return &kafka.ListClustersV2Output{
+ ClusterInfoList: clusters,
+ }, nil
+}
+
+func (m *mockMSKClient) ListNodes(_ context.Context, input *kafka.ListNodesInput, _ ...func(*kafka.Options)) (*kafka.ListNodesOutput, error) {
+ clusterARN := aws.ToString(input.ClusterArn)
+ nodes, exists := m.mskData.nodes[clusterARN]
+ if !exists {
+ return &kafka.ListNodesOutput{
+ NodeInfoList: nil,
+ }, nil
+ }
+
+ return &kafka.ListNodesOutput{
+ NodeInfoList: nodes,
+ }, nil
+}
diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go
index a68a7c9a43..b4bba381a4 100644
--- a/discovery/kubernetes/kubernetes_test.go
+++ b/discovery/kubernetes/kubernetes_test.go
@@ -17,6 +17,7 @@ import (
"context"
"encoding/json"
"errors"
+ "os"
"testing"
"time"
@@ -42,6 +43,14 @@ import (
)
func TestMain(m *testing.M) {
+ // Disable the WatchListClient feature gate that is enabled by default in
+ // client-go v0.35.0+. The WatchList flow requires the server to support
+ // SendInitialEvents and to send a bookmark event with the
+ // "k8s.io/initial-events-end" annotation. The fake clientset used in tests
+ // does not support this protocol, causing informers to hang indefinitely
+ // waiting for the bookmark. Disabling this feature restores the traditional
+ // List+Watch flow which is compatible with the fake clientset.
+ os.Setenv("KUBE_FEATURE_WatchListClient", "false")
testutil.TolerantVerifyLeak(m)
}
@@ -52,7 +61,7 @@ func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime
// makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing.
func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer string, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
- clientset := fake.NewSimpleClientset(objects...)
+ clientset := fake.NewClientset(objects...)
fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer}
diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md
index d4a8cd4f20..251fdfd6a4 100644
--- a/docs/command-line/prometheus.md
+++ b/docs/command-line/prometheus.md
@@ -59,7 +59,7 @@ The Prometheus monitoring server
| --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
-| --enable-feature ... | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| --enable-feature ... | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors, promql-binop-fill-modifiers. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| --agent | Run Prometheus in 'Agent mode'. | |
| --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` |
diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md
index f6737bc37f..e8ffa75aaa 100644
--- a/docs/command-line/promtool.md
+++ b/docs/command-line/promtool.md
@@ -12,7 +12,7 @@ Tooling for the Prometheus monitoring system.
| -h, --help | Show context-sensitive help (also try --help-long and --help-man). |
| --version | Show application version. |
| --experimental | Enable experimental commands. |
-| --enable-feature ... | Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details |
+| --enable-feature ... | Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal, promql-duration-expr, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details |
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 4079daae02..49b7774b5f 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -984,11 +984,56 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_ecs_tag_task_`: each task tag value, keyed by tag name
* `__meta_ecs_tag_ec2_`: each EC2 instance tag value, keyed by tag name (EC2 launch type only)
+#### `msk`
+
+The `msk` role discovers targets from AWS MSK (Managed Streaming for Apache Kafka) provisioned clusters.
+
+**Important**: This service discovery only works with **provisioned clusters**. Serverless clusters are not supported as they do not expose individual broker nodes.
+
+Discovery includes:
+- **Broker nodes**: Kafka broker instances (supports both ZooKeeper-based and KRaft-based clusters)
+- **KRaft Controller nodes**: Controller instances (KRaft-based clusters only)
+
+Note: ZooKeeper nodes are not discoverable via the MSK API. For monitoring, MSK provides:
+- **JMX Exporter**: Available on both broker and KRaft controller nodes (when enabled)
+- **Node Exporter**: Available on broker nodes only (when enabled)
+
+The IAM credentials used must have the following permissions to discover
+scrape targets:
+
+- `kafka:DescribeClusterV2`
+- `kafka:ListClustersV2`
+- `kafka:ListNodes`
+
+The following meta labels are available on targets during [relabeling](#relabel_config):
+
+* `__meta_msk_cluster_name`: the name of the MSK cluster
+* `__meta_msk_cluster_arn`: the ARN of the MSK cluster
+* `__meta_msk_cluster_state`: the state of the MSK cluster (e.g., ACTIVE, CREATING, DELETING)
+* `__meta_msk_cluster_type`: the type of the MSK cluster (e.g., PROVISIONED, SERVERLESS)
+* `__meta_msk_cluster_version`: the current version of the MSK cluster
+* `__meta_msk_cluster_kafka_version`: the Kafka version running on the cluster
+* `__meta_msk_cluster_jmx_exporter_enabled`: whether JMX exporter is enabled on the cluster
+* `__meta_msk_cluster_configuration_arn`: the ARN of the MSK configuration
+* `__meta_msk_cluster_configuration_revision`: the revision of the MSK configuration
+* `__meta_msk_cluster_tag_`: each cluster tag value, keyed by tag name
+* `__meta_msk_node_type`: the type of the node (BROKER or CONTROLLER)
+* `__meta_msk_node_arn`: the ARN of the node
+* `__meta_msk_node_added_time`: the time the node was added to the cluster
+* `__meta_msk_node_instance_type`: the instance type of the node
+* `__meta_msk_node_attached_eni`: the ID of the attached ENI
+* `__meta_msk_broker_id`: the broker ID (broker nodes only)
+* `__meta_msk_broker_endpoint_index`: the index of the broker endpoint (broker nodes only)
+* `__meta_msk_broker_client_subnet`: the client subnet of the broker (broker nodes only)
+* `__meta_msk_broker_client_vpc_ip`: the VPC IP address of the broker (broker nodes only)
+* `__meta_msk_broker_node_exporter_enabled`: whether node exporter is enabled on brokers (broker nodes only)
+* `__meta_msk_controller_endpoint_index`: the index of the controller endpoint (controller nodes only)
+
See below for the configuration options for AWS discovery:
```yaml
# The AWS role to use for service discovery.
-# Must be one of: ec2, lightsail, or ecs.
+# Must be one of: ec2, lightsail, ecs, or msk.
role:
# The AWS region. If blank, the region from the instance metadata is used.
@@ -1024,7 +1069,7 @@ filters:
[ - name:
values: , [...] ]
-# List of ECS cluster ARNs to discover (ecs role only). If empty, all clusters in the region are discovered.
+# List of ECS or MSK cluster ARNs (ecs and msk roles only) to discover. If empty, all clusters in the region are discovered.
# This can significantly improve performance when you only need to monitor specific clusters.
[ clusters: [, ...] ]
@@ -2483,8 +2528,7 @@ in the configuration file), which can also be changed using relabeling.
### ``
-Nerve SD configurations allow retrieving scrape targets from [AirBnB's Nerve]
-(https://github.com/airbnb/nerve) which are stored in
+Nerve SD configurations allow retrieving scrape targets from [AirBnB's Nerve](https://github.com/airbnb/nerve) which are stored in
[Zookeeper](https://zookeeper.apache.org/).
The following meta labels are available on targets during [relabeling](#relabel_config):
@@ -2538,8 +2582,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
### ``
-Serverset SD configurations allow retrieving scrape targets from [Serversets]
-(https://github.com/twitter/finagle/tree/develop/finagle-serversets) which are
+Serverset SD configurations allow retrieving scrape targets from [Serversets](https://github.com/twitter/finagle/tree/develop/finagle-serversets) which are
stored in [Zookeeper](https://zookeeper.apache.org/). Serversets are commonly
used by [Finagle](https://twitter.github.io/finagle/) and
[Aurora](https://aurora.apache.org/).
@@ -2973,6 +3016,11 @@ labels:
[ : ... ]
```
+The special labels mentioned in the [relabeling](#relabel_config) section can also be
+used here to override the respective settings in the scrape configuration. This is
+especially useful when combined with any of the service discovery mechanisms that do not
+support these settings directly.
+
### ``
Relabeling is a powerful tool to dynamically rewrite the label set of a target before
@@ -2982,6 +3030,11 @@ in the configuration file.
Initially, aside from the configured per-target labels, a target's `job`
label is set to the `job_name` value of the respective scrape configuration.
+
+You can also use special labels like `__address__`, `__scheme__`, `__metrics_path__`,
+`__scrape_interval__`, `__scrape_timeout__` to customize the defined targets. These will
+override the respective settings in the scrape configuration.
+
The `__address__` label is set to the `:` address of the target.
After relabeling, the `instance` label is set to the value of `__address__` by default if
it was not set during relabeling.
@@ -3496,6 +3549,19 @@ with this feature.
# to the timestamp of the last appended sample for the same series.
[ out_of_order_time_window: | default = 0s ]
+# Configures the trigger point for compacting the stale series from the memory into persistent blocks
+# and remove those stale series from the memory.
+#
+# The threshold is a number between 0.0 and 1.0. It represents the ratio of stale series in the memory
+# to the total series in the memory. The stale series compaction is triggered when this ratio crosses
+# the configured threshold. It may not trigger the stale series compaction if the usual head compaction
+# is about to happen soon.
+#
+# If set to 0, stale series compaction is disabled.
+#
+# This is an experimental feature, this behaviour could change or be removed in the future.
+[ stale_series_compaction_threshold: | default = 0 ]
+
# Configures data retention settings for TSDB.
#
diff --git a/docs/feature_flags.md b/docs/feature_flags.md
index af08eebb45..247941c5ce 100644
--- a/docs/feature_flags.md
+++ b/docs/feature_flags.md
@@ -67,12 +67,12 @@ Currently, Prometheus supports start timestamps on the
* `PrometheusProto`
* `OpenMetrics1.0.0`
-
+
From the above, Prometheus recommends `PrometheusProto`. This is because OpenMetrics 1.0 Start Timestamp information is shared as a `_created` metric and parsing those
are prone to errors and expensive (thus, adding an overhead). You also need to be careful to not pollute your Prometheus with extra `_created` metrics.
-
-Therefore, when `created-timestamp-zero-ingestion` is enabled Prometheus changes the global `scrape_protocols` default configuration option to
+
+Therefore, when `created-timestamp-zero-ingestion` is enabled Prometheus changes the global `scrape_protocols` default configuration option to
`[ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ]`, resulting in negotiating the Prometheus Protobuf protocol first (unless the `scrape_protocols` option is set to a different value explicitly).
Besides enabling this feature in Prometheus, start timestamps need to be exposed by the application being scraped.
@@ -288,8 +288,8 @@ when wrong types are used on wrong functions, automatic renames, delta types and
### Behavior with metadata records
-When this feature is enabled and the metadata WAL records exists, in an unlikely situation when type or unit are different across those,
-the Prometheus outputs intends to prefer the `__type__` and `__unit__` labels values. For example on Remote Write 2.0,
+When this feature is enabled and the metadata WAL records exists, in an unlikely situation when type or unit are different across those,
+the Prometheus outputs intends to prefer the `__type__` and `__unit__` labels values. For example on Remote Write 2.0,
if the metadata record somehow (e.g. due to bug) says "counter", but `__type__="gauge"` the remote time series will be set to a gauge.
## Use Uncached IO
@@ -338,9 +338,25 @@ Example query:
> **Note for alerting and recording rules:**
> The `smoothed` modifier requires samples after the evaluation interval, so using it directly in alerting or recording rules will typically *under-estimate* the result, as future samples are not available at evaluation time.
-> To use `smoothed` safely in rules, you **must** apply a `query_offset` to the rule group (see [documentation](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group)) to ensure the calculation window is fully in the past and all needed samples are available.
+> To use `smoothed` safely in rules, you **must** apply a `query_offset` to the rule group (see [documentation](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group)) to ensure the calculation window is fully in the past and all needed samples are available.
> For critical alerting, set the offset to at least one scrape interval; for less critical or more resilient use cases, consider a larger offset (multiple scrape intervals) to tolerate missed scrapes.
For more details, see the [design doc](https://github.com/prometheus/proposals/blob/main/proposals/2025-04-04_extended-range-selectors-semantics.md).
**Note**: Extended Range Selectors are not supported for subqueries.
+
+## Binary operator fill modifiers
+
+`--enable-feature=promql-binop-fill-modifiers`
+
+Enables experimental `fill()`, `fill_left()`, and `fill_right()` modifiers for PromQL binary operators. These modifiers allow filling in missing matches on either side of a binary operation with a provided default sample value.
+
+Example query:
+
+```
+ rate(successful_requests[5m])
++ fill(0)
+ rate(failed_requests[5m])
+```
+
+See [the fill modifiers documentation](querying/operators.md#filling-in-missing-matches) for more details and examples.
diff --git a/docs/prometheus_agent.md b/docs/prometheus_agent.md
index 468b5565d1..0d8c3fa94a 100644
--- a/docs/prometheus_agent.md
+++ b/docs/prometheus_agent.md
@@ -20,8 +20,8 @@ In essence, it looks like this:
### Benefits of agent mode
-- Improved efficency. The customized Agent TSDB WAL removes the data immediately after successful writes. If it cannot reach the remote endpoint, it persists the data temporarily on the disk until the remote endpoint is back online. This is currently limited to a two-hour buffer only, similar to non-agent Prometheus. This means that there is no need to build chunks of data in memory or maintain a full index for querying purposes. Essentially the Agent mode uses a fraction of the resources that a normal Prometheus server would use in a similar situation.
-- Agent mode eables easier [horizontal scalability for ingestion](https://prometheus.io/blog/2021/11/16/agent/#the-dream-auto-scalable-metric-ingestion).
+- Improved efficiency. The customized Agent TSDB WAL removes the data immediately after successful writes. If it cannot reach the remote endpoint, it persists the data temporarily on the disk until the remote endpoint is back online. This is currently limited to a two-hour buffer only, similar to non-agent Prometheus. This means that there is no need to build chunks of data in memory or maintain a full index for querying purposes. Essentially the Agent mode uses a fraction of the resources that a normal Prometheus server would use in a similar situation.
+- Agent mode enables easier [horizontal scalability for ingestion](https://prometheus.io/blog/2021/11/16/agent/#the-dream-auto-scalable-metric-ingestion).
### Downsides of agent mode
diff --git a/docs/querying/api.md b/docs/querying/api.md
index 4891db8980..78574ec103 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -6,6 +6,22 @@ sort_rank: 7
The current stable HTTP API is reachable under `/api/v1` on a Prometheus
server. Any non-breaking additions will be added under that endpoint.
+## OpenAPI Specification
+
+An OpenAPI specification for the HTTP API is available at `/api/v1/openapi.yaml`.
+By default, it returns OpenAPI 3.1 for broader compatibility. Use `?openapi_version=3.2`
+for OpenAPI 3.2, which includes advanced features and endpoints like `/api/v1/notifications/live`.
+
+This machine-readable specification describes all available endpoints, request parameters,
+response formats, and schemas.
+
+The OpenAPI specification can be used to:
+
+- Generate client libraries in various programming languages.
+- Validate API requests and responses.
+- Generate interactive API documentation.
+- Test API endpoints.
+
## Format overview
The API response format is JSON. Every successful API request returns a `2xx`
@@ -1013,6 +1029,7 @@ curl http://localhost:9090/api/v1/alerts
## Querying target metadata
The following endpoint returns metadata about metrics currently scraped from targets.
+The endpoint has the limitation that only metadata scraped from targets directly is returned, metadata sent over Remote-Write or OTLP to Prometheus is not included in this endpoint and will not show up on the UI in "Explore Metrics".
This is **experimental** and might change in the future.
```
diff --git a/docs/querying/functions.md b/docs/querying/functions.md
index 0cae149dd7..3a9b7025f8 100644
--- a/docs/querying/functions.md
+++ b/docs/querying/functions.md
@@ -568,6 +568,8 @@ While `info` normally automatically finds all matching info series, it's possibl
restrict them by providing a `__name__` label matcher, e.g.
`{__name__="target_info"}`.
+Note that if there are any time series in `v` that match the `data-label-selector` (or the default `target_info` if that argument is not specified), they will be treated as info series and will be returned unchanged.
+
### Limitations
In its current iteration, `info` defaults to considering only info series with
diff --git a/docs/querying/operators.md b/docs/querying/operators.md
index b320d8e86e..b15c02aedc 100644
--- a/docs/querying/operators.md
+++ b/docs/querying/operators.md
@@ -47,9 +47,9 @@ special values like `NaN`, `+Inf`, and `-Inf`.
scalar that is the result of the operator applied to both scalar operands.
**Between an instant vector and a scalar**, the operator is applied to the
-value of every data sample in the vector.
+value of every data sample in the vector.
-If the data sample is a float, the operation is performed between that float and the scalar.
+If the data sample is a float, the operation is performed between that float and the scalar.
For example, if an instant vector of float samples is multiplied by 2,
the result is another vector of float samples in which every sample value of
the original vector is multiplied by 2.
@@ -81,8 +81,9 @@ following:
**Between two instant vectors**, a binary arithmetic operator is applied to
each entry in the LHS vector and its [matching element](#vector-matching) in
the RHS vector. The result is propagated into the result vector with the
-grouping labels becoming the output label set. Entries for which no matching
-entry in the right-hand vector can be found are not part of the result.
+grouping labels becoming the output label set. By default, series for which
+no matching entry in the opposite vector can be found are not part of the
+result. This behavior can be adjusted using [fill modifiers](#filling-in-missing-matches).
If two float samples are matched, the arithmetic operator is applied to the two
input values.
@@ -97,7 +98,7 @@ If two histogram samples are matched, only `+` and `-` are valid operations,
each adding or subtracting all matching bucket populations and the count and
the sum of observations. All other operations result in the removal of the
corresponding element from the output vector, flagged by an info-level
-annotation. The `+` and -` operations should generally only be applied to gauge
+annotation. The `+` and `-` operations should generally only be applied to gauge
histograms, but PromQL allows them for counter histograms, too, to cover
specific use cases, for which special attention is required to avoid problems
with unaligned counter resets. (Certain incompatibilities of counter resets can
@@ -106,7 +107,7 @@ two counter histograms results in a counter histogram. All other combination of
operands and all subtractions result in a gauge histogram.
**In any arithmetic binary operation involving vectors**, the metric name is
-dropped. This occurs even if `__name__` is explicitly mentioned in `on`
+dropped. This occurs even if `__name__` is explicitly mentioned in `on`
(see https://github.com/prometheus/prometheus/issues/16631 for further discussion).
**For any arithmetic binary operation that may result in a negative
@@ -156,9 +157,9 @@ info-level annotation.
applied to matching entries. Vector elements for which the expression is not
true or which do not find a match on the other side of the expression get
dropped from the result, while the others are propagated into a result vector
-with the grouping labels becoming the output label set.
+with the grouping labels becoming the output label set.
-Matches between two float samples work as usual.
+Matches between two float samples work as usual.
Matches between a float sample and a histogram sample are invalid, and the
corresponding element is removed from the result vector, flagged by an info-level
@@ -171,8 +172,8 @@ comparison binary operations are again invalid.
modifier changes the behavior in the following ways:
* Vector elements which find a match on the other side of the expression but for
- which the expression is false instead have the value `0` and vector elements
- that do find a match and for which the expression is true have the value `1`.
+ which the expression is false instead have the value `0`, and vector elements
+ that do find a match and for which the expression is true have the value `1`.
(Note that elements with no match or invalid operations involving histogram
samples still return no result rather than the value `0`.)
* The metric name is dropped.
@@ -216,11 +217,10 @@ matching behavior: One-to-one and many-to-one/one-to-many.
### Vector matching keywords
-These vector matching keywords allow for matching between series with different label sets
-providing:
+These vector matching keywords allow for matching between series with different label sets:
-* `on`
-* `ignoring`
+* `on()`: Only match on provided labels.
+* `ignoring()`: Ignore provided labels when matching.
Label lists provided to matching keywords will determine how vectors are combined. Examples
can be found in [One-to-one vector matches](#one-to-one-vector-matches) and in
@@ -230,8 +230,8 @@ can be found in [One-to-one vector matches](#one-to-one-vector-matches) and in
These group modifiers enable many-to-one/one-to-many vector matching:
-* `group_left`
-* `group_right`
+* `group_left`: Allow many-to-one matching, where the left vector has higher cardinality.
+* `group_right`: Allow one-to-many matching, where the right vector has higher cardinality.
Label lists can be provided to the group modifier which contain labels from the "one"-side to
be included in the result metrics.
@@ -239,11 +239,9 @@ be included in the result metrics.
_Many-to-one and one-to-many matching are advanced use cases that should be carefully considered.
Often a proper use of `ignoring()` provides the desired outcome._
-_Grouping modifiers can only be used for
-[comparison](#comparison-binary-operators) and
-[arithmetic](#arithmetic-binary-operators). Operations as `and`, `unless` and
-`or` operations match with all possible entries in the right vector by
-default._
+_Grouping modifiers can only be used for [comparison](#comparison-binary-operators),
+[arithmetic](#arithmetic-binary-operators), and [trigonometric](#trigonometric-binary-operators)
+operators. Set operators match with all possible entries on either side by default._
### One-to-one vector matches
@@ -311,6 +309,58 @@ left:
{method="post", code="500"} 0.05 // 6 / 120
{method="post", code="404"} 0.175 // 21 / 120
+### Filling in missing matches
+
+Fill modifiers are **experimental** and must be enabled with `--enable-feature=promql-binop-fill-modifiers`.
+
+By default, vector elements that do not find a match on the other side of a binary operation
+are not included in the result vector. Fill modifiers allow overriding this behavior by filling
+in missing series on either side of a binary operation with a provided default sample value:
+
+* `fill()`: Fill in missing matches on either side with `value`.
+* `fill_left()`: Fill in missing matches on the left side with `value`.
+* `fill_right()`: Fill in missing matches on the right side with `value`.
+
+`value` has to be a numeric literal representing a float sample. Histogram samples are not supported.
+
+Note that these modifiers can only fill in series that are missing on one side of the operation.
+If a series is missing on both sides, it cannot be created by these modifiers.
+
+The fill modifiers can be used in the following combinations:
+
+* `fill()`
+* `fill_left()`
+* `fill_right()`
+* `fill_left() fill_right()`
+* `fill_right() fill_left()`
+
+If other binary operator modifiers like `bool`, `on`, `ignoring`, `group_left`, or `group_right`
+are used, the fill modifiers must be provided last.
+
+When using fill modifiers in combination with `group_left` or `group_right`, they behave as follows:
+
+* If a fill modifier is used on the "many" side of a match, it will only fill in a single series
+ for the "many" side of each match group, using the group's matching labels as the series identity.
+* If a fill modifier is used on the "one" side of a match and the grouping modifier specifies
+ label names to include from the "one" side (e.g. `left_vector * on(instance, job) group_left(info_label) fill_right(1) right_vector`), those labels will not be filled in for missing
+ series, as there is no source for their values.
+
+Fill modifiers are not supported for set operators (`and`, `or`, `unless`), as the purpose of those
+operators is to filter series based on presence or absence in the other vector.
+
+Example query, filling in missing series on the either side with `0`:
+
+ method_code:http_errors:rate5m{status="500"} / ignoring(code) fill(0) method:http_requests:rate5m
+
+This returns a result vector containing the fraction of HTTP requests with status code
+of 500 for each method, as measured over the last 5 minutes. The entries with methods `put` and `del`
+are now included in the result with a filled-in default sample value of `0`, as they had no matching
+series on the respective other side:
+
+ {method="get"} 0.04 # 24 / 600
+ {method="put"} +Inf # 3 / 0 (missing right side filled in)
+ {method="del"} 0 # 0 / 34 (missing left side filled in)
+ {method="post"} 0.05 # 6 / 120
## Aggregation operators
@@ -357,7 +407,7 @@ identical between all elements of the vector.
#### `sum`
`sum(v)` sums up sample values in `v` in the same way as the `+` binary operator does
-between two values.
+between two values.
All sample values being aggregated into a single resulting vector element must either be
float samples or histogram samples. An aggregation of a mix of both is invalid,
@@ -393,7 +443,7 @@ vector, flagged by a warn-level annotation.
#### `min` and `max`
-`min(v)` and `max(v)` return the minimum or maximum value, respectively, in `v`.
+`min(v)` and `max(v)` return the minimum or maximum value, respectively, in `v`.
They only operate on float samples, following IEEE 754 floating
point arithmetic, which in particular implies that `NaN` is only ever
@@ -403,9 +453,9 @@ samples in the input vector are ignored, flagged by an info-level annotation.
#### `topk` and `bottomk`
`topk(k, v)` and `bottomk(k, v)` are different from other aggregators in that a subset of
-`k` values from the input samples, including the original labels, are returned in the result vector.
+`k` values from the input samples, including the original labels, are returned in the result vector.
-`by` and `without` are only used to bucket the input vector.
+`by` and `without` are only used to bucket the input vector.
Similar to `min` and `max`, they only operate on float samples, considering `NaN` values
to be farthest from the top or bottom, respectively. Histogram samples in the
@@ -415,7 +465,7 @@ If used in an instant query, `topk` and `bottomk` return series ordered by
value in descending or ascending order, respectively. If used with `by` or
`without`, then series within each bucket are sorted by value, and series in
the same bucket are returned consecutively, but there is no guarantee that
-buckets of series will be returned in any particular order.
+buckets of series will be returned in any particular order.
No sorting applies to range queries.
@@ -428,11 +478,11 @@ To get the 5 instances with the highest memory consumption across all instances
#### `limitk`
`limitk(k, v)` returns a subset of `k` input samples, including
-the original labels in the result vector.
+the original labels in the result vector.
The subset is selected in a deterministic pseudo-random way.
-This happens independent of the sample type.
-Therefore, it works for both float samples and histogram samples.
+This happens independent of the sample type.
+Therefore, it works for both float samples and histogram samples.
##### Example
@@ -470,8 +520,8 @@ The value may be a float or histogram sample.
#### `count_values`
-`count_values(l, v)` outputs one time series per unique sample value in `v`.
-Each series has an additional label, given by `l`, and the label value is the
+`count_values(l, v)` outputs one time series per unique sample value in `v`.
+Each series has an additional label, given by `l`, and the label value is the
unique sample value. The value of each time series is the number of times that sample value was present.
`count_values` works with both float samples and histogram samples. For the
@@ -486,7 +536,7 @@ To count the number of binaries running each build version we could write:
#### `stddev`
-`stddev(v)` returns the standard deviation of `v`.
+`stddev(v)` returns the standard deviation of `v`.
`stddev` only works with float samples, following IEEE 754 floating
point arithmetic. Histogram samples in the input vector are ignored, flagged by
@@ -494,7 +544,7 @@ an info-level annotation.
#### `stdvar`
-`stdvar(v)` returns the standard deviation of `v`.
+`stdvar(v)` returns the standard deviation of `v`.
`stdvar` only works with float samples, following IEEE 754 floating
point arithmetic. Histogram samples in the input vector are ignored, flagged by
@@ -510,12 +560,12 @@ are ignored, flagged by an info-level annotation.
`NaN` is considered the smallest possible value.
-For example, `quantile(0.5, ...)` calculates the median, `quantile(0.95, ...)` the 95th percentile.
+For example, `quantile(0.5, ...)` calculates the median, `quantile(0.95, ...)` the 95th percentile.
Special cases:
* For φ = `NaN`, `NaN` is returned.
-* For φ < 0, `-Inf` is returned.
+* For φ < 0, `-Inf` is returned.
* For φ > 1, `+Inf` is returned.
## Binary operator precedence
diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod
index 17076faddd..0c80c6e7c6 100644
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
-go 1.24.0
+go 1.25.5
require (
github.com/alecthomas/kingpin/v2 v2.4.0
@@ -8,59 +8,78 @@ require (
github.com/golang/snappy v1.0.0
github.com/influxdata/influxdb-client-go/v2 v2.14.0
github.com/prometheus/client_golang v1.23.2
- github.com/prometheus/common v0.67.4
+ github.com/prometheus/common v0.67.5
github.com/prometheus/prometheus v0.308.1
github.com/stretchr/testify v1.11.1
)
require (
- cloud.google.com/go/auth v0.17.0 // indirect
+ cloud.google.com/go/auth v0.18.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
- github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.32.6 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.19.6 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ec2 v1.286.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
- github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
+ github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11 // indirect
+ github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
+ github.com/digitalocean/godo v1.174.0 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/go-connections v0.6.0 // indirect
+ github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect
+ github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+ github.com/go-openapi/jsonreference v0.21.4 // indirect
+ github.com/go-openapi/swag v0.25.4 // indirect
+ github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
- github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
- github.com/googleapis/gax-go/v2 v2.15.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
+ github.com/googleapis/gax-go/v2 v2.17.0 // indirect
+ github.com/gophercloud/gophercloud/v2 v2.10.0 // indirect
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 // indirect
- github.com/hashicorp/go-version v1.7.0 // indirect
+ github.com/hashicorp/consul/api v1.33.2 // indirect
+ github.com/hashicorp/go-version v1.8.0 // indirect
+ github.com/hashicorp/nomad/api v0.0.0-20260209224925-94b77491c895 // indirect
+ github.com/hetznercloud/hcloud-go/v2 v2.36.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
+ github.com/ionos-cloud/sdk-go/v6 v6.3.6 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.18.2 // indirect
+ github.com/klauspost/compress v1.18.4 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
- github.com/knadh/koanf/v2 v2.3.0 // indirect
+ github.com/knadh/koanf/v2 v2.3.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
+ github.com/linode/linodego v1.65.0 // indirect
+ github.com/mattn/go-colorable v0.1.14 // indirect
+ github.com/miekg/dns v1.1.72 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -68,53 +87,60 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/oapi-codegen/runtime v1.0.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0 // indirect
+ github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca // indirect
+ github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/otlptranslator v1.0.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
- github.com/prometheus/sigv4 v0.3.0 // indirect
+ github.com/prometheus/sigv4 v0.4.1 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
+ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 // indirect
+ github.com/spf13/pflag v1.0.10 // indirect
+ github.com/stackitcloud/stackit-sdk-go/core v0.21.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/collector/component v1.45.0 // indirect
- go.opentelemetry.io/collector/confmap v1.45.0 // indirect
- go.opentelemetry.io/collector/confmap/xconfmap v0.139.0 // indirect
- go.opentelemetry.io/collector/consumer v1.45.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.45.0 // indirect
- go.opentelemetry.io/collector/pdata v1.45.0 // indirect
- go.opentelemetry.io/collector/pipeline v1.45.0 // indirect
- go.opentelemetry.io/collector/processor v1.45.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/collector/component v1.51.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.51.0 // indirect
+ go.opentelemetry.io/collector/confmap/xconfmap v0.145.0 // indirect
+ go.opentelemetry.io/collector/consumer v1.51.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.51.0 // indirect
+ go.opentelemetry.io/collector/internal/componentalias v0.145.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.51.0 // indirect
+ go.opentelemetry.io/collector/pipeline v1.51.0 // indirect
+ go.opentelemetry.io/collector/processor v1.51.0 // indirect
go.opentelemetry.io/collector/semconv v0.128.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
- go.opentelemetry.io/otel v1.38.0 // indirect
- go.opentelemetry.io/otel/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/trace v1.38.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.65.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
+ go.opentelemetry.io/otel v1.40.0 // indirect
+ go.opentelemetry.io/otel/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/trace v1.40.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.27.0 // indirect
+ go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/crypto v0.43.0 // indirect
- golang.org/x/net v0.46.0 // indirect
- golang.org/x/oauth2 v0.32.0 // indirect
- golang.org/x/sys v0.37.0 // indirect
- golang.org/x/text v0.30.0 // indirect
- golang.org/x/time v0.13.0 // indirect
- google.golang.org/api v0.252.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 // indirect
- google.golang.org/grpc v1.76.0 // indirect
- google.golang.org/protobuf v1.36.10 // indirect
+ golang.org/x/crypto v0.47.0 // indirect
+ golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/oauth2 v0.35.0 // indirect
+ golang.org/x/sys v0.41.0 // indirect
+ golang.org/x/text v0.34.0 // indirect
+ golang.org/x/time v0.14.0 // indirect
+ google.golang.org/api v0.266.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
+ google.golang.org/grpc v1.78.0 // indirect
+ google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apimachinery v0.34.1 // indirect
- k8s.io/client-go v0.34.1 // indirect
+ k8s.io/apimachinery v0.35.0 // indirect
+ k8s.io/client-go v0.35.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
)
exclude (
diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum
index 1a3e86ff22..6ebede1adf 100644
--- a/documentation/examples/remote_storage/go.sum
+++ b/documentation/examples/remote_storage/go.sum
@@ -1,13 +1,13 @@
-cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
-cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
+cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs=
+cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
@@ -18,12 +18,12 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
@@ -33,38 +33,38 @@ github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7D
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
-github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
-github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
-github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
+github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
+github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
+github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0 h1:5qBb1XV/D18qtCHd3bmmxoVglI+fZ4QWuS/EB8kIXYQ=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0/go.mod h1:NDdDLLW5PtLLXN661gKcvJvqAH5OBXsfhMlmKVu1/pY=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2 h1:oeICOX/+D0XXV1aMYJPXVe3CO37zYr7fB6HFgxchleU=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2/go.mod h1:rrhqfkXfa2DSNq0RyFhnnFEAyI+yJB4+2QlZKeJvMjs=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.286.0 h1:GgLc+o2oD2sXxlEwGUCCWz/1v3Wa8dN9RRebcIFXeOo=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.286.0/go.mod h1:Uy+C+Sc58jozdoL1McQr8bDsEvNFx+/nBY+vpO1HVUY=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0 h1:MzP/ElwTpINq+hS80ZQz4epKVnUTlz8Sz+P/AFORCKM=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0/go.mod h1:pMlGFDpHoLTJOIZHGdJOAWmi+xeIlQXuFTuQxs1epYE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
-github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4 h1:/1o2AYwHJojUDeMvQNyJiKZwcWCc3e4kQuTXqRLuThc=
-github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4/go.mod h1:Nn2xx6HojGuNMtUFxxz/nyNLSS+tHMRsMhe3+W3wB5k=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11 h1:VM5e5M39zRSs+aT0O9SoxHjUXqXxhbw3Yi0FdMQWPIc=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11/go.mod h1:0jvzYPIQGCpnY/dmdaotTk2JH4QuBlnW0oeyrcGLWJ4=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
@@ -74,8 +74,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
-github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@@ -86,25 +86,24 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.168.0 h1:mlORtUcPD91LQeJoznrH3XvfvgK3t8Wvrpph9giUT/Q=
-github.com/digitalocean/godo v1.168.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/digitalocean/godo v1.174.0 h1:9nVX8WqAPd7ZN9Yn63HeLRAI8m2vi9QeotcDvYmB+ns=
+github.com/digitalocean/godo v1.174.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
+github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
-github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
-github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
-github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
-github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=
+github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=
+github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=
+github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
@@ -120,24 +119,46 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
-github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
-github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
-github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
-github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
+github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
+github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
+github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
+github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
+github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
+github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
+github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
+github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
+github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
+github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
+github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
+github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
+github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
+github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
+github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
+github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
+github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
+github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
+github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
+github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
+github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
+github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
+github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
+github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
+github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
+github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
+github.com/go-resty/resty/v2 v2.17.1 h1:x3aMpHK1YM9e4va/TMDRlusDDoZiQ+ViDu/WpA6xTM4=
+github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA=
+github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
+github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
-github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
+github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
@@ -146,25 +167,25 @@ github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnL
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
-github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
+github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
-github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
-github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
-github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
-github.com/gophercloud/gophercloud/v2 v2.8.0 h1:of2+8tT6+FbEYHfYC8GBu8TXJNsXYSNm9KuvpX7Neqo=
-github.com/gophercloud/gophercloud/v2 v2.8.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao=
+github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8=
+github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc=
+github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY=
+github.com/gophercloud/gophercloud/v2 v2.10.0 h1:NRadC0aHNvy4iMoFXj5AFiPmut/Sj3hAPAo9B59VMGc=
+github.com/gophercloud/gophercloud/v2 v2.10.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
-github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
-github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
+github.com/hashicorp/consul/api v1.33.2 h1:Q6mE0WZsUTJerlnl9TuXzqrtZ0cKdOCsxcZhj5mKbMs=
+github.com/hashicorp/consul/api v1.33.2/go.mod h1:K3yoL/vnIBcQV/25NeMZVokRvPPERiqp2Udtr4xAfhs=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
@@ -181,24 +202,22 @@ github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISH
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
-github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
+github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af h1:ScAYf8O+9xTqTJPZH8MIlUfO+ak8cb31rW1aYJgS+jE=
-github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE=
+github.com/hashicorp/nomad/api v0.0.0-20260209224925-94b77491c895 h1:JAnsaAOxJDDHvd9E9DtbXCheE9nVUbS4gchQeV4Lt98=
+github.com/hashicorp/nomad/api v0.0.0-20260209224925-94b77491c895/go.mod h1:JAmS1nGJ1KcTM+MHAkgyrL0GDbsnKiJsp75KyqO2wWc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.29.0 h1:LzNFw5XLBfftyu3WM1sdSLjOZBlWORtz2hgGydHaYV8=
-github.com/hetznercloud/hcloud-go/v2 v2.29.0/go.mod h1:XBU4+EDH2KVqu2KU7Ws0+ciZcX4ygukQl/J0L5GS8P8=
+github.com/hetznercloud/hcloud-go/v2 v2.36.0 h1:HlLL/aaVXUulqe+rsjoJmrxKhPi1MflL5O9iq5QEtvo=
+github.com/hetznercloud/hcloud-go/v2 v2.36.0/go.mod h1:MnN/QJEa/RYNQiiVoJjNHPntM7Z1wlYPgJ2HA40/cDE=
github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=
github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
-github.com/ionos-cloud/sdk-go/v6 v6.3.4 h1:jTvGl4LOF8v8OYoEIBNVwbFoqSGAFqn6vGE7sp7/BqQ=
-github.com/ionos-cloud/sdk-go/v6 v6.3.4/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/ionos-cloud/sdk-go/v6 v6.3.6 h1:l/TtKgdQ1wUH3DDe2SfFD78AW+TJWdEbDpQhHkWd6CM=
+github.com/ionos-cloud/sdk-go/v6 v6.3.6/go.mod h1:nUGHP4kZHAZngCVr4v6C8nuargFrtvt7GrzH/hqn7c4=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -208,14 +227,14 @@ github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRt
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
-github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
+github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A=
-github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM=
-github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28=
+github.com/knadh/koanf/v2 v2.3.2 h1:Ee6tuzQYFwcZXQpc2MiVeC6qHMandf5SMUJJNoFp/c4=
+github.com/knadh/koanf/v2 v2.3.2/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -224,22 +243,18 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.60.0 h1:SgsebJFRCi+lSmYy+C40wmKZeJllGGm+W12Qw4+yVdI=
-github.com/linode/linodego v1.60.0/go.mod h1:1+Bt0oTz5rBnDOJbGhccxn7LYVytXTIIfAy7QYmijDs=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/linode/linodego v1.65.0 h1:SdsuGD8VSsPWeShXpE7ihl5vec+fD3MgwhnfYC/rj7k=
+github.com/linode/linodego v1.65.0/go.mod h1:tOFiTErdjkbVnV+4S0+NmIE9dqqZUEM2HsJaGu8wMh8=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
-github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
+github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI=
+github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -259,16 +274,16 @@ github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0 h1:D5aGQCErSCb4sKIHoZhgR4El6AzgviTRYlHUpbSFqDo=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0/go.mod h1:ZjeRsA5oaVk89fg5D+iXStx2QncmhAvtGbdSumT07H4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0 h1:6/j0Ta8ZJnmAFVEoC3aZ1Hs19RB4fHzlN6kOZhsBJqM=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0/go.mod h1:VfA8xHz4xg7Fyj5bBsCDbOO3iVYzDn9wP/QFsjcAE5c=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0 h1:iRNX/ueuad1psOVgnNkxuQmXxvF3ze5ZZCP66xKFk/w=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0/go.mod h1:bW09lo3WgHsPsZ1mgsJvby9wCefT5o13patM5phdfIU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0 h1:0dYiJ7krIwaHFX6YLNDo/yawTZIu8X16tT/nwW1UTG8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0/go.mod h1:mhoa9lipcEH0heeKf6+xHzGUrCuAgImQv4/Qpmu0+Fk=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0 h1:sB4yuYx45zig1ceQ+kmrEYy0xMZ+mGagwYIFtJkkU1w=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0/go.mod h1:uLhceuH7ZtiVxk+B0MHI0vhJG2Y4aOzT/hrV6c5KjVU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0 h1:en86L47oOTsAkbDc5VEMF5cziXPBK2D4hqGRqLaJtCw=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0/go.mod h1:osDRUOIfd7IiKkDvcE/VrPp9FFOPJmFp73RuvgOn5gE=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
@@ -282,31 +297,31 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
-github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca h1:BOxmsLoL2ymn8lXJtorca7N/m+2vDQUDoEtPjf0iAxA=
-github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca/go.mod h1:gndBHh3ZdjBozGcGrjUYjN3UJLRS3l2drALtu4lUt+k=
+github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 h1:vwqZvuobg82U0gcG2eVrFH27806bUbNr32SvfRbvdsg=
+github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562/go.mod h1:PmAYDB13uBFBG9qE1qxZZgZWhg7Rg6SfKM5DMK7hjyI=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
-github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
-github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
+github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/prometheus v0.308.1 h1:ApMNI/3/es3Ze90Z7CMb+wwU2BsSYur0m5VKeqHj7h4=
github.com/prometheus/prometheus v0.308.1/go.mod h1:aHjYCDz9zKRyoUXvMWvu13K9XHOkBB12XrEqibs3e0A=
-github.com/prometheus/sigv4 v0.3.0 h1:QIG7nTbu0JTnNidGI1Uwl5AGVIChWUACxn2B/BQ1kms=
-github.com/prometheus/sigv4 v0.3.0/go.mod h1:fKtFYDus2M43CWKMNtGvFNHGXnAJJEGZbiYCmVp/F8I=
+github.com/prometheus/sigv4 v0.4.1 h1:EIc3j+8NBea9u1iV6O5ZAN8uvPq2xOIUPcqCTivHuXs=
+github.com/prometheus/sigv4 v0.4.1/go.mod h1:eu+ZbRvsc5TPiHwqh77OWuCnWK73IdkETYY46P4dXOU=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35 h1:8xfn1RzeI9yoCUuEwDy08F+No6PcKZGEDOQ6hrRyLts=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35/go.mod h1:47B1d/YXmSAxlJxUJxClzHR6b3T4M1WyCvwENPQNBWc=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 h1:ObX9hZmK+VmijreZO/8x9pQ8/P/ToHD/bdSb4Eg4tUo=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36/go.mod h1:LEsDu4BubxK7/cWhtlQWfuxwL4rf/2UEpxXz1o1EMtM=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
-github.com/stackitcloud/stackit-sdk-go/core v0.17.3 h1:GsZGmRRc/3GJLmCUnsZswirr5wfLRrwavbnL/renOqg=
-github.com/stackitcloud/stackit-sdk-go/core v0.17.3/go.mod h1:HBCXJGPgdRulplDzhrmwC+Dak9B/x0nzNtmOpu+1Ahg=
+github.com/stackitcloud/stackit-sdk-go/core v0.21.1 h1:Y/PcAgM7DPYMNqum0MLv4n1mF9ieuevzcCIZYQfm3Ts=
+github.com/stackitcloud/stackit-sdk-go/core v0.21.1/go.mod h1:osMglDby4csGZ5sIfhNyYq1bS1TxIdPY88+skE/kkmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -327,70 +342,74 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/collector/component v1.45.0 h1:gGFfVdbQ+1YuyUkJjWo85I7euu3H/CiupuzCHv8OgHA=
-go.opentelemetry.io/collector/component v1.45.0/go.mod h1:xoNFnRKE8Iv6gmlqAKgjayWraRnDcYLLgrPt9VgyO2g=
-go.opentelemetry.io/collector/component/componentstatus v0.139.0 h1:bQmkv1t7xW7uIDireE0a2Am4IMOprXm6zQr/qDtGCIA=
-go.opentelemetry.io/collector/component/componentstatus v0.139.0/go.mod h1:ibZOohpG0u081/NaT/jMCTsKwRbbwwxWrjZml+owpyM=
-go.opentelemetry.io/collector/component/componenttest v0.139.0 h1:x9Yu2eYhrHxdZ7sFXWtAWVjQ3UIraje557LgNurDC2I=
-go.opentelemetry.io/collector/component/componenttest v0.139.0/go.mod h1:S9cj+qkf9FgHMzjvlYsLwQKd9BiS7B7oLZvxvlENM/c=
-go.opentelemetry.io/collector/confmap v1.45.0 h1:7M7TTlpzX4r+mIzP/ARdxZBAvI4N+1V96phDane+akU=
-go.opentelemetry.io/collector/confmap v1.45.0/go.mod h1:AE1dnkjv0T9gptsh5+mTX0XFGdXx0n7JS4b7CcPfJ6Q=
-go.opentelemetry.io/collector/confmap/xconfmap v0.139.0 h1:uQGpFuWnTCXqdMbI3gDSvkwU66/kF/aoC0kVMrit1EM=
-go.opentelemetry.io/collector/confmap/xconfmap v0.139.0/go.mod h1:d0ucaeNq2rojFRSQsCHF/gkT3cgBx5H2bVkPQMj57ck=
-go.opentelemetry.io/collector/consumer v1.45.0 h1:TtqXxgW+1GSCwdoohq0fzqnfqrZBKbfo++1XRj8mrEA=
-go.opentelemetry.io/collector/consumer v1.45.0/go.mod h1:pJzqTWBubwLt8mVou+G4/Hs23b3m425rVmld3LqOYpY=
-go.opentelemetry.io/collector/consumer/consumertest v0.139.0 h1:06mu43mMO7l49ASJ/GEbKgTWcV3py5zE/pKhNBZ1b3k=
-go.opentelemetry.io/collector/consumer/consumertest v0.139.0/go.mod h1:gaeCpRQGbCFYTeLzi+Z2cTDt40GiIa3hgIEgLEmiC78=
-go.opentelemetry.io/collector/consumer/xconsumer v0.139.0 h1:FhzDv+idglnrfjqPvnUw3YAEOkXSNv/FuNsuMiXQwcY=
-go.opentelemetry.io/collector/consumer/xconsumer v0.139.0/go.mod h1:yWrg/6FE/A4Q7eo/Mg++CzkBoSILHdeMnTlxV3serI0=
-go.opentelemetry.io/collector/featuregate v1.45.0 h1:D06hpf1F2KzKC+qXLmVv5e8IZpgCyZVeVVC8iOQxVmw=
-go.opentelemetry.io/collector/featuregate v1.45.0/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4=
-go.opentelemetry.io/collector/pdata v1.45.0 h1:q4XaISpeX640BcwXwb2mKOVw/gb67r22HjGWl8sbWsk=
-go.opentelemetry.io/collector/pdata v1.45.0/go.mod h1:5q2f001YhwMQO8QvpFhCOa4Cq/vtwX9W4HRMsXkU/nE=
-go.opentelemetry.io/collector/pdata/pprofile v0.139.0 h1:UA5TgFzYmRuJN3Wz0GR1efLUfjbs5rH0HTaxfASpTR8=
-go.opentelemetry.io/collector/pdata/pprofile v0.139.0/go.mod h1:sI5qHt+zzE2fhOWFdJIaiDBR0yGGjD4A4ZvDFU0tiHk=
-go.opentelemetry.io/collector/pdata/testdata v0.139.0 h1:n7O5bmLLhc3T6PePV4447fFcI/6QWcMhBsLtfCaD0do=
-go.opentelemetry.io/collector/pdata/testdata v0.139.0/go.mod h1:fxZ2VrhYLYBLHYBHC1XQRKZ6IJXwy0I2rPaaRlebYaY=
-go.opentelemetry.io/collector/pipeline v1.45.0 h1:sn9JJAEBe3XABTkWechMk0eH60QMBjjNe5V+ccBl+Uo=
-go.opentelemetry.io/collector/pipeline v1.45.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
-go.opentelemetry.io/collector/processor v1.45.0 h1:GH5km9BkDQOoz7MR0jzTnzB1Kb5vtKzPwa/wDmRg2dQ=
-go.opentelemetry.io/collector/processor v1.45.0/go.mod h1:wdlaTTC3wqlZIJP9R9/SLc2q7h+MFGARsxfjgPtwbes=
-go.opentelemetry.io/collector/processor/processortest v0.139.0 h1:30akUdruFNG7EDpayuBhXoX2lV+hcfxW9Gl3Z6MYHb0=
-go.opentelemetry.io/collector/processor/processortest v0.139.0/go.mod h1:RTll3UKHrqj/VS6RGjTHtuGIJzyLEwFhbw8KuCL3pjo=
-go.opentelemetry.io/collector/processor/xprocessor v0.139.0 h1:O9x9RF/OG8gZ+HrOcB4f6F1fjniby484xf2D8GBxgqU=
-go.opentelemetry.io/collector/processor/xprocessor v0.139.0/go.mod h1:hqGhEZ1/PftD/QHaYna0o1xAqZUsb7GhqpOiaTTDJnQ=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/collector/component v1.51.0 h1:btNW76MCRmpsk0ARRT5wspDXF9tvdaLd3uBtYXIiQn0=
+go.opentelemetry.io/collector/component v1.51.0/go.mod h1:Zlgwh4yTLDhJglOXqiyXZ7paepTvvoijfFjLqOr/Qww=
+go.opentelemetry.io/collector/component/componentstatus v0.145.0 h1:EwUZfSaagdpRXnlrb0TqReJXXW2p9HWBU5YiIeXPCAE=
+go.opentelemetry.io/collector/component/componentstatus v0.145.0/go.mod h1:OiYb8rT4FtSJPFSGCKYvOaajdueDUTJZncixGrmy5aM=
+go.opentelemetry.io/collector/component/componenttest v0.145.0 h1:ryhRrXqQybGMhz7A7t32NC8BXAFcX2o1RetgPM7vw88=
+go.opentelemetry.io/collector/component/componenttest v0.145.0/go.mod h1:5uStrhUdZ0Fw3se00CPmVaRtW8o9N8kKiY76OSCWFjQ=
+go.opentelemetry.io/collector/confmap v1.51.0 h1:C9YlMNkIgzuauLpUz2F7DLlWwqAmkQKNcKj1XATVWuE=
+go.opentelemetry.io/collector/confmap v1.51.0/go.mod h1:uWi4b9lHfvEC2poJ2I2vXwGUREVEQTcdUguOpfqdcHM=
+go.opentelemetry.io/collector/confmap/xconfmap v0.145.0 h1:ngbyfh4+SKlA+osgsak3AxUNPxVxaJTmA0Sl7VfJzwY=
+go.opentelemetry.io/collector/confmap/xconfmap v0.145.0/go.mod h1:zTSK+c76NAy/tI1R3xfZjdoI04D9EYDnzAHQQwl6AmA=
+go.opentelemetry.io/collector/consumer v1.51.0 h1:Ex1x/k9VEEA2DOgt/eSc2Z9KTp0I6xBSruLmrYFfIFY=
+go.opentelemetry.io/collector/consumer v1.51.0/go.mod h1:Erk6qdfVj+24QTrGCpurcrF+qdUlHkb4dgMy5wJxLvY=
+go.opentelemetry.io/collector/consumer/consumertest v0.145.0 h1:3+uMwuMHoXMAU+Z6mwCRA3AxWeL7SujcAQwqqHJ1gCc=
+go.opentelemetry.io/collector/consumer/consumertest v0.145.0/go.mod h1:IFc/FeaIHQClb8KK0aVn0tFDNMc+/MmfQ+aBT1cJNeo=
+go.opentelemetry.io/collector/consumer/xconsumer v0.145.0 h1:9w7KKv9lVJoHvMLC6SUJHenU/KySdEgFJXbB4JQOEsk=
+go.opentelemetry.io/collector/consumer/xconsumer v0.145.0/go.mod h1:SryDCLP2ZaFeZJtA2CSksJ0XvjH8k3LmlfXvy/kC7Wc=
+go.opentelemetry.io/collector/featuregate v1.51.0 h1:dxJuv/3T84dhNKp7fz5+8srHz1dhquGzDpLW4OZTFBw=
+go.opentelemetry.io/collector/featuregate v1.51.0/go.mod h1:/1bclXgP91pISaEeNulRxzzmzMTm4I5Xih2SnI4HRSo=
+go.opentelemetry.io/collector/internal/componentalias v0.145.0 h1:A9V5IiETzz8FCtjxjRM5gf7RE3sOtA1h8phmpQjXTZ4=
+go.opentelemetry.io/collector/internal/componentalias v0.145.0/go.mod h1:sEKEAwAn45ZiXRk3T/vbkvetw14tIRd0CJIxcEx9SsQ=
+go.opentelemetry.io/collector/internal/testutil v0.145.0 h1:H/KL0GH3kGqSMKxZvnQ0B0CulfO9xdTg4DZf28uV7fY=
+go.opentelemetry.io/collector/internal/testutil v0.145.0/go.mod h1:YAD9EAkwh/l5asZNbEBEUCqEjoL1OKMjAMoPjPqH76c=
+go.opentelemetry.io/collector/pdata v1.51.0 h1:DnDhSEuDXNdzGRB7f6oOfXpbDApwBX3tY+3K69oUrDA=
+go.opentelemetry.io/collector/pdata v1.51.0/go.mod h1:GoX1bjKDR++mgFKdT7Hynv9+mdgQ1DDXbjs7/Ww209Q=
+go.opentelemetry.io/collector/pdata/pprofile v0.145.0 h1:ASMKpoqokf8HhzjoeMKZf0K6UXLhufVwNXH0sSuUn5w=
+go.opentelemetry.io/collector/pdata/pprofile v0.145.0/go.mod h1:a60GC7wQPhLAixWzKbbP51QLwwc+J0Cmp4SurOlhGUk=
+go.opentelemetry.io/collector/pdata/testdata v0.145.0 h1:iFsxsCMtE3lnAc/5kZbhZHpRv1OMmM+O5ry46xdQHbg=
+go.opentelemetry.io/collector/pdata/testdata v0.145.0/go.mod h1:0y2ERArdzqmYdJHdKLKue+AUubSEGlwK49F+23+Mbic=
+go.opentelemetry.io/collector/pipeline v1.51.0 h1:GZBNW+aaOE+zufGzAkXy0OI7n1cqepEa5J+beaOpS2k=
+go.opentelemetry.io/collector/pipeline v1.51.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
+go.opentelemetry.io/collector/processor v1.51.0 h1:PKpCzkLQmqaW08TOVh/zM0qx07Ihq+DR5J/OBkPiL9o=
+go.opentelemetry.io/collector/processor v1.51.0/go.mod h1:rtIPFS+EFRAkG+CSwtjxs2IsIkuZStObvALeueD02XI=
+go.opentelemetry.io/collector/processor/processortest v0.145.0 h1:RDGBmyZnHk7XVK/EdLt/8iPWj+QLStbbVi1nFTNR01s=
+go.opentelemetry.io/collector/processor/processortest v0.145.0/go.mod h1:WAvxAzSojkdoZB915Z1lsVHCPDJBb2fepjJBjenrzjg=
+go.opentelemetry.io/collector/processor/xprocessor v0.145.0 h1:DaIE7MxRlg0OL1o2P0GQZtmZeExAmVso3qWv8S0RLps=
+go.opentelemetry.io/collector/processor/xprocessor v0.145.0/go.mod h1:kUwRyKBU/kjCmXodd+0z7CpvcP0A9G9/QL+MaJt4U2o=
go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 h1:2pn7OzMewmYRiNtv1doZnLo3gONcnMHlFnmOR8Vgt+8=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0/go.mod h1:rjbQTDEPQymPE0YnRQp9/NuPwwtL0sesz/fnqRW/v84=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/slim/otlp v1.8.0 h1:afcLwp2XOeCbGrjufT1qWyruFt+6C9g5SOuymrSPUXQ=
-go.opentelemetry.io/proto/slim/otlp v1.8.0/go.mod h1:Yaa5fjYm1SMCq0hG0x/87wV1MP9H5xDuG/1+AhvBcsI=
-go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0 h1:Uc+elixz922LHx5colXGi1ORbsW8DTIGM+gg+D9V7HE=
-go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0/go.mod h1:VyU6dTWBWv6h9w/+DYgSZAPMabWbPTFTuxp25sM8+s0=
-go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0 h1:i8YpvWGm/Uq1koL//bnbJ/26eV3OrKWm09+rDYo7keU=
-go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0/go.mod h1:pQ70xHY/ZVxNUBPn+qUWPl8nwai87eWdqL3M37lNi9A=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.65.0 h1:ab5U7DpTjjN8pNgwqlA/s0Csb+N2Raqo9eTSDhfg4Z8=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.65.0/go.mod h1:nwFJC46Dxhqz5R9k7IV8To/Z46JPvW+GNKhTxQQlUzg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE=
+go.opentelemetry.io/proto/slim/otlp v1.9.0/go.mod h1:xXdeJJ90Gqyll+orzUkY4bOd2HECo5JofeoLpymVqdI=
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o13nadWDNkH/quoDomDUClnQBpdQQ2Qqv0lQBjIXjE8=
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0/go.mod h1:Gyb6Xe7FTi/6xBHwMmngGoHqL0w29Y4eW8TGFzpefGA=
+go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0 h1:EiUYvtwu6PMrMHVjcPfnsG3v+ajPkbUeH+IL93+QYyk=
+go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0/go.mod h1:mUUHKFiN2SST3AhJ8XhJxEoeVW12oqfXog0Bo8W3Ec4=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
-go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -398,92 +417,91 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
-golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
-golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
-golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
+golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
+golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
-golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
+golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
-golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
-golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
-golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
+golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
+golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
-golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
-golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
+golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
+golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
-golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
-golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
-golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
+golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
+golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
-golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
+golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
+golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/api v0.252.0 h1:xfKJeAJaMwb8OC9fesr369rjciQ704AjU/psjkKURSI=
-google.golang.org/api v0.252.0/go.mod h1:dnHOv81x5RAmumZ7BWLShB/u7JZNeyalImxHmtTHxqw=
-google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
-google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
-google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
-google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
-google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
-google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
-google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+google.golang.org/api v0.266.0 h1:hco+oNCf9y7DmLeAtHJi/uBAY7n/7XC9mZPxu1ROiyk=
+google.golang.org/api v0.266.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0=
+google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0=
+google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
-gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
+gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
+gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
-gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.1 h1:tVBILHy0R6e4wkYOn3XmiITt/hEVH4TFMYvAX2Ytz6k=
+gopkg.in/ini.v1 v1.67.1/go.mod h1:x/cyOwCgZqOkJoDIJ3c1KNHMo10+nLGAhh+kn3Zizss=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
-k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
-k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
-k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
-k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
-k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
+k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
+k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
+k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
+k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
-k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
-k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
-k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
-sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
diff --git a/go.mod b/go.mod
index 24619581d1..89d468e874 100644
--- a/go.mod
+++ b/go.mod
@@ -1,9 +1,9 @@
module github.com/prometheus/prometheus
-go 1.24.0
+go 1.25.5
require (
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
@@ -11,99 +11,105 @@ require (
github.com/KimMachineGun/automemlimit v0.7.5
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
- github.com/aws/aws-sdk-go-v2 v1.41.0
- github.com/aws/aws-sdk-go-v2/config v1.32.6
- github.com/aws/aws-sdk-go-v2/credentials v1.19.6
- github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.0
- github.com/aws/aws-sdk-go-v2/service/ecs v1.70.0
- github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10
- github.com/aws/aws-sdk-go-v2/service/sts v1.41.5
+ github.com/aws/aws-sdk-go-v2 v1.41.1
+ github.com/aws/aws-sdk-go-v2/config v1.32.7
+ github.com/aws/aws-sdk-go-v2/credentials v1.19.7
+ github.com/aws/aws-sdk-go-v2/service/ec2 v1.285.0
+ github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0
+ github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7
+ github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11
+ github.com/aws/aws-sdk-go-v2/service/sts v1.41.6
github.com/aws/smithy-go v1.24.0
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
- github.com/digitalocean/godo v1.171.0
+ github.com/digitalocean/godo v1.173.0
github.com/docker/docker v28.5.2+incompatible
github.com/edsrzf/mmap-go v1.2.0
github.com/envoyproxy/go-control-plane/envoy v1.36.0
github.com/envoyproxy/protoc-gen-validate v1.3.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
+ github.com/felixge/fgprof v0.9.5
github.com/fsnotify/fsnotify v1.9.0
github.com/go-openapi/strfmt v0.25.0
github.com/go-zookeeper/zk v1.0.4
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v1.0.0
github.com/google/go-cmp v0.7.0
- github.com/google/pprof v0.0.0-20260111202518-71be6bfdd440
+ github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef
github.com/google/uuid v1.6.0
- github.com/gophercloud/gophercloud/v2 v2.9.0
+ github.com/gophercloud/gophercloud/v2 v2.10.0
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
- github.com/hashicorp/consul/api v1.32.1
- github.com/hashicorp/nomad/api v0.0.0-20260106084653-e8f2200c7039
- github.com/hetznercloud/hcloud-go/v2 v2.33.0
+ github.com/hashicorp/consul/api v1.33.2
+ github.com/hashicorp/nomad/api v0.0.0-20260205205048-8315996478d1
+ github.com/hetznercloud/hcloud-go/v2 v2.36.0
github.com/ionos-cloud/sdk-go/v6 v6.3.6
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.18.2
+ github.com/klauspost/compress v1.18.3
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
- github.com/linode/linodego v1.63.0
- github.com/miekg/dns v1.1.69
+ github.com/linode/linodego v1.65.0
+ github.com/miekg/dns v1.1.72
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
github.com/oklog/run v1.2.0
github.com/oklog/ulid/v2 v2.1.1
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.142.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0
github.com/ovh/go-ovh v1.9.0
- github.com/prometheus/alertmanager v0.30.0
+ github.com/pb33f/libopenapi v0.33.4
+ github.com/pb33f/libopenapi-validator v0.11.1
+ github.com/prometheus/alertmanager v0.31.0
github.com/prometheus/client_golang v1.23.2
- github.com/prometheus/client_golang/exp v0.0.0-20260101091701-2cd067eb23c9
+ github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562
github.com/prometheus/client_model v0.6.2
- github.com/prometheus/common v0.67.4
+ github.com/prometheus/common v0.67.5
github.com/prometheus/common/assets v0.2.0
- github.com/prometheus/exporter-toolkit v0.15.0
- github.com/prometheus/sigv4 v0.3.0
+ github.com/prometheus/exporter-toolkit v0.15.1
+ github.com/prometheus/sigv4 v0.4.1
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
- github.com/stackitcloud/stackit-sdk-go/core v0.20.1
+ github.com/stackitcloud/stackit-sdk-go/core v0.21.1
github.com/stretchr/testify v1.11.1
github.com/vultr/govultr/v2 v2.17.2
- go.opentelemetry.io/collector/component v1.48.0
- go.opentelemetry.io/collector/consumer v1.48.0
- go.opentelemetry.io/collector/pdata v1.48.0
- go.opentelemetry.io/collector/processor v1.48.0
- go.opentelemetry.io/collector/semconv v0.128.0
- go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
- go.opentelemetry.io/otel v1.39.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0
- go.opentelemetry.io/otel/metric v1.39.0
- go.opentelemetry.io/otel/sdk v1.39.0
- go.opentelemetry.io/otel/trace v1.39.0
+ go.opentelemetry.io/collector/component v1.51.0
+ go.opentelemetry.io/collector/consumer v1.51.0
+ go.opentelemetry.io/collector/pdata v1.51.0
+ go.opentelemetry.io/collector/processor v1.51.0
+ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.65.0
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0
+ go.opentelemetry.io/otel v1.40.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0
+ go.opentelemetry.io/otel/metric v1.40.0
+ go.opentelemetry.io/otel/sdk v1.40.0
+ go.opentelemetry.io/otel/trace v1.40.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak v1.3.0
- go.uber.org/multierr v1.11.0
go.yaml.in/yaml/v2 v2.4.3
+ go.yaml.in/yaml/v3 v3.0.4
+ go.yaml.in/yaml/v4 v4.0.0-rc.4
golang.org/x/oauth2 v0.34.0
golang.org/x/sync v0.19.0
- golang.org/x/sys v0.39.0
- golang.org/x/text v0.32.0
- google.golang.org/api v0.258.0
- google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b
+ golang.org/x/sys v0.40.0
+ golang.org/x/text v0.33.0
+ google.golang.org/api v0.265.0
+ google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20
google.golang.org/grpc v1.78.0
google.golang.org/protobuf v1.36.11
- gopkg.in/yaml.v3 v3.0.1
- k8s.io/api v0.34.3
- k8s.io/apimachinery v0.34.3
- k8s.io/client-go v0.34.3
+ k8s.io/api v0.35.0
+ k8s.io/apimachinery v0.35.0
+ k8s.io/client-go v0.35.0
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.130.1
)
require (
- github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
+ github.com/bahlo/generic-list-go v0.2.0 // indirect
+ github.com/basgys/goxml2json v1.1.1-0.20231018121955-e66ee54ceaad // indirect
+ github.com/buger/jsonparser v1.1.1 // indirect
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
github.com/go-openapi/swag/conv v0.25.4 // indirect
github.com/go-openapi/swag/fileutils v0.25.4 // indirect
@@ -115,26 +121,33 @@ require (
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
- go.yaml.in/yaml/v3 v3.0.4 // indirect
+ github.com/pb33f/jsonpath v0.7.1 // indirect
+ github.com/pb33f/ordered-map/v2 v2.3.0 // indirect
+ github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
+ github.com/sirupsen/logrus v1.9.4 // indirect
+ go.opentelemetry.io/collector/internal/componentalias v0.145.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
require (
- cloud.google.com/go/auth v0.17.0 // indirect
+ cloud.google.com/go/auth v0.18.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
- github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect
@@ -143,8 +156,8 @@ require (
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.6.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
- github.com/distribution/reference v0.5.0 // indirect
- github.com/docker/go-connections v0.4.0 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/fatih/color v1.16.0 // indirect
@@ -152,25 +165,25 @@ require (
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/analysis v0.24.1 // indirect
- github.com/go-openapi/errors v0.22.4 // indirect
- github.com/go-openapi/jsonpointer v0.22.1 // indirect
- github.com/go-openapi/jsonreference v0.21.3 // indirect
+ github.com/go-openapi/analysis v0.24.2 // indirect
+ github.com/go-openapi/errors v0.22.6 // indirect
+ github.com/go-openapi/jsonpointer v0.22.4 // indirect
+ github.com/go-openapi/jsonreference v0.21.4 // indirect
github.com/go-openapi/loads v0.23.2 // indirect
- github.com/go-openapi/spec v0.22.1 // indirect
+ github.com/go-openapi/spec v0.22.3 // indirect
github.com/go-openapi/swag v0.25.4 // indirect
github.com/go-openapi/validate v0.25.1 // indirect
github.com/go-resty/resty/v2 v2.17.1 // indirect
- github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
- github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
- github.com/google/go-querystring v1.1.0 // indirect
+ github.com/google/go-querystring v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
- github.com/googleapis/gax-go/v2 v2.15.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
+ github.com/googleapis/gax-go/v2 v2.16.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
github.com/hashicorp/cronexpr v1.1.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -186,27 +199,26 @@ require (
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
- github.com/knadh/koanf/v2 v2.3.0 // indirect
+ github.com/knadh/koanf/v2 v2.3.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
- github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mdlayher/socket v0.4.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
- github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
- github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
+ github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
- github.com/morikuni/aec v1.0.0 // indirect
+ github.com/morikuni/aec v1.1.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.142.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.142.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.0.2 // indirect
+ github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
@@ -215,34 +227,33 @@ require (
github.com/prometheus/otlptranslator v1.0.0
github.com/prometheus/procfs v0.16.1 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
- github.com/spf13/pflag v1.0.6 // indirect
+ github.com/spf13/pflag v1.0.10 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.17.6 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
- go.opentelemetry.io/collector/confmap v1.48.0 // indirect
- go.opentelemetry.io/collector/confmap/xconfmap v0.142.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.48.0 // indirect
- go.opentelemetry.io/collector/pipeline v1.48.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.51.0 // indirect
+ go.opentelemetry.io/collector/confmap/xconfmap v0.145.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.51.0 // indirect
+ go.opentelemetry.io/collector/pipeline v1.51.0 // indirect
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.uber.org/zap v1.27.1 // indirect
- golang.org/x/crypto v0.46.0 // indirect
- golang.org/x/exp v0.0.0-20250808145144-a408d31f581a // indirect
- golang.org/x/mod v0.30.0 // indirect
- golang.org/x/net v0.48.0 // indirect
- golang.org/x/term v0.38.0 // indirect
+ golang.org/x/crypto v0.47.0 // indirect
+ golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
+ golang.org/x/mod v0.32.0 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/term v0.39.0 // indirect
golang.org/x/time v0.14.0 // indirect
- golang.org/x/tools v0.39.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 // indirect
- gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+ golang.org/x/tools v0.41.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/ini.v1 v1.67.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
- gotest.tools/v3 v3.0.3 // indirect
- k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
- k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
- sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+ gopkg.in/ini.v1 v1.67.1 // indirect
+ gotest.tools/v3 v3.5.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
+ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)
@@ -255,3 +266,5 @@ exclude (
github.com/grpc-ecosystem/grpc-gateway v1.14.7
google.golang.org/api v0.30.0
)
+
+replace cloud.google.com/go => cloud.google.com/go v0.123.0
diff --git a/go.sum b/go.sum
index 216cc63a7c..bcb7b8fcc1 100644
--- a/go.sum
+++ b/go.sum
@@ -1,11 +1,11 @@
-cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
-cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
+cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs=
+cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
@@ -20,8 +20,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
@@ -31,8 +31,10 @@ github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1v
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk=
github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -47,40 +49,46 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
-github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
-github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
-github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
+github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
+github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
+github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.0 h1:o7eJKe6VYAnqERPlLAvDW5VKXV6eTKv1oxTpMoDP378=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.0/go.mod h1:Wg68QRgy2gEGGdmTPU/UbVpdv8sM14bUZmF64KFwAsY=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.70.0 h1:IZpZatHsscdOKjwmDXC6idsCXmm3F/obutAUNjnX+OM=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.70.0/go.mod h1:LQMlcWBoiFVD3vUVEz42ST0yTiaDujv2dRE6sXt1yPE=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.285.0 h1:cRZQsqCy59DSJmvmUYzi9K+dutysXzfx6F+fkcIHtOk=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.285.0/go.mod h1:Uy+C+Sc58jozdoL1McQr8bDsEvNFx+/nBY+vpO1HVUY=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0 h1:MzP/ElwTpINq+hS80ZQz4epKVnUTlz8Sz+P/AFORCKM=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0/go.mod h1:pMlGFDpHoLTJOIZHGdJOAWmi+xeIlQXuFTuQxs1epYE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
-github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10 h1:MQuZZ6Tq1qQabPlkVxrCMdyVl70Ogl4AERZKo+y9Wzo=
-github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10/go.mod h1:U5C3JME1ibKESmpzBAqlRpTYZfVbTqrb5ICJm+sVVd8=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
+github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7 h1:0jDb9b505gbCmtjH1RT7kx8hDbVDzOhnTeZm7dzskpQ=
+github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7/go.mod h1:tWnHS64fg5ydLHivFlCAtEh/1iMNzr56QsH3F+UTwD4=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11 h1:VM5e5M39zRSs+aT0O9SoxHjUXqXxhbw3Yi0FdMQWPIc=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11/go.mod h1:0jvzYPIQGCpnY/dmdaotTk2JH4QuBlnW0oeyrcGLWJ4=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
+github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
+github.com/basgys/goxml2json v1.1.1-0.20231018121955-e66ee54ceaad h1:3swAvbzgfaI6nKuDDU7BiKfZRdF+h2ZwKgMHd8Ha4t8=
+github.com/basgys/goxml2json v1.1.1-0.20231018121955-e66ee54ceaad/go.mod h1:9+nBLYNWkvPcq9ep0owWUsPTLgL9ZXTsZWcCSVGGLJ0=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -88,11 +96,21 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow=
+github.com/bitly/go-simplejson v0.5.1/go.mod h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
+github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
+github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
@@ -105,21 +123,22 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.171.0 h1:QwpkwWKr3v7yxc8D4NQG973NoR9APCEWjYnLOQeXVpQ=
-github.com/digitalocean/godo v1.171.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/digitalocean/godo v1.173.0 h1:tgzevGhlz9VFjk2y3NmeItUT4vIVVCRFETlG/1GlEQI=
+github.com/digitalocean/godo v1.173.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
+github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
+github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
@@ -137,6 +156,8 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
+github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@@ -153,18 +174,18 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
-github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
-github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
-github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
-github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
-github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
-github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
-github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
+github.com/go-openapi/analysis v0.24.2 h1:6p7WXEuKy1llDgOH8FooVeO+Uq2za9qoAOq4ZN08B50=
+github.com/go-openapi/analysis v0.24.2/go.mod h1:x27OOHKANE0lutg2ml4kzYLoHGMKgRm1Cj2ijVOjJuE=
+github.com/go-openapi/errors v0.22.6 h1:eDxcf89O8odEnohIXwEjY1IB4ph5vmbUsBMsFNwXWPo=
+github.com/go-openapi/errors v0.22.6/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
+github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
+github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
+github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
+github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
-github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
-github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
+github.com/go-openapi/spec v0.22.3 h1:qRSmj6Smz2rEBxMnLRBMeBWxbbOvuOoElvSvObIgwQc=
+github.com/go-openapi/spec v0.22.3/go.mod h1:iIImLODL2loCh3Vnox8TY2YWYJZjMAKYyLH2Mu8lOZs=
github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
@@ -204,17 +225,20 @@ github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2m
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
-github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
+github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
+github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
-github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
+github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -227,37 +251,37 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
-github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
+github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20260111202518-71be6bfdd440 h1:oKBqR+eQXiIM7X8K1JEg9aoTEePLq/c6Awe484abOuA=
-github.com/google/pprof v0.0.0-20260111202518-71be6bfdd440/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
+github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno=
+github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
-github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
-github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
-github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
-github.com/gophercloud/gophercloud/v2 v2.9.0 h1:Y9OMrwKF9EDERcHFSOTpf/6XGoAI0yOxmsLmQki4LPM=
-github.com/gophercloud/gophercloud/v2 v2.9.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao=
+github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8=
+github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y=
+github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14=
+github.com/gophercloud/gophercloud/v2 v2.10.0 h1:NRadC0aHNvy4iMoFXj5AFiPmut/Sj3hAPAo9B59VMGc=
+github.com/gophercloud/gophercloud/v2 v2.10.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
-github.com/hashicorp/consul/api v1.32.1 h1:0+osr/3t/aZNAdJX558crU3PEjVrG4x6715aZHRgceE=
-github.com/hashicorp/consul/api v1.32.1/go.mod h1:mXUWLnxftwTmDv4W3lzxYCPD199iNLLUyLfLGFJbtl4=
-github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
-github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
+github.com/hashicorp/consul/api v1.33.2 h1:Q6mE0WZsUTJerlnl9TuXzqrtZ0cKdOCsxcZhj5mKbMs=
+github.com/hashicorp/consul/api v1.33.2/go.mod h1:K3yoL/vnIBcQV/25NeMZVokRvPPERiqp2Udtr4xAfhs=
+github.com/hashicorp/consul/sdk v0.17.1 h1:LumAh8larSXmXw2wvw/lK5ZALkJ2wK8VRwWMLVV5M5c=
+github.com/hashicorp/consul/sdk v0.17.1/go.mod h1:EngiixMhmw9T7wApycq6rDRFXXVUwjjf7HuLiGMH/Sw=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -276,8 +300,8 @@ github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I=
-github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
+github.com/hashicorp/go-msgpack/v2 v2.1.5 h1:Ue879bPnutj/hXfmUk6s/jtIK90XxgiUIcXRl656T44=
+github.com/hashicorp/go-msgpack/v2 v2.1.5/go.mod h1:bjCsRXpZ7NsJdk45PoCQnzRGDaK8TKm5ZnDI/9y3J4M=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
@@ -303,18 +327,20 @@ github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk=
-github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE=
-github.com/hashicorp/nomad/api v0.0.0-20260106084653-e8f2200c7039 h1:77URO0yPjlPjRc00KbjoBTG2dqHXFKA7Fv3s98w16kM=
-github.com/hashicorp/nomad/api v0.0.0-20260106084653-e8f2200c7039/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE=
+github.com/hashicorp/memberlist v0.5.4 h1:40YY+3qq2tAUhZIMEK8kqusKZBBjdwJ3NUjvYkcxh74=
+github.com/hashicorp/memberlist v0.5.4/go.mod h1:OgN6xiIo6RlHUWk+ALjP9e32xWCoQrsOCmHrWCm2MWA=
+github.com/hashicorp/nomad/api v0.0.0-20260205205048-8315996478d1 h1:2T7Ay5FMAnZUBxSbrkjufY5YKiLPWij0dDPnbM/KYak=
+github.com/hashicorp/nomad/api v0.0.0-20260205205048-8315996478d1/go.mod h1:JAmS1nGJ1KcTM+MHAkgyrL0GDbsnKiJsp75KyqO2wWc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.33.0 h1:g9hwuo60IXbupXJCYMlO4xDXgxxMPuFk31iOpLXDCV4=
-github.com/hetznercloud/hcloud-go/v2 v2.33.0/go.mod h1:GzYEl7slIGKc6Ttt08hjiJvGj8/PbWzcQf6IUi02dIs=
+github.com/hetznercloud/hcloud-go/v2 v2.36.0 h1:HlLL/aaVXUulqe+rsjoJmrxKhPi1MflL5O9iq5QEtvo=
+github.com/hetznercloud/hcloud-go/v2 v2.36.0/go.mod h1:MnN/QJEa/RYNQiiVoJjNHPntM7Z1wlYPgJ2HA40/cDE=
+github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/ionos-cloud/sdk-go/v6 v6.3.6 h1:l/TtKgdQ1wUH3DDe2SfFD78AW+TJWdEbDpQhHkWd6CM=
github.com/ionos-cloud/sdk-go/v6 v6.3.6/go.mod h1:nUGHP4kZHAZngCVr4v6C8nuargFrtvt7GrzH/hqn7c4=
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -328,14 +354,14 @@ github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRt
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
-github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
+github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A=
-github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM=
-github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28=
+github.com/knadh/koanf/v2 v2.3.2 h1:Ee6tuzQYFwcZXQpc2MiVeC6qHMandf5SMUJJNoFp/c4=
+github.com/knadh/koanf/v2 v2.3.2/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -349,21 +375,22 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.63.0 h1:MdjizfXNJDVJU6ggoJmMO5O9h4KGPGivNX0fzrAnstk=
-github.com/linode/linodego v1.63.0/go.mod h1:GoiwLVuLdBQcAebxAVKVL3mMYUgJZR/puOUSla04xBE=
+github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
+github.com/linode/linodego v1.65.0 h1:SdsuGD8VSsPWeShXpE7ihl5vec+fD3MgwhnfYC/rj7k=
+github.com/linode/linodego v1.65.0/go.mod h1:tOFiTErdjkbVnV+4S0+NmIE9dqqZUEM2HsJaGu8wMh8=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -375,16 +402,14 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.69 h1:Kb7Y/1Jo+SG+a2GtfoFUfDkG//csdRPwRLkCsxDG9Sc=
-github.com/miekg/dns v1.1.69/go.mod h1:7OyjD9nEba5OkqQ/hB4fy3PIoxafSZJtducccIelz3g=
+github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI=
+github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -393,8 +418,8 @@ github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -403,8 +428,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=
+github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -418,25 +443,34 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
-github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.142.0 h1:agYk41V3eIfV6aIMxIeRQ7SFhfaW5k2O96HEebpmPwM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.142.0/go.mod h1:ZmMdcBia20ih8NYia5b4dNhfNLT68xHgaqF+fNW+TLM=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.142.0 h1:bLp+Ii1UQ9cNr+Dm1jKzbcklhd0eBnPuIFQY6NPzkZ0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.142.0/go.mod h1:6N36UrFd9Yiz2aYpXM5xiK7Eqp2RyAr3O8lUE+wK2Y8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.142.0 h1:fL8LBVeje+nbts2VIInvRa4T5LlsC0BZCI60wNGoS+Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.142.0/go.mod h1:fSnKuTN91I68Ou1Lgfwe3Mt6BGl9kcA8PYCpnGkPnsY=
+github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
+github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0 h1:0dYiJ7krIwaHFX6YLNDo/yawTZIu8X16tT/nwW1UTG8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0/go.mod h1:mhoa9lipcEH0heeKf6+xHzGUrCuAgImQv4/Qpmu0+Fk=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0 h1:sB4yuYx45zig1ceQ+kmrEYy0xMZ+mGagwYIFtJkkU1w=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0/go.mod h1:uLhceuH7ZtiVxk+B0MHI0vhJG2Y4aOzT/hrV6c5KjVU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0 h1:en86L47oOTsAkbDc5VEMF5cziXPBK2D4hqGRqLaJtCw=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0/go.mod h1:osDRUOIfd7IiKkDvcE/VrPp9FFOPJmFp73RuvgOn5gE=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pb33f/jsonpath v0.7.1 h1:dEp6oIZuJbpDSyuHAl9m7GonoDW4M20BcD5vT0tPYRE=
+github.com/pb33f/jsonpath v0.7.1/go.mod h1:zBV5LJW4OQOPatmQE2QdKpGQJvhDTlE5IEj6ASaRNTo=
+github.com/pb33f/libopenapi v0.33.4 h1:Rgczgrg4VQKXW/NtSj/nApmtYKS+TVpLgTsG692JxmE=
+github.com/pb33f/libopenapi v0.33.4/go.mod h1:e/dmd2Pf1nkjqkI0r7guFSyt9T5V0IIQKgs0L6B/3b0=
+github.com/pb33f/libopenapi-validator v0.11.1 h1:lTW738oB3lwpS9poDzmI3jpTPZSb5W46vklZqtyf7+Q=
+github.com/pb33f/libopenapi-validator v0.11.1/go.mod h1:7CfboslU/utKhiuQRuenriGYZ+HQLDOvARxjqRwd57w=
+github.com/pb33f/ordered-map/v2 v2.3.0 h1:k2OhVEQkhTCQMhAicQ3Z6iInzoZNQ7L9MVomwKBZ5WQ=
+github.com/pb33f/ordered-map/v2 v2.3.0/go.mod h1:oe5ue+6ZNhy7QN9cPZvPA23Hx0vMHnNVeMg4fGdCANw=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
@@ -455,15 +489,15 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus/alertmanager v0.30.0 h1:E4dnxSFXK8V2Bb8iqudlisTmaIrF3hRJSWnliG08tBM=
-github.com/prometheus/alertmanager v0.30.0/go.mod h1:93PBumcTLr/gNtNtM0m7BcCffbvYP5bKuLBWiOnISaA=
+github.com/prometheus/alertmanager v0.31.0 h1:DQW02uIUNNiAa9AD9VA5xaFw5D+xrV+bocJc4gN9bEU=
+github.com/prometheus/alertmanager v0.31.0/go.mod h1:zWPQwhbLt2ybee8rL921UONeQ59Oncash+m/hGP17tU=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
-github.com/prometheus/client_golang/exp v0.0.0-20260101091701-2cd067eb23c9 h1:al1B/YzHmaXhacIFkrZSDSUpnPHV4ZPMfENQpvk3PZQ=
-github.com/prometheus/client_golang/exp v0.0.0-20260101091701-2cd067eb23c9/go.mod h1:PmAYDB13uBFBG9qE1qxZZgZWhg7Rg6SfKM5DMK7hjyI=
+github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 h1:vwqZvuobg82U0gcG2eVrFH27806bUbNr32SvfRbvdsg=
+github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562/go.mod h1:PmAYDB13uBFBG9qE1qxZZgZWhg7Rg6SfKM5DMK7hjyI=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -471,12 +505,12 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
-github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
+github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
-github.com/prometheus/exporter-toolkit v0.15.0 h1:Pcle5sSViwR1x0gdPd0wtYrPQENBieQAM7TmT0qtb2U=
-github.com/prometheus/exporter-toolkit v0.15.0/go.mod h1:OyRWd2iTo6Xge9Kedvv0IhCrJSBu36JCfJ2yVniRIYk=
+github.com/prometheus/exporter-toolkit v0.15.1 h1:XrGGr/qWl8Gd+pqJqTkNLww9eG8vR/CoRk0FubOKfLE=
+github.com/prometheus/exporter-toolkit v0.15.1/go.mod h1:P/NR9qFRGbCFgpklyhix9F6v6fFr/VQB/CVsrMDGKo4=
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -484,13 +518,15 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
-github.com/prometheus/sigv4 v0.3.0 h1:QIG7nTbu0JTnNidGI1Uwl5AGVIChWUACxn2B/BQ1kms=
-github.com/prometheus/sigv4 v0.3.0/go.mod h1:fKtFYDus2M43CWKMNtGvFNHGXnAJJEGZbiYCmVp/F8I=
+github.com/prometheus/sigv4 v0.4.1 h1:EIc3j+8NBea9u1iV6O5ZAN8uvPq2xOIUPcqCTivHuXs=
+github.com/prometheus/sigv4 v0.4.1/go.mod h1:eu+ZbRvsc5TPiHwqh77OWuCnWK73IdkETYY46P4dXOU=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 h1:ObX9hZmK+VmijreZO/8x9pQ8/P/ToHD/bdSb4Eg4tUo=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36/go.mod h1:LEsDu4BubxK7/cWhtlQWfuxwL4rf/2UEpxXz1o1EMtM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
@@ -501,13 +537,12 @@ github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+Yg
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stackitcloud/stackit-sdk-go/core v0.20.1 h1:odiuhhRXmxvEvnVTeZSN9u98edvw2Cd3DcnkepncP3M=
-github.com/stackitcloud/stackit-sdk-go/core v0.20.1/go.mod h1:fqto7M82ynGhEnpZU6VkQKYWYoFG5goC076JWXTUPRQ=
+github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
+github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stackitcloud/stackit-sdk-go/core v0.21.1 h1:Y/PcAgM7DPYMNqum0MLv4n1mF9ieuevzcCIZYQfm3Ts=
+github.com/stackitcloud/stackit-sdk-go/core v0.21.1/go.mod h1:osMglDby4csGZ5sIfhNyYq1bS1TxIdPY88+skE/kkmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -517,6 +552,7 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -533,66 +569,67 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
-go.opentelemetry.io/collector/component v1.48.0 h1:0hZKOvT6fIlXoE+6t40UXbXOH7r/h9jyE3eIt0W19Qg=
-go.opentelemetry.io/collector/component v1.48.0/go.mod h1:Kmc9Z2CT53M2oRRf+WXHUHHgjCC+ADbiqfPO5mgZe3g=
-go.opentelemetry.io/collector/component/componentstatus v0.142.0 h1:a1KkLCtShI5SfhO2ga75VqWjjBRGgrerelt/2JXWLBI=
-go.opentelemetry.io/collector/component/componentstatus v0.142.0/go.mod h1:IRWKvFcUrFrkz1gJEV+cKAdE2ZBT128gk1sHt0OzKI4=
-go.opentelemetry.io/collector/component/componenttest v0.142.0 h1:a8XclEutO5dv4AnzThHK8dfqR4lDWjJKLtRNM2aVUFM=
-go.opentelemetry.io/collector/component/componenttest v0.142.0/go.mod h1:JhX/zKaEbjhFcsiV2ha2spzo24A6RL/jqNBS0svURD0=
-go.opentelemetry.io/collector/confmap v1.48.0 h1:vGhg25NEUX5DiYziJEw2siwdzsvtXBRZVuYyLVinFR8=
-go.opentelemetry.io/collector/confmap v1.48.0/go.mod h1:8tJHJowmvUkJ8AHzZ6SaH61dcWbdfRE9Sd/hwsKLgRE=
-go.opentelemetry.io/collector/confmap/xconfmap v0.142.0 h1:SNfuFP8TA0PmUkx6ryY63uNjLN2HMh5VeGO++IYdPgA=
-go.opentelemetry.io/collector/confmap/xconfmap v0.142.0/go.mod h1:FXuX6B8b7Ub7qkLqloWKanmPhADL18EEkaFptcd4eDQ=
-go.opentelemetry.io/collector/consumer v1.48.0 h1:g1uroz2AA0cqnEsjqFTSZG+y8uH1gQBqqyzk8kd3QiM=
-go.opentelemetry.io/collector/consumer v1.48.0/go.mod h1:lC6PnVXBwI456SV5WtvJqE7vjCNN6DAUc8xjFQ9wUV4=
-go.opentelemetry.io/collector/consumer/consumertest v0.142.0 h1:TRt8zR57Vk1PTjtqjHOwOAMbIl+IeloHxWAuF8sWdRw=
-go.opentelemetry.io/collector/consumer/consumertest v0.142.0/go.mod h1:yq2dhMxFUlCFkRN7LES3fzsTmUDw9VaunyRAka2TEaY=
-go.opentelemetry.io/collector/consumer/xconsumer v0.142.0 h1:qOoQnLZXQ9sRLexTkkmBx3qfaOmEgco9VBPmryg5UhA=
-go.opentelemetry.io/collector/consumer/xconsumer v0.142.0/go.mod h1:oPN0yJzEpovwlWvmSaiYgtDqGuOmMMLmmg352sqZdsE=
-go.opentelemetry.io/collector/featuregate v1.48.0 h1:jiGRcl93yzUFgZVDuskMAftFraE21jANdxXTQfSQScc=
-go.opentelemetry.io/collector/featuregate v1.48.0/go.mod h1:/1bclXgP91pISaEeNulRxzzmzMTm4I5Xih2SnI4HRSo=
-go.opentelemetry.io/collector/internal/testutil v0.142.0 h1:MHnAVRimQdsfYqYHC3YuJRkIUap4VmSpJkkIT2N7jJA=
-go.opentelemetry.io/collector/internal/testutil v0.142.0/go.mod h1:YAD9EAkwh/l5asZNbEBEUCqEjoL1OKMjAMoPjPqH76c=
-go.opentelemetry.io/collector/pdata v1.48.0 h1:CKZ+9v/lGTX/cTGx2XVp8kp0E8R//60kHFCBdZudrTg=
-go.opentelemetry.io/collector/pdata v1.48.0/go.mod h1:jaf2JQGpfUreD1TOtGBPsq00ecOqM66NG15wALmdxKA=
-go.opentelemetry.io/collector/pdata/pprofile v0.142.0 h1:Ivyw7WY8SIIWqzXsnNmjEgz3ysVs/OkIf0KIpJUnuuo=
-go.opentelemetry.io/collector/pdata/pprofile v0.142.0/go.mod h1:94GAph54K4WDpYz9xirhroHB3ptNLuPiY02k8fyoNUI=
-go.opentelemetry.io/collector/pdata/testdata v0.142.0 h1:+jf9RyLWl8WyhIVjpg7yuH+bRdQH4mW20cPtCMlY1cI=
-go.opentelemetry.io/collector/pdata/testdata v0.142.0/go.mod h1:kgAu5ZLEcVuPH3RFiHDg23RGitgm1M0cUAVwiGX4SB8=
-go.opentelemetry.io/collector/pipeline v1.48.0 h1:E4zyQ7+4FTGvdGS4pruUnItuyRTGhN0Qqk1CN71lfW0=
-go.opentelemetry.io/collector/pipeline v1.48.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
-go.opentelemetry.io/collector/processor v1.48.0 h1:3Kttw79mnrf463QKJGoGZzFfiNzQuMWK0p2nHuvOhaQ=
-go.opentelemetry.io/collector/processor v1.48.0/go.mod h1:A3OsW6ga+a48J1mrnVNH5L5kB0v+n9nVFlmOQB5/Jwk=
-go.opentelemetry.io/collector/processor/processortest v0.142.0 h1:wQnJeXDejBL6r8ov66AYAGf8Q0/JspjuqAjPVBdCUoI=
-go.opentelemetry.io/collector/processor/processortest v0.142.0/go.mod h1:QU5SWj0L+92MSvQxZDjwWCsKssNDm+nD6SHn7IvviUE=
-go.opentelemetry.io/collector/processor/xprocessor v0.142.0 h1:7a1Crxrd5iBMVnebTxkcqxVkRHAlOBUUmNTUVUTnlCU=
-go.opentelemetry.io/collector/processor/xprocessor v0.142.0/go.mod h1:LY/GS2DiJILJKS3ynU3eOLLWSP8CmN1FtdpAMsVV8AU=
-go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
-go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0 h1:OXSUzgmIFkcC4An+mv+lqqZSndTffXpjAyoR+1f8k/A=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0/go.mod h1:1A4GVLFIm54HFqVdOpWmukap7rgb0frrE3zWXohLPdM=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
-go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
-go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 h1:Ckwye2FpXkYgiHX7fyVrN1uA/UYd9ounqqTuSNAv0k4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0/go.mod h1:teIFJh5pW2y+AN7riv6IBPX2DuesS3HgP39mwOspKwU=
-go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
-go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
-go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
-go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
-go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
-go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
-go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
-go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
+go.opentelemetry.io/collector/component v1.51.0 h1:btNW76MCRmpsk0ARRT5wspDXF9tvdaLd3uBtYXIiQn0=
+go.opentelemetry.io/collector/component v1.51.0/go.mod h1:Zlgwh4yTLDhJglOXqiyXZ7paepTvvoijfFjLqOr/Qww=
+go.opentelemetry.io/collector/component/componentstatus v0.145.0 h1:EwUZfSaagdpRXnlrb0TqReJXXW2p9HWBU5YiIeXPCAE=
+go.opentelemetry.io/collector/component/componentstatus v0.145.0/go.mod h1:OiYb8rT4FtSJPFSGCKYvOaajdueDUTJZncixGrmy5aM=
+go.opentelemetry.io/collector/component/componenttest v0.145.0 h1:ryhRrXqQybGMhz7A7t32NC8BXAFcX2o1RetgPM7vw88=
+go.opentelemetry.io/collector/component/componenttest v0.145.0/go.mod h1:5uStrhUdZ0Fw3se00CPmVaRtW8o9N8kKiY76OSCWFjQ=
+go.opentelemetry.io/collector/confmap v1.51.0 h1:C9YlMNkIgzuauLpUz2F7DLlWwqAmkQKNcKj1XATVWuE=
+go.opentelemetry.io/collector/confmap v1.51.0/go.mod h1:uWi4b9lHfvEC2poJ2I2vXwGUREVEQTcdUguOpfqdcHM=
+go.opentelemetry.io/collector/confmap/xconfmap v0.145.0 h1:ngbyfh4+SKlA+osgsak3AxUNPxVxaJTmA0Sl7VfJzwY=
+go.opentelemetry.io/collector/confmap/xconfmap v0.145.0/go.mod h1:zTSK+c76NAy/tI1R3xfZjdoI04D9EYDnzAHQQwl6AmA=
+go.opentelemetry.io/collector/consumer v1.51.0 h1:Ex1x/k9VEEA2DOgt/eSc2Z9KTp0I6xBSruLmrYFfIFY=
+go.opentelemetry.io/collector/consumer v1.51.0/go.mod h1:Erk6qdfVj+24QTrGCpurcrF+qdUlHkb4dgMy5wJxLvY=
+go.opentelemetry.io/collector/consumer/consumertest v0.145.0 h1:3+uMwuMHoXMAU+Z6mwCRA3AxWeL7SujcAQwqqHJ1gCc=
+go.opentelemetry.io/collector/consumer/consumertest v0.145.0/go.mod h1:IFc/FeaIHQClb8KK0aVn0tFDNMc+/MmfQ+aBT1cJNeo=
+go.opentelemetry.io/collector/consumer/xconsumer v0.145.0 h1:9w7KKv9lVJoHvMLC6SUJHenU/KySdEgFJXbB4JQOEsk=
+go.opentelemetry.io/collector/consumer/xconsumer v0.145.0/go.mod h1:SryDCLP2ZaFeZJtA2CSksJ0XvjH8k3LmlfXvy/kC7Wc=
+go.opentelemetry.io/collector/featuregate v1.51.0 h1:dxJuv/3T84dhNKp7fz5+8srHz1dhquGzDpLW4OZTFBw=
+go.opentelemetry.io/collector/featuregate v1.51.0/go.mod h1:/1bclXgP91pISaEeNulRxzzmzMTm4I5Xih2SnI4HRSo=
+go.opentelemetry.io/collector/internal/componentalias v0.145.0 h1:A9V5IiETzz8FCtjxjRM5gf7RE3sOtA1h8phmpQjXTZ4=
+go.opentelemetry.io/collector/internal/componentalias v0.145.0/go.mod h1:sEKEAwAn45ZiXRk3T/vbkvetw14tIRd0CJIxcEx9SsQ=
+go.opentelemetry.io/collector/internal/testutil v0.145.0 h1:H/KL0GH3kGqSMKxZvnQ0B0CulfO9xdTg4DZf28uV7fY=
+go.opentelemetry.io/collector/internal/testutil v0.145.0/go.mod h1:YAD9EAkwh/l5asZNbEBEUCqEjoL1OKMjAMoPjPqH76c=
+go.opentelemetry.io/collector/pdata v1.51.0 h1:DnDhSEuDXNdzGRB7f6oOfXpbDApwBX3tY+3K69oUrDA=
+go.opentelemetry.io/collector/pdata v1.51.0/go.mod h1:GoX1bjKDR++mgFKdT7Hynv9+mdgQ1DDXbjs7/Ww209Q=
+go.opentelemetry.io/collector/pdata/pprofile v0.145.0 h1:ASMKpoqokf8HhzjoeMKZf0K6UXLhufVwNXH0sSuUn5w=
+go.opentelemetry.io/collector/pdata/pprofile v0.145.0/go.mod h1:a60GC7wQPhLAixWzKbbP51QLwwc+J0Cmp4SurOlhGUk=
+go.opentelemetry.io/collector/pdata/testdata v0.145.0 h1:iFsxsCMtE3lnAc/5kZbhZHpRv1OMmM+O5ry46xdQHbg=
+go.opentelemetry.io/collector/pdata/testdata v0.145.0/go.mod h1:0y2ERArdzqmYdJHdKLKue+AUubSEGlwK49F+23+Mbic=
+go.opentelemetry.io/collector/pipeline v1.51.0 h1:GZBNW+aaOE+zufGzAkXy0OI7n1cqepEa5J+beaOpS2k=
+go.opentelemetry.io/collector/pipeline v1.51.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
+go.opentelemetry.io/collector/processor v1.51.0 h1:PKpCzkLQmqaW08TOVh/zM0qx07Ihq+DR5J/OBkPiL9o=
+go.opentelemetry.io/collector/processor v1.51.0/go.mod h1:rtIPFS+EFRAkG+CSwtjxs2IsIkuZStObvALeueD02XI=
+go.opentelemetry.io/collector/processor/processortest v0.145.0 h1:RDGBmyZnHk7XVK/EdLt/8iPWj+QLStbbVi1nFTNR01s=
+go.opentelemetry.io/collector/processor/processortest v0.145.0/go.mod h1:WAvxAzSojkdoZB915Z1lsVHCPDJBb2fepjJBjenrzjg=
+go.opentelemetry.io/collector/processor/xprocessor v0.145.0 h1:DaIE7MxRlg0OL1o2P0GQZtmZeExAmVso3qWv8S0RLps=
+go.opentelemetry.io/collector/processor/xprocessor v0.145.0/go.mod h1:kUwRyKBU/kjCmXodd+0z7CpvcP0A9G9/QL+MaJt4U2o=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.65.0 h1:ab5U7DpTjjN8pNgwqlA/s0Csb+N2Raqo9eTSDhfg4Z8=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.65.0/go.mod h1:nwFJC46Dxhqz5R9k7IV8To/Z46JPvW+GNKhTxQQlUzg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE=
@@ -615,23 +652,26 @@ go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
-go.yaml.in/yaml/v4 v4.0.0-rc.3 h1:3h1fjsh1CTAPjW7q/EMe+C8shx5d8ctzZTrLcs/j8Go=
-go.yaml.in/yaml/v4 v4.0.0-rc.3/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
+go.yaml.in/yaml/v4 v4.0.0-rc.4 h1:UP4+v6fFrBIb1l934bDl//mmnoIZEDK0idg1+AIvX5U=
+go.yaml.in/yaml/v4 v4.0.0-rc.4/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
-golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
-golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
-golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
+golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
+golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
-golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
+golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -640,8 +680,12 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
-golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
-golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -650,6 +694,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -669,35 +715,49 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
-golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
-golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
-golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
-golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
+golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/tools/godoc v0.1.0-deprecated h1:o+aZ1BOj6Hsx/GBdJO/s815sqftjSnrZZwyYTHODvtk=
golang.org/x/tools/godoc v0.1.0-deprecated/go.mod h1:qM63CriJ961IHWmnWa9CjZnBndniPt4a3CK0PVB9bIg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -706,14 +766,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/api v0.258.0 h1:IKo1j5FBlN74fe5isA2PVozN3Y5pwNKriEgAXPOkDAc=
-google.golang.org/api v0.258.0/go.mod h1:qhOMTQEZ6lUps63ZNq9jhODswwjkjYYguA7fA3TBFww=
-google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
-google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
-google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E=
-google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 h1:2I6GHUeJ/4shcDpoUlLs/2WPnhg7yJwvXtqcMJt9liA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/api v0.265.0 h1:FZvfUdI8nfmuNrE34aOWFPmLC+qRBEiNm3JdivTvAAU=
+google.golang.org/api v0.265.0/go.mod h1:uAvfEl3SLUj/7n6k+lJutcswVojHPp2Sp08jWCu8hLY=
+google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934=
+google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0=
+google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0=
+google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
@@ -725,12 +785,12 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/dnaeon/go-vcr.v4 v4.0.6 h1:PiJkrakkmzc5s7EfBnZOnyiLwi7o7A9fwPzN0X2uwe0=
gopkg.in/dnaeon/go-vcr.v4 v4.0.6/go.mod h1:sbq5oMEcM4PXngbcNbHhzfCP9OdZodLhrbRYoyg09HY=
-gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
-gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
+gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
+gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
-gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.1 h1:tVBILHy0R6e4wkYOn3XmiITt/hEVH4TFMYvAX2Ytz6k=
+gopkg.in/ini.v1 v1.67.1/go.mod h1:x/cyOwCgZqOkJoDIJ3c1KNHMo10+nLGAhh+kn3Zizss=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -740,25 +800,24 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
-k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
-k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
-k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
-k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
-k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
+k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
+k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
+k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
-k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
-k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
-k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
-sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
diff --git a/go.work b/go.work
index 5ec4aeab50..4d53344b16 100644
--- a/go.work
+++ b/go.work
@@ -1,4 +1,4 @@
-go 1.24.9
+go 1.25.5
use (
.
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index c8b62b5ca7..f3853a86c6 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -1,20 +1,20 @@
module github.com/prometheus/prometheus/internal/tools
-go 1.24.0
+go 1.25.5
require (
- github.com/bufbuild/buf v1.62.1
+ github.com/bufbuild/buf v1.65.0
github.com/daixiang0/gci v0.13.7
github.com/gogo/protobuf v1.3.2
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8
)
require (
buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.11-20250718181942-e35f9b667443.1 // indirect
buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-20250109164928-1da0de137947.1 // indirect
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1 // indirect
- buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2 // indirect
- buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1 // indirect
+ buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20260126144947-819582968857.2 // indirect
+ buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20260126144947-819582968857.1 // indirect
buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1 // indirect
buf.build/go/app v0.2.0 // indirect
buf.build/go/bufplugin v0.9.0 // indirect
@@ -26,37 +26,38 @@ require (
buf.build/go/standard v0.1.0 // indirect
cel.dev/expr v0.25.1 // indirect
connectrpc.com/connect v1.19.1 // indirect
- connectrpc.com/otelconnect v0.8.0 // indirect
+ connectrpc.com/otelconnect v0.9.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
- github.com/bufbuild/protocompile v0.14.2-0.20251223142729-db46c1b9d34e // indirect
+ github.com/bufbuild/protocompile v0.14.2-0.20260130195850-5c64bed4577e // indirect
github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cli/browser v1.3.0 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
- github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.18.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
- github.com/docker/cli v29.1.3+incompatible // indirect
+ github.com/docker/cli v29.2.0+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v28.5.2+incompatible // indirect
- github.com/docker/docker-credential-helpers v0.9.4 // indirect
+ github.com/docker/docker-credential-helpers v0.9.5 // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/go-chi/chi/v5 v5.2.3 // indirect
+ github.com/go-chi/chi/v5 v5.2.4 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gofrs/flock v0.13.0 // indirect
- github.com/google/cel-go v0.26.1 // indirect
+ github.com/google/cel-go v0.27.0 // indirect
github.com/google/go-containerregistry v0.20.7 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jdx/go-netrc v1.0.0 // indirect
- github.com/klauspost/compress v1.18.2 // indirect
+ github.com/klauspost/compress v1.18.4 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
@@ -66,19 +67,19 @@ require (
github.com/morikuni/aec v1.1.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
- github.com/petermattis/goid v0.0.0-20251121121749-a11dd1a45f9a // indirect
+ github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 // indirect
github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
- github.com/quic-go/quic-go v0.58.0 // indirect
+ github.com/quic-go/quic-go v0.59.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rs/cors v1.11.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/segmentio/asm v1.2.1 // indirect
github.com/segmentio/encoding v0.5.3 // indirect
- github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/sirupsen/logrus v1.9.4 // indirect
github.com/spf13/cobra v1.10.2 // indirect
github.com/spf13/pflag v1.0.10 // indirect
- github.com/stoewer/go-strcase v1.3.1 // indirect
github.com/tetratelabs/wazero v1.11.0 // indirect
github.com/tidwall/btree v1.8.1 // indirect
github.com/vbatts/tar-split v0.12.2 // indirect
@@ -87,26 +88,30 @@ require (
go.lsp.dev/protocol v0.12.0 // indirect
go.lsp.dev/uri v0.3.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect
- go.opentelemetry.io/otel v1.39.0 // indirect
- go.opentelemetry.io/otel/metric v1.39.0 // indirect
- go.opentelemetry.io/otel/trace v1.39.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
+ go.opentelemetry.io/otel v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect
+ go.opentelemetry.io/otel/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/trace v1.40.0 // indirect
go.uber.org/mock v0.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/crypto v0.46.0 // indirect
- golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 // indirect
- golang.org/x/mod v0.31.0 // indirect
- golang.org/x/net v0.48.0 // indirect
+ golang.org/x/crypto v0.47.0 // indirect
+ golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
+ golang.org/x/mod v0.32.0 // indirect
+ golang.org/x/net v0.49.0 // indirect
golang.org/x/sync v0.19.0 // indirect
- golang.org/x/sys v0.39.0 // indirect
- golang.org/x/term v0.38.0 // indirect
- golang.org/x/text v0.32.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
+ golang.org/x/sys v0.41.0 // indirect
+ golang.org/x/term v0.39.0 // indirect
+ golang.org/x/text v0.34.0 // indirect
+ golang.org/x/time v0.14.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
+ gotest.tools/v3 v3.5.1 // indirect
+ mvdan.cc/xurls/v2 v2.6.0 // indirect
pluginrpc.com/pluginrpc v0.5.0 // indirect
)
diff --git a/internal/tools/go.sum b/internal/tools/go.sum
index df735a5536..ab0255fd6e 100644
--- a/internal/tools/go.sum
+++ b/internal/tools/go.sum
@@ -4,10 +4,10 @@ buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-2025010916
buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-20250109164928-1da0de137947.1/go.mod h1:8PRKXhgNes29Tjrnv8KdZzg3I1QceOkzibW1QK7EXv0=
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1 h1:j9yeqTWEFrtimt8Nng2MIeRrpoCvQzM9/g25XTvqUGg=
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM=
-buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2 h1:eQ6XRVUaYYZFOZvBsyrOYLWbw6464s5dVnHscxa0b8w=
-buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2/go.mod h1:omxVRch3jEPMINnUipLsuRWoEhND6LPXELKBG7xzyDw=
-buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1 h1:PdfIJUbUVKdajMVYuMdvr2Wvo+wmzGnlPEYA4bhFaWI=
-buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1/go.mod h1:1JJi9jvOqRxSMa+JxiZSm57doB+db/1WYCIa2lHfc40=
+buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20260126144947-819582968857.2 h1:XPrWCd9ydEo5Ofv1aNJVJaxndMXLQjRO9vVzsJG3jL8=
+buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20260126144947-819582968857.2/go.mod h1:mpsjeEaxOYPIJV2cz4IagLghZufRvx+NPVtInjEeoQ8=
+buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20260126144947-819582968857.1 h1:Yreby6Ypa58wdQUEm9Fnc5g8n/jP487Dq3aK5yBYwfk=
+buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20260126144947-819582968857.1/go.mod h1:1JJi9jvOqRxSMa+JxiZSm57doB+db/1WYCIa2lHfc40=
buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1 h1:iGPvEJltOXUMANWf0zajcRcbiOXLD90ZwPUFvbcuv6Q=
buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1/go.mod h1:nWVKKRA29zdt4uvkjka3i/y4mkrswyWwiu0TbdX0zts=
buf.build/go/app v0.2.0 h1:NYaH13A+RzPb7M5vO8uZYZ2maBZI5+MS9A9tQm66fy8=
@@ -30,26 +30,26 @@ cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
-connectrpc.com/otelconnect v0.8.0 h1:a4qrN4H8aEE2jAoCxheZYYfEjXMgVPyL9OzPQLBEFXU=
-connectrpc.com/otelconnect v0.8.0/go.mod h1:AEkVLjCPXra+ObGFCOClcJkNjS7zPaQSqvO0lCyjfZc=
+connectrpc.com/otelconnect v0.9.0 h1:NggB3pzRC3pukQWaYbRHJulxuXvmCKCKkQ9hbrHAWoA=
+connectrpc.com/otelconnect v0.9.0/go.mod h1:AEkVLjCPXra+ObGFCOClcJkNjS7zPaQSqvO0lCyjfZc=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
-github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
-github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs=
+github.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=
github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=
-github.com/bufbuild/buf v1.62.1 h1:QdYB6JDW7dP+5H7sKx0lN1raxnuUJDDlEJtPHDYKB0g=
-github.com/bufbuild/buf v1.62.1/go.mod h1:igMN/6U32/GDzyfkmn0VfIaKoeOnWTTizEf5CG0/87k=
-github.com/bufbuild/protocompile v0.14.2-0.20251223142729-db46c1b9d34e h1:LQA+1MyiPkolGHJGC2GMDC5Xu+0RDVH6jGMKech7Exs=
-github.com/bufbuild/protocompile v0.14.2-0.20251223142729-db46c1b9d34e/go.mod h1:5UUj46Eu+U+C59C5N6YilaMI7WWfP2bW9xGcOkme2DI=
+github.com/bufbuild/buf v1.65.0 h1:f2BzeCY9rRh9P5KD340ZoPAaFLTkssoUTHx7lpqozgg=
+github.com/bufbuild/buf v1.65.0/go.mod h1:7SAs2YqGpPXHqBBXBeYQbCzY0OQq4Jbg6XCqirEiYvQ=
+github.com/bufbuild/protocompile v0.14.2-0.20260130195850-5c64bed4577e h1:emH16Bf1w4C0cJ3ge4QtBAl4sIYJe23EfpWH0SpA9co=
+github.com/bufbuild/protocompile v0.14.2-0.20260130195850-5c64bed4577e/go.mod h1:cxhE8h+14t0Yxq2H9MV/UggzQ1L0gh0t2tJobITWsBE=
github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1 h1:V1xulAoqLqVg44rY97xOR+mQpD2N+GzhMHVwJ030WEU=
github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1/go.mod h1:c5D8gWRIZ2HLWO3gXYTtUfw/hbJyD8xikv2ooPxnklQ=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cli/browser v1.3.0 h1:LejqCrpWr+1pRqmEPDGnTZOjsMe7sehifLynZJuqJpo=
@@ -60,8 +60,8 @@ github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151X
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
+github.com/containerd/stargz-snapshotter/estargz v0.18.2 h1:yXkZFYIzz3eoLwlTUZKz2iQ4MrckBxJjkmD16ynUTrw=
+github.com/containerd/stargz-snapshotter/estargz v0.18.2/go.mod h1:XyVU5tcJ3PRpkA9XS2T5us6Eg35yM0214Y+wvrZTBrY=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -69,27 +69,26 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/daixiang0/gci v0.13.7 h1:+0bG5eK9vlI08J+J/NWGbWPTNiXPG4WhNLJOkSxWITQ=
github.com/daixiang0/gci v0.13.7/go.mod h1:812WVN6JLFY9S6Tv76twqmNqevN0pa3SX3nih0brVzQ=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v29.1.3+incompatible h1:+kz9uDWgs+mAaIZojWfFt4d53/jv0ZUOOoSh5ZnH36c=
-github.com/docker/cli v29.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v29.2.0+incompatible h1:9oBd9+YM7rxjZLfyMGxjraKBKE4/nVyvVfN4qNl9XRM=
+github.com/docker/cli v29.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
-github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
+github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY=
+github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
-github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-chi/chi/v5 v5.2.4 h1:WtFKPHwlywe8Srng8j2BhOD9312j9cGUxG1SP4V2cR4=
+github.com/go-chi/chi/v5 v5.2.4/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -99,8 +98,8 @@ github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ=
-github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
+github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo=
+github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
@@ -108,8 +107,8 @@ github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4p
github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8 h1:NpbJl/eVbvrGE0MJ6X16X9SAifesl6Fwxg/YmCvubRI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8/go.mod h1:mi7YA+gCzVem12exXy46ZespvGtX/lZmD/RLnQhVW7U=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -120,8 +119,8 @@ github.com/jhump/protoreflect/v2 v2.0.0-beta.2 h1:qZU+rEZUOYTz1Bnhi3xbwn+VxdXkLV
github.com/jhump/protoreflect/v2 v2.0.0-beta.2/go.mod h1:4tnOYkB/mq7QTyS3YKtVtNrJv4Psqout8HA1U+hZtgM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
-github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
+github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -148,18 +147,18 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
-github.com/petermattis/goid v0.0.0-20251121121749-a11dd1a45f9a h1:VweslR2akb/ARhXfqSfRbj1vpWwYXf3eeAUyw/ndms0=
-github.com/petermattis/goid v0.0.0-20251121121749-a11dd1a45f9a/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
+github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 h1:KPpdlQLZcHfTMQRi6bFQ7ogNO0ltFT4PmtwTLW4W+14=
+github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 h1:arwj11zP0yJIxIRiDn22E0H8PxfF7TsTrc2wIPFIsf4=
github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9/go.mod h1:SKZx6stCn03JN3BOWTwvVIO2ajMkb/zQdTceXYhKw/4=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
-github.com/quic-go/quic-go v0.58.0 h1:ggY2pvZaVdB9EyojxL1p+5mptkuHyX5MOSv4dgWF4Ug=
-github.com/quic-go/quic-go v0.58.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
+github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
+github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rodaine/protogofakeit v0.1.1 h1:ZKouljuRM3A+TArppfBqnH8tGZHOwM/pjvtXe9DaXH8=
@@ -174,22 +173,13 @@ github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/encoding v0.5.3 h1:OjMgICtcSFuNvQCdwqMCv9Tg7lEOXGwm1J5RPQccx6w=
github.com/segmentio/encoding v0.5.3/go.mod h1:HS1ZKa3kSN32ZHVZ7ZLPLXWvOVIiZtyJnO1gPH1sKt0=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
+github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs=
-github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tetratelabs/wazero v1.11.0 h1:+gKemEuKCTevU4d7ZTzlsvgd1uaToIDtlQlmNbwqYhA=
@@ -210,22 +200,22 @@ go.lsp.dev/uri v0.3.0 h1:KcZJmh6nFIBeJzTugn5JTU6OOyG0lDOo3R9KwTxTYbo=
go.lsp.dev/uri v0.3.0/go.mod h1:P5sbO1IQR+qySTWOCnhnK7phBx+W3zbLqSMDJNTw88I=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
-go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
-go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
-go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
-go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
-go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
-go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
-go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
-go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
-go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
-go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -241,20 +231,20 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
-golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
-golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 h1:fQsdNF2N+/YewlRZiricy4P1iimyPKZ/xwniHj8Q2a0=
-golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
+golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
+golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
-golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
+golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
-golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -264,32 +254,31 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
-golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
-golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
+golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
+golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
-golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
+golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
-golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
+golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
+golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E=
-google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0=
+google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
@@ -297,10 +286,11 @@ google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+mvdan.cc/xurls/v2 v2.6.0 h1:3NTZpeTxYVWNSokW3MKeyVkz/j7uYXYiMtXRUfmjbgI=
+mvdan.cc/xurls/v2 v2.6.0/go.mod h1:bCvEZ1XvdA6wDnxY7jPPjEmigDtvtvPXAD/Exa9IMSk=
pluginrpc.com/pluginrpc v0.5.0 h1:tOQj2D35hOmvHyPu8e7ohW2/QvAnEtKscy2IJYWQ2yo=
pluginrpc.com/pluginrpc v0.5.0/go.mod h1:UNWZ941hcVAoOZUn8YZsMmOZBzbUjQa3XMns8RQLp9o=
diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go
index 75021d2c62..d457d8ab25 100644
--- a/model/histogram/float_histogram.go
+++ b/model/histogram/float_histogram.go
@@ -18,6 +18,8 @@ import (
"fmt"
"math"
"strings"
+
+ "github.com/prometheus/prometheus/util/kahansum"
)
// FloatHistogram is similar to Histogram but uses float64 for all
@@ -353,7 +355,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (res *FloatHistogram, counte
}
counterResetCollision = h.adjustCounterReset(other)
if !h.UsesCustomBuckets() {
- otherZeroCount := h.reconcileZeroBuckets(other)
+ otherZeroCount, _ := h.reconcileZeroBuckets(other, nil)
h.ZeroCount += otherZeroCount
}
h.Count += other.Count
@@ -374,11 +376,11 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (res *FloatHistogram, counte
intersectedBounds := intersectCustomBucketBounds(h.CustomValues, other.CustomValues)
// Add with mapping - maps both histograms to intersected layout.
- h.PositiveSpans, h.PositiveBuckets = addCustomBucketsWithMismatches(
+ h.PositiveSpans, h.PositiveBuckets, _ = addCustomBucketsWithMismatches(
false,
hPositiveSpans, hPositiveBuckets, h.CustomValues,
otherPositiveSpans, otherPositiveBuckets, other.CustomValues,
- intersectedBounds)
+ nil, intersectedBounds)
h.CustomValues = intersectedBounds
}
return h, counterResetCollision, nhcbBoundsReconciled, nil
@@ -408,6 +410,121 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (res *FloatHistogram, counte
return h, counterResetCollision, nhcbBoundsReconciled, nil
}
+// KahanAdd works like Add but using the Kahan summation algorithm to minimize numerical errors.
+// c is a histogram holding the Kahan compensation term. It is modified in-place if non-nil.
+// If c is nil, a new compensation histogram is created inside the function. In this case,
+// the caller must use the returned updatedC, because the original c variable is not modified.
+func (h *FloatHistogram) KahanAdd(other, c *FloatHistogram) (updatedC *FloatHistogram, counterResetCollision, nhcbBoundsReconciled bool, err error) {
+ if err := h.checkSchemaAndBounds(other); err != nil {
+ return nil, false, false, err
+ }
+
+ counterResetCollision = h.adjustCounterReset(other)
+
+ if c == nil {
+ c = h.newCompensationHistogram()
+ }
+ if !h.UsesCustomBuckets() {
+ otherZeroCount, otherCZeroCount := h.reconcileZeroBuckets(other, c)
+ h.ZeroCount, c.ZeroCount = kahansum.Inc(otherZeroCount, h.ZeroCount, c.ZeroCount)
+ h.ZeroCount, c.ZeroCount = kahansum.Inc(otherCZeroCount, h.ZeroCount, c.ZeroCount)
+ }
+ h.Count, c.Count = kahansum.Inc(other.Count, h.Count, c.Count)
+ h.Sum, c.Sum = kahansum.Inc(other.Sum, h.Sum, c.Sum)
+
+ var (
+ hPositiveSpans = h.PositiveSpans
+ hPositiveBuckets = h.PositiveBuckets
+ otherPositiveSpans = other.PositiveSpans
+ otherPositiveBuckets = other.PositiveBuckets
+ cPositiveBuckets = c.PositiveBuckets
+ )
+
+ if h.UsesCustomBuckets() {
+ if CustomBucketBoundsMatch(h.CustomValues, other.CustomValues) {
+ h.PositiveSpans, h.PositiveBuckets, c.PositiveBuckets = kahanAddBuckets(
+ h.Schema, h.ZeroThreshold, false,
+ hPositiveSpans, hPositiveBuckets,
+ otherPositiveSpans, otherPositiveBuckets,
+ cPositiveBuckets, nil,
+ )
+ } else {
+ nhcbBoundsReconciled = true
+ intersectedBounds := intersectCustomBucketBounds(h.CustomValues, other.CustomValues)
+
+ // Add with mapping - maps both histograms to intersected layout.
+ h.PositiveSpans, h.PositiveBuckets, c.PositiveBuckets = addCustomBucketsWithMismatches(
+ false,
+ hPositiveSpans, hPositiveBuckets, h.CustomValues,
+ otherPositiveSpans, otherPositiveBuckets, other.CustomValues,
+ cPositiveBuckets, intersectedBounds)
+ h.CustomValues = intersectedBounds
+ c.CustomValues = intersectedBounds
+ }
+ c.PositiveSpans = h.PositiveSpans
+ return c, counterResetCollision, nhcbBoundsReconciled, nil
+ }
+
+ otherC := other.newCompensationHistogram()
+
+ var (
+ hNegativeSpans = h.NegativeSpans
+ hNegativeBuckets = h.NegativeBuckets
+ otherNegativeSpans = other.NegativeSpans
+ otherNegativeBuckets = other.NegativeBuckets
+ cNegativeBuckets = c.NegativeBuckets
+ otherCPositiveBuckets = otherC.PositiveBuckets
+ otherCNegativeBuckets = otherC.NegativeBuckets
+ )
+
+ switch {
+ case other.Schema < h.Schema:
+ hPositiveSpans, hPositiveBuckets, cPositiveBuckets = kahanReduceResolution(
+ hPositiveSpans, hPositiveBuckets, cPositiveBuckets,
+ h.Schema, other.Schema,
+ true,
+ )
+ hNegativeSpans, hNegativeBuckets, cNegativeBuckets = kahanReduceResolution(
+ hNegativeSpans, hNegativeBuckets, cNegativeBuckets,
+ h.Schema, other.Schema,
+ true,
+ )
+ h.Schema = other.Schema
+
+ case other.Schema > h.Schema:
+ otherPositiveSpans, otherPositiveBuckets, otherCPositiveBuckets = kahanReduceResolution(
+ otherPositiveSpans, otherPositiveBuckets, otherCPositiveBuckets,
+ other.Schema, h.Schema,
+ false,
+ )
+ otherNegativeSpans, otherNegativeBuckets, otherCNegativeBuckets = kahanReduceResolution(
+ otherNegativeSpans, otherNegativeBuckets, otherCNegativeBuckets,
+ other.Schema, h.Schema,
+ false,
+ )
+ }
+
+ h.PositiveSpans, h.PositiveBuckets, c.PositiveBuckets = kahanAddBuckets(
+ h.Schema, h.ZeroThreshold, false,
+ hPositiveSpans, hPositiveBuckets,
+ otherPositiveSpans, otherPositiveBuckets,
+ cPositiveBuckets, otherCPositiveBuckets,
+ )
+ h.NegativeSpans, h.NegativeBuckets, c.NegativeBuckets = kahanAddBuckets(
+ h.Schema, h.ZeroThreshold, false,
+ hNegativeSpans, hNegativeBuckets,
+ otherNegativeSpans, otherNegativeBuckets,
+ cNegativeBuckets, otherCNegativeBuckets,
+ )
+
+ c.Schema = h.Schema
+ c.ZeroThreshold = h.ZeroThreshold
+ c.PositiveSpans = h.PositiveSpans
+ c.NegativeSpans = h.NegativeSpans
+
+ return c, counterResetCollision, nhcbBoundsReconciled, nil
+}
+
// Sub works like Add but subtracts the other histogram. It uses the same logic
// to adjust the counter reset hint. This is useful where this method is used
// for incremental mean calculation. However, if it is used for the actual "-"
@@ -419,7 +536,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (res *FloatHistogram, counte
}
counterResetCollision = h.adjustCounterReset(other)
if !h.UsesCustomBuckets() {
- otherZeroCount := h.reconcileZeroBuckets(other)
+ otherZeroCount, _ := h.reconcileZeroBuckets(other, nil)
h.ZeroCount -= otherZeroCount
}
h.Count -= other.Count
@@ -440,11 +557,11 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (res *FloatHistogram, counte
intersectedBounds := intersectCustomBucketBounds(h.CustomValues, other.CustomValues)
// Subtract with mapping - maps both histograms to intersected layout.
- h.PositiveSpans, h.PositiveBuckets = addCustomBucketsWithMismatches(
+ h.PositiveSpans, h.PositiveBuckets, _ = addCustomBucketsWithMismatches(
true,
hPositiveSpans, hPositiveBuckets, h.CustomValues,
otherPositiveSpans, otherPositiveBuckets, other.CustomValues,
- intersectedBounds)
+ nil, intersectedBounds)
h.CustomValues = intersectedBounds
}
return h, counterResetCollision, nhcbBoundsReconciled, nil
@@ -576,15 +693,28 @@ func (h *FloatHistogram) Size() int {
// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0
// and only use a larger number if you know what you are doing.
func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
- h.PositiveBuckets, h.PositiveSpans = compactBuckets(
- h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false,
+ h.PositiveBuckets, _, h.PositiveSpans = compactBuckets(
+ h.PositiveBuckets, nil, h.PositiveSpans, maxEmptyBuckets, false,
)
- h.NegativeBuckets, h.NegativeSpans = compactBuckets(
- h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false,
+ h.NegativeBuckets, _, h.NegativeSpans = compactBuckets(
+ h.NegativeBuckets, nil, h.NegativeSpans, maxEmptyBuckets, false,
)
return h
}
+// kahanCompact works like Compact, but it is specialized for FloatHistogram's KahanAdd method.
+// c is a histogram holding the Kahan compensation term.
+func (h *FloatHistogram) kahanCompact(maxEmptyBuckets int, c *FloatHistogram,
+) (updatedH, updatedC *FloatHistogram) {
+ h.PositiveBuckets, c.PositiveBuckets, h.PositiveSpans = compactBuckets(
+ h.PositiveBuckets, c.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false,
+ )
+ h.NegativeBuckets, c.NegativeBuckets, h.NegativeSpans = compactBuckets(
+ h.NegativeBuckets, c.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false,
+ )
+ return h, c
+}
+
// DetectReset returns true if the receiving histogram is missing any buckets
// that have a non-zero population in the provided previous histogram. It also
// returns true if any count (in any bucket, in the zero count, or in the count
@@ -652,7 +782,7 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
// ZeroThreshold decreased.
return true
}
- previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold)
+ previousZeroCount, newThreshold, _ := previous.zeroCountForLargerThreshold(h.ZeroThreshold, nil)
if newThreshold != h.ZeroThreshold {
// ZeroThreshold is within a populated bucket in previous
// histogram.
@@ -847,30 +977,42 @@ func (h *FloatHistogram) Validate() error {
}
// zeroCountForLargerThreshold returns what the histogram's zero count would be
-// if the ZeroThreshold had the provided larger (or equal) value. If the
-// provided value is less than the histogram's ZeroThreshold, the method panics.
+// if the ZeroThreshold had the provided larger (or equal) value. It also returns the
+// zero count of the compensation histogram `c` if provided (used for Kahan summation).
+//
+// If the provided ZeroThreshold is less than the histogram's ZeroThreshold, the method panics.
// If the largerThreshold ends up within a populated bucket of the histogram, it
// is adjusted upwards to the lower limit of that bucket (all in terms of
// absolute values) and that bucket's count is included in the returned
// count. The adjusted threshold is returned, too.
-func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) {
+func (h *FloatHistogram) zeroCountForLargerThreshold(
+ largerThreshold float64, c *FloatHistogram) (hZeroCount, threshold, cZeroCount float64,
+) {
+ if c != nil {
+ cZeroCount = c.ZeroCount
+ }
// Fast path.
if largerThreshold == h.ZeroThreshold {
- return h.ZeroCount, largerThreshold
+ return h.ZeroCount, largerThreshold, cZeroCount
}
if largerThreshold < h.ZeroThreshold {
panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold))
}
outer:
for {
- count = h.ZeroCount
+ hZeroCount = h.ZeroCount
i := h.PositiveBucketIterator()
+ bucketsIdx := 0
for i.Next() {
b := i.At()
if b.Lower >= largerThreshold {
break
}
- count += b.Count // Bucket to be merged into zero bucket.
+ // Bucket to be merged into zero bucket.
+ hZeroCount, cZeroCount = kahansum.Inc(b.Count, hZeroCount, cZeroCount)
+ if c != nil {
+ hZeroCount, cZeroCount = kahansum.Inc(c.PositiveBuckets[bucketsIdx], hZeroCount, cZeroCount)
+ }
if b.Upper > largerThreshold {
// New threshold ended up within a bucket. if it's
// populated, we need to adjust largerThreshold before
@@ -880,14 +1022,20 @@ outer:
}
break
}
+ bucketsIdx++
}
i = h.NegativeBucketIterator()
+ bucketsIdx = 0
for i.Next() {
b := i.At()
if b.Upper <= -largerThreshold {
break
}
- count += b.Count // Bucket to be merged into zero bucket.
+ // Bucket to be merged into zero bucket.
+ hZeroCount, cZeroCount = kahansum.Inc(b.Count, hZeroCount, cZeroCount)
+ if c != nil {
+ hZeroCount, cZeroCount = kahansum.Inc(c.NegativeBuckets[bucketsIdx], hZeroCount, cZeroCount)
+ }
if b.Lower < -largerThreshold {
// New threshold ended up within a bucket. If
// it's populated, we need to adjust
@@ -900,15 +1048,17 @@ outer:
}
break
}
+ bucketsIdx++
}
- return count, largerThreshold
+ return hZeroCount, largerThreshold, cZeroCount
}
}
// trimBucketsInZeroBucket removes all buckets that are within the zero
// bucket. It assumes that the zero threshold is at a bucket boundary and that
// the counts in the buckets to remove are already part of the zero count.
-func (h *FloatHistogram) trimBucketsInZeroBucket() {
+// c is a histogram holding the Kahan compensation term.
+func (h *FloatHistogram) trimBucketsInZeroBucket(c *FloatHistogram) {
i := h.PositiveBucketIterator()
bucketsIdx := 0
for i.Next() {
@@ -917,6 +1067,9 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
break
}
h.PositiveBuckets[bucketsIdx] = 0
+ if c != nil {
+ c.PositiveBuckets[bucketsIdx] = 0
+ }
bucketsIdx++
}
i = h.NegativeBucketIterator()
@@ -927,34 +1080,46 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
break
}
h.NegativeBuckets[bucketsIdx] = 0
+ if c != nil {
+ c.NegativeBuckets[bucketsIdx] = 0
+ }
bucketsIdx++
}
// We are abusing Compact to trim the buckets set to zero
// above. Premature compacting could cause additional cost, but this
// code path is probably rarely used anyway.
- h.Compact(0)
+ if c != nil {
+ h.kahanCompact(0, c)
+ } else {
+ h.Compact(0)
+ }
}
// reconcileZeroBuckets finds a zero bucket large enough to include the zero
// buckets of both histograms (the receiving histogram and the other histogram)
// with a zero threshold that is not within a populated bucket in either
-// histogram. This method modifies the receiving histogram accordingly, but
-// leaves the other histogram as is. Instead, it returns the zero count the
-// other histogram would have if it were modified.
-func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
- otherZeroCount := other.ZeroCount
+// histogram. This method modifies the receiving histogram accordingly, and
+// also modifies the compensation histogram `c` (used for Kahan summation) if provided,
+// but leaves the other histogram as is. Instead, it returns the zero count the
+// other histogram would have if it were modified, as well as its Kahan compensation term.
+func (h *FloatHistogram) reconcileZeroBuckets(other, c *FloatHistogram) (otherZeroCount, otherCZeroCount float64) {
+ otherZeroCount = other.ZeroCount
otherZeroThreshold := other.ZeroThreshold
for otherZeroThreshold != h.ZeroThreshold {
if h.ZeroThreshold > otherZeroThreshold {
- otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold)
+ otherZeroCount, otherZeroThreshold, otherCZeroCount = other.zeroCountForLargerThreshold(h.ZeroThreshold, nil)
}
if otherZeroThreshold > h.ZeroThreshold {
- h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold)
- h.trimBucketsInZeroBucket()
+ var cZeroCount float64
+ h.ZeroCount, h.ZeroThreshold, cZeroCount = h.zeroCountForLargerThreshold(otherZeroThreshold, c)
+ if c != nil {
+ c.ZeroCount = cZeroCount
+ }
+ h.trimBucketsInZeroBucket(c)
}
}
- return otherZeroCount
+ return otherZeroCount, otherCZeroCount
}
// floatBucketIterator is a low-level constructor for bucket iterators.
@@ -1369,6 +1534,145 @@ func addBuckets(
return spansA, bucketsA
}
+// kahanAddBuckets works like addBuckets but it is used in FloatHistogram's KahanAdd method
+// and takes additional arguments, compensationBucketsA and compensationBucketsB,
+// which hold the Kahan compensation values associated with histograms A and B.
+// It returns the resulting spans/buckets and compensation buckets.
+func kahanAddBuckets(
+ schema int32, threshold float64, negative bool,
+ spansA []Span, bucketsA []float64,
+ spansB []Span, bucketsB []float64,
+ compensationBucketsA, compensationBucketsB []float64,
+) (newSpans []Span, newBucketsA, newBucketsC []float64) {
+ var (
+ iSpan = -1
+ iBucket = -1
+ iInSpan int32
+ indexA int32
+ indexB int32
+ bIdxB int
+ bucketB float64
+ compensationBucketB float64
+ deltaIndex int32
+ lowerThanThreshold = true
+ )
+
+ for _, spanB := range spansB {
+ indexB += spanB.Offset
+ for j := 0; j < int(spanB.Length); j++ {
+ if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
+ goto nextLoop
+ }
+ lowerThanThreshold = false
+
+ bucketB = bucketsB[bIdxB]
+ if compensationBucketsB != nil {
+ compensationBucketB = compensationBucketsB[bIdxB]
+ }
+ if negative {
+ bucketB *= -1
+ compensationBucketB *= -1
+ }
+
+ if iSpan == -1 {
+ if len(spansA) == 0 || spansA[0].Offset > indexB {
+ // Add bucket before all others.
+ bucketsA = append(bucketsA, 0)
+ copy(bucketsA[1:], bucketsA)
+ bucketsA[0] = bucketB
+ compensationBucketsA = append(compensationBucketsA, 0)
+ copy(compensationBucketsA[1:], compensationBucketsA)
+ compensationBucketsA[0] = compensationBucketB
+ if len(spansA) > 0 && spansA[0].Offset == indexB+1 {
+ spansA[0].Length++
+ spansA[0].Offset--
+ goto nextLoop
+ }
+ spansA = append(spansA, Span{})
+ copy(spansA[1:], spansA)
+ spansA[0] = Span{Offset: indexB, Length: 1}
+ if len(spansA) > 1 {
+ // Convert the absolute offset in the formerly
+ // first span to a relative offset.
+ spansA[1].Offset -= indexB + 1
+ }
+ goto nextLoop
+ } else if spansA[0].Offset == indexB {
+ // Just add to first bucket.
+ bucketsA[0], compensationBucketsA[0] = kahansum.Inc(bucketB, bucketsA[0], compensationBucketsA[0])
+ bucketsA[0], compensationBucketsA[0] = kahansum.Inc(compensationBucketB, bucketsA[0], compensationBucketsA[0])
+ goto nextLoop
+ }
+ iSpan, iBucket, iInSpan = 0, 0, 0
+ indexA = spansA[0].Offset
+ }
+ deltaIndex = indexB - indexA
+ for {
+ remainingInSpan := int32(spansA[iSpan].Length) - iInSpan
+ if deltaIndex < remainingInSpan {
+ // Bucket is in current span.
+ iBucket += int(deltaIndex)
+ iInSpan += deltaIndex
+ bucketsA[iBucket], compensationBucketsA[iBucket] = kahansum.Inc(bucketB, bucketsA[iBucket], compensationBucketsA[iBucket])
+ bucketsA[iBucket], compensationBucketsA[iBucket] = kahansum.Inc(compensationBucketB, bucketsA[iBucket], compensationBucketsA[iBucket])
+ break
+ }
+ deltaIndex -= remainingInSpan
+ iBucket += int(remainingInSpan)
+ iSpan++
+ if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset {
+ // Bucket is in gap behind previous span (or there are no further spans).
+ bucketsA = append(bucketsA, 0)
+ copy(bucketsA[iBucket+1:], bucketsA[iBucket:])
+ bucketsA[iBucket] = bucketB
+ compensationBucketsA = append(compensationBucketsA, 0)
+ copy(compensationBucketsA[iBucket+1:], compensationBucketsA[iBucket:])
+ compensationBucketsA[iBucket] = compensationBucketB
+ switch {
+ case deltaIndex == 0:
+ // Directly after previous span, extend previous span.
+ if iSpan < len(spansA) {
+ spansA[iSpan].Offset--
+ }
+ iSpan--
+ iInSpan = int32(spansA[iSpan].Length)
+ spansA[iSpan].Length++
+ goto nextLoop
+ case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1:
+ // Directly before next span, extend next span.
+ iInSpan = 0
+ spansA[iSpan].Offset--
+ spansA[iSpan].Length++
+ goto nextLoop
+ default:
+ // No next span, or next span is not directly adjacent to new bucket.
+ // Add new span.
+ iInSpan = 0
+ if iSpan < len(spansA) {
+ spansA[iSpan].Offset -= deltaIndex + 1
+ }
+ spansA = append(spansA, Span{})
+ copy(spansA[iSpan+1:], spansA[iSpan:])
+ spansA[iSpan] = Span{Length: 1, Offset: deltaIndex}
+ goto nextLoop
+ }
+ } else {
+ // Try start of next span.
+ deltaIndex -= spansA[iSpan].Offset
+ iInSpan = 0
+ }
+ }
+
+ nextLoop:
+ indexA = indexB
+ indexB++
+ bIdxB++
+ }
+ }
+
+ return spansA, bucketsA, compensationBucketsA
+}
+
// floatBucketsMatch compares bucket values of two float histograms using binary float comparison
// and returns true if all values match.
func floatBucketsMatch(b1, b2 []float64) bool {
@@ -1496,15 +1800,18 @@ func intersectCustomBucketBounds(boundsA, boundsB []float64) []float64 {
// addCustomBucketsWithMismatches handles adding/subtracting custom bucket histograms
// with mismatched bucket layouts by mapping both to an intersected layout.
+// It also processes the Kahan compensation term if provided.
func addCustomBucketsWithMismatches(
negative bool,
spansA []Span, bucketsA, boundsA []float64,
spansB []Span, bucketsB, boundsB []float64,
+ bucketsC []float64,
intersectedBounds []float64,
-) ([]Span, []float64) {
+) ([]Span, []float64, []float64) {
targetBuckets := make([]float64, len(intersectedBounds)+1)
+ cTargetBuckets := make([]float64, len(intersectedBounds)+1)
- mapBuckets := func(spans []Span, buckets, bounds []float64, negative bool) {
+ mapBuckets := func(spans []Span, buckets, bounds []float64, negative, withCompensation bool) {
srcIdx := 0
bucketIdx := 0
intersectIdx := 0
@@ -1530,9 +1837,12 @@ func addCustomBucketsWithMismatches(
}
if negative {
- targetBuckets[targetIdx] -= value
+ targetBuckets[targetIdx], cTargetBuckets[targetIdx] = kahansum.Dec(value, targetBuckets[targetIdx], cTargetBuckets[targetIdx])
} else {
- targetBuckets[targetIdx] += value
+ targetBuckets[targetIdx], cTargetBuckets[targetIdx] = kahansum.Inc(value, targetBuckets[targetIdx], cTargetBuckets[targetIdx])
+ if withCompensation && bucketsC != nil {
+ targetBuckets[targetIdx], cTargetBuckets[targetIdx] = kahansum.Inc(bucketsC[bucketIdx], targetBuckets[targetIdx], cTargetBuckets[targetIdx])
+ }
}
}
srcIdx++
@@ -1541,21 +1851,23 @@ func addCustomBucketsWithMismatches(
}
}
- // Map both histograms to the intersected layout.
- mapBuckets(spansA, bucketsA, boundsA, false)
- mapBuckets(spansB, bucketsB, boundsB, negative)
+ // Map histograms to the intersected layout.
+ mapBuckets(spansA, bucketsA, boundsA, false, true)
+ mapBuckets(spansB, bucketsB, boundsB, negative, false)
// Build spans and buckets, excluding zero-valued buckets from the final result.
- destSpans := spansA[:0] // Reuse spansA capacity for destSpans since we don't need it anymore.
- destBuckets := targetBuckets[:0] // Reuse targetBuckets capacity for destBuckets since it's guaranteed to be large enough.
+ destSpans := spansA[:0] // Reuse spansA capacity for destSpans since we don't need it anymore.
+ destBuckets := targetBuckets[:0] // Reuse targetBuckets capacity for destBuckets since it's guaranteed to be large enough.
+ cDestBuckets := cTargetBuckets[:0] // Reuse cTargetBuckets capacity for cDestBuckets since it's guaranteed to be large enough.
lastIdx := int32(-1)
- for i, count := range targetBuckets {
- if count == 0 {
+ for i := range targetBuckets {
+ if targetBuckets[i] == 0 && cTargetBuckets[i] == 0 {
continue
}
- destBuckets = append(destBuckets, count)
+ destBuckets = append(destBuckets, targetBuckets[i])
+ cDestBuckets = append(cDestBuckets, cTargetBuckets[i])
idx := int32(i)
if len(destSpans) > 0 && idx == lastIdx+1 {
@@ -1578,7 +1890,7 @@ func addCustomBucketsWithMismatches(
lastIdx = idx
}
- return destSpans, destBuckets
+ return destSpans, destBuckets, cDestBuckets
}
// ReduceResolution reduces the float histogram's spans, buckets into target schema.
@@ -1618,6 +1930,121 @@ func (h *FloatHistogram) ReduceResolution(targetSchema int32) error {
return nil
}
+// kahanReduceResolution works like reduceResolution, but it is specialized for FloatHistogram's KahanAdd method.
+// Unlike reduceResolution, which supports both float and integer buckets, this function only operates on float buckets.
+// It also takes an additional argument, originCompensationBuckets, representing the compensation buckets for the origin histogram.
+// Modifies both the origin histogram buckets and their associated compensation buckets.
+func kahanReduceResolution(
+ originSpans []Span,
+ originReceivingBuckets []float64,
+ originCompensationBuckets []float64,
+ originSchema,
+ targetSchema int32,
+ inplace bool,
+) (newSpans []Span, newReceivingBuckets, newCompensationBuckets []float64) {
+ var (
+ targetSpans []Span // The spans in the target schema.
+ targetReceivingBuckets []float64 // The receiving bucket counts in the target schema.
+ targetCompensationBuckets []float64 // The compensation bucket counts in the target schema.
+ bucketIdx int32 // The index of bucket in the origin schema.
+ bucketCountIdx int // The position of a bucket in origin bucket count slice `originBuckets`.
+ targetBucketIdx int32 // The index of bucket in the target schema.
+ lastTargetBucketIdx int32 // The index of the last added target bucket.
+ )
+
+ if inplace {
+ // Slice reuse is safe because when reducing the resolution,
+ // target slices don't grow faster than origin slices are being read.
+ targetSpans = originSpans[:0]
+ targetReceivingBuckets = originReceivingBuckets[:0]
+ targetCompensationBuckets = originCompensationBuckets[:0]
+ }
+
+ for _, span := range originSpans {
+ // Determine the index of the first bucket in this span.
+ bucketIdx += span.Offset
+ for j := 0; j < int(span.Length); j++ {
+ // Determine the index of the bucket in the target schema from the index in the original schema.
+ targetBucketIdx = targetIdx(bucketIdx, originSchema, targetSchema)
+
+ switch {
+ case len(targetSpans) == 0:
+ // This is the first span in the targetSpans.
+ span := Span{
+ Offset: targetBucketIdx,
+ Length: 1,
+ }
+ targetSpans = append(targetSpans, span)
+ targetReceivingBuckets = append(targetReceivingBuckets, originReceivingBuckets[bucketCountIdx])
+ lastTargetBucketIdx = targetBucketIdx
+ targetCompensationBuckets = append(targetCompensationBuckets, originCompensationBuckets[bucketCountIdx])
+
+ case lastTargetBucketIdx == targetBucketIdx:
+ // The current bucket has to be merged into the same target bucket as the previous bucket.
+ lastBucketIdx := len(targetReceivingBuckets) - 1
+ targetReceivingBuckets[lastBucketIdx], targetCompensationBuckets[lastBucketIdx] = kahansum.Inc(
+ originReceivingBuckets[bucketCountIdx],
+ targetReceivingBuckets[lastBucketIdx],
+ targetCompensationBuckets[lastBucketIdx],
+ )
+ targetReceivingBuckets[lastBucketIdx], targetCompensationBuckets[lastBucketIdx] = kahansum.Inc(
+ originCompensationBuckets[bucketCountIdx],
+ targetReceivingBuckets[lastBucketIdx],
+ targetCompensationBuckets[lastBucketIdx],
+ )
+
+ case (lastTargetBucketIdx + 1) == targetBucketIdx:
+ // The current bucket has to go into a new target bucket,
+ // and that bucket is next to the previous target bucket,
+ // so we add it to the current target span.
+ targetSpans[len(targetSpans)-1].Length++
+ lastTargetBucketIdx++
+ targetReceivingBuckets = append(targetReceivingBuckets, originReceivingBuckets[bucketCountIdx])
+ targetCompensationBuckets = append(targetCompensationBuckets, originCompensationBuckets[bucketCountIdx])
+
+ case (lastTargetBucketIdx + 1) < targetBucketIdx:
+ // The current bucket has to go into a new target bucket,
+ // and that bucket is separated by a gap from the previous target bucket,
+ // so we need to add a new target span.
+ span := Span{
+ Offset: targetBucketIdx - lastTargetBucketIdx - 1,
+ Length: 1,
+ }
+ targetSpans = append(targetSpans, span)
+ lastTargetBucketIdx = targetBucketIdx
+ targetReceivingBuckets = append(targetReceivingBuckets, originReceivingBuckets[bucketCountIdx])
+ targetCompensationBuckets = append(targetCompensationBuckets, originCompensationBuckets[bucketCountIdx])
+ }
+
+ bucketIdx++
+ bucketCountIdx++
+ }
+ }
+
+ return targetSpans, targetReceivingBuckets, targetCompensationBuckets
+}
+
+// newCompensationHistogram initializes a new compensation histogram that can be used
+// alongside the current FloatHistogram in Kahan summation.
+// The compensation histogram is structured to match the receiving histogram's bucket layout
+// including its schema, zero threshold and custom values, and it shares spans with the receiving
+// histogram. However, the bucket values in the compensation histogram are initialized to zero.
+func (h *FloatHistogram) newCompensationHistogram() *FloatHistogram {
+ c := &FloatHistogram{
+ CounterResetHint: h.CounterResetHint,
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ CustomValues: h.CustomValues,
+ PositiveBuckets: make([]float64, len(h.PositiveBuckets)),
+ PositiveSpans: h.PositiveSpans,
+ NegativeSpans: h.NegativeSpans,
+ }
+ if !h.UsesCustomBuckets() {
+ c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
+ }
+ return c
+}
+
// checkSchemaAndBounds checks if two histograms are compatible because they
// both use a standard exponential schema or because they both are NHCBs.
func (h *FloatHistogram) checkSchemaAndBounds(other *FloatHistogram) error {
@@ -1659,3 +2086,27 @@ func (h *FloatHistogram) adjustCounterReset(other *FloatHistogram) (counterReset
}
return false
}
+
+// HasOverflow reports whether any of the FloatHistogram's fields contain an infinite value.
+// This can happen when aggregating multiple histograms and exceeding float64 capacity.
+func (h *FloatHistogram) HasOverflow() bool {
+ if math.IsInf(h.ZeroCount, 0) || math.IsInf(h.Count, 0) || math.IsInf(h.Sum, 0) {
+ return true
+ }
+ for _, v := range h.PositiveBuckets {
+ if math.IsInf(v, 0) {
+ return true
+ }
+ }
+ for _, v := range h.NegativeBuckets {
+ if math.IsInf(v, 0) {
+ return true
+ }
+ }
+ for _, v := range h.CustomValues {
+ if math.IsInf(v, 0) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go
index 5c29544c8f..caf77b6256 100644
--- a/model/histogram/float_histogram_test.go
+++ b/model/histogram/float_histogram_test.go
@@ -2514,6 +2514,243 @@ func TestFloatHistogramAdd(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
testHistogramAdd(t, c.in1, c.in2, c.expected, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
testHistogramAdd(t, c.in2, c.in1, c.expected, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
+ testHistogramKahanAdd(t, c.in1, nil, c.in2, c.expected, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
+ testHistogramKahanAdd(t, c.in2, nil, c.in1, c.expected, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
+ })
+ }
+}
+
+// TestKahanAddWithCompHistogram tests KahanAdd.
+// Test cases provide two float histograms and a compensation histogram with predefined values.
+func TestKahanAddWithCompHistogram(t *testing.T) {
+ cases := []struct {
+ name string
+ in1, comp, in2, expectedSum *FloatHistogram
+ expErrMsg string
+ expCounterResetCollision bool
+ expNHCBBoundsReconciled bool
+ }{
+ {
+ name: "larger zero bucket in first histogram",
+ in1: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 21,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {0, 3}},
+ PositiveBuckets: []float64{2, 3, 6, 2, 5},
+ NegativeSpans: []Span{{4, 2}, {1, 2}},
+ NegativeBuckets: []float64{1, 1, 4, 4},
+ },
+ comp: &FloatHistogram{
+ ZeroThreshold: 1,
+ PositiveSpans: []Span{{1, 2}, {0, 3}},
+ PositiveBuckets: []float64{0.02, 0.03, 0.06, 0.02, 0.05},
+ NegativeSpans: []Span{{4, 2}, {1, 2}},
+ NegativeBuckets: []float64{0.01, 0.01, 0.04, 0.04},
+ },
+ in2: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 30,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 2}, {2, 3}},
+ PositiveBuckets: []float64{1, 0, 3, 4, 7},
+ NegativeSpans: []Span{{3, 2}, {3, 2}},
+ NegativeBuckets: []float64{3, 1, 5, 6},
+ },
+ expectedSum: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 29,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 2}, {0, 3}},
+ PositiveBuckets: []float64{2.02, 6.03, 10.06, 9.02, 5.05},
+ NegativeSpans: []Span{{3, 3}, {1, 3}},
+ NegativeBuckets: []float64{3, 2.01, 1.01, 4.04, 9.04, 6},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "smaller zero bucket in first histogram",
+ in1: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 40,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 2}, {2, 3}},
+ PositiveBuckets: []float64{1, 2, 3, 4, 7},
+ NegativeSpans: []Span{{3, 2}, {3, 2}},
+ NegativeBuckets: []float64{3, 1, 5, 6},
+ },
+ comp: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ ZeroCount: 0,
+ PositiveSpans: []Span{{-2, 2}, {2, 3}},
+ PositiveBuckets: []float64{0.02, 0.03, 0.06, 0.07, 0.05},
+ NegativeSpans: []Span{{3, 2}, {3, 2}},
+ NegativeBuckets: []float64{0.01, 0.01, 0.04, 0.04},
+ },
+ in2: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 11,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {0, 3}},
+ PositiveBuckets: []float64{2, 3, 6, 2, 5},
+ NegativeSpans: []Span{{4, 2}, {1, 2}},
+ NegativeBuckets: []float64{1, 1, 4, 4},
+ },
+ expectedSum: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 31.05,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 5}},
+ PositiveBuckets: []float64{2, 6.06, 10.07, 9.05, 5},
+ NegativeSpans: []Span{{3, 3}, {1, 3}},
+ NegativeBuckets: []float64{3.01, 2.01, 1, 4, 9.04, 6.04},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "first histogram contains zero buckets and Compact is called",
+ in1: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 30,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 2}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{1, 3, 3, 0, 7, -6},
+ },
+ comp: &FloatHistogram{
+ ZeroThreshold: 0.01,
+ PositiveSpans: []Span{{-2, 2}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{7, 2, 0.03, 0, 0.05, 0.06},
+ },
+ in2: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 21,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{2, 3, 2, 5},
+ },
+ expectedSum: &FloatHistogram{
+ ZeroThreshold: 1,
+ ZeroCount: 41,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{5.03, 3, 9.05, -0.94},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "reduce resolution",
+ in1: &FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 30,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 2}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{1, 3, 1e100, 0, 7, -6},
+ },
+ comp: &FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.01,
+ ZeroCount: 1,
+ PositiveSpans: []Span{{-2, 2}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{7, 2, 0.03, 0, 0.05, 0.06},
+ },
+ in2: &FloatHistogram{
+ Schema: 1,
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 21,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{-1e100, 3, 2, 5},
+ },
+ expectedSum: &FloatHistogram{
+ Schema: 1,
+ ZeroThreshold: 1,
+ ZeroCount: 42,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 5}},
+ PositiveBuckets: []float64{0.03, 10.05, -5.94, 2, 5},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "reduce resolution of 'other' histogram",
+ in1: &FloatHistogram{
+ Schema: 0,
+ ZeroThreshold: 1,
+ ZeroCount: 17,
+ Count: 21,
+ Sum: 1.234,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{2, 3, 2, 5},
+ },
+ comp: &FloatHistogram{
+ Schema: 0,
+ ZeroThreshold: 1,
+ ZeroCount: 1,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{17, 2, 0.03, 0},
+ },
+ in2: &FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.01,
+ ZeroCount: 11,
+ Count: 30,
+ Sum: 2.345,
+ PositiveSpans: []Span{{-2, 3}, {1, 1}, {1, 3}},
+ PositiveBuckets: []float64{1e100, 4.1, -1e100, 2.1, 0, 7, -6},
+ },
+ expectedSum: &FloatHistogram{
+ Schema: 0,
+ ZeroThreshold: 1,
+ ZeroCount: 33.1,
+ Count: 51,
+ Sum: 3.579,
+ PositiveSpans: []Span{{1, 2}, {1, 2}},
+ PositiveBuckets: []float64{21.1, 6, 2.03, 5},
+ },
+ expErrMsg: "",
+ expCounterResetCollision: false,
+ expNHCBBoundsReconciled: false,
+ },
+ {
+ name: "warn on counter reset hint collision",
+ in1: &FloatHistogram{
+ Schema: CustomBucketsSchema,
+ CounterResetHint: CounterReset,
+ },
+ in2: &FloatHistogram{
+ Schema: CustomBucketsSchema,
+ CounterResetHint: NotCounterReset,
+ },
+ expErrMsg: "",
+ expCounterResetCollision: true,
+ expNHCBBoundsReconciled: false,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ testHistogramKahanAdd(t, c.in1, c.comp, c.in2, c.expectedSum, c.expErrMsg, c.expCounterResetCollision, c.expNHCBBoundsReconciled)
})
}
}
@@ -2557,6 +2794,68 @@ func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram, expErrMsg st
}
}
+func testHistogramKahanAdd(
+ t *testing.T, a, c, b, expectedSum *FloatHistogram, expErrMsg string, expCounterResetCollision, expNHCBBoundsReconciled bool,
+) {
+ var (
+ aCopy = a.Copy()
+ bCopy = b.Copy()
+ cCopy *FloatHistogram
+ expectedSumCopy *FloatHistogram
+ )
+
+ if c != nil {
+ cCopy = c.Copy()
+ }
+
+ if expectedSum != nil {
+ expectedSumCopy = expectedSum.Copy()
+ }
+
+ comp, counterResetCollision, nhcbBoundsReconciled, err := aCopy.KahanAdd(bCopy, cCopy)
+ if expErrMsg != "" {
+ require.EqualError(t, err, expErrMsg)
+ } else {
+ require.NoError(t, err)
+ }
+
+ var res *FloatHistogram
+ if comp != nil {
+ // Check that aCopy and its compensation histogram layouts match after addition.
+ require.Equal(t, aCopy.Schema, comp.Schema)
+ require.Equal(t, aCopy.ZeroThreshold, comp.ZeroThreshold)
+ require.Equal(t, aCopy.PositiveSpans, comp.PositiveSpans)
+ require.Equal(t, aCopy.NegativeSpans, comp.NegativeSpans)
+ require.Len(t, aCopy.CustomValues, len(comp.CustomValues))
+ require.Len(t, aCopy.PositiveBuckets, len(comp.PositiveBuckets))
+ require.Len(t, aCopy.NegativeBuckets, len(comp.NegativeBuckets))
+
+ res, _, _, err = aCopy.Add(comp)
+ if expErrMsg != "" {
+ require.EqualError(t, err, expErrMsg)
+ } else {
+ require.NoError(t, err)
+ }
+ }
+
+ // Check that the warnings are correct.
+ require.Equal(t, expCounterResetCollision, counterResetCollision)
+ require.Equal(t, expNHCBBoundsReconciled, nhcbBoundsReconciled)
+
+ if expectedSum != nil {
+ res.Compact(0)
+ expectedSumCopy.Compact(0)
+
+ require.Equal(t, expectedSumCopy, res)
+
+ // Has it also happened in-place?
+ require.Equal(t, expectedSumCopy, aCopy)
+
+ // Check that the argument was not mutated.
+ require.Equal(t, b, bCopy)
+ }
+}
+
func TestFloatHistogramSub(t *testing.T) {
// This has fewer test cases than TestFloatHistogramAdd because Add and
// Sub share most of the trickier code.
diff --git a/model/histogram/generic.go b/model/histogram/generic.go
index 61fc5067f2..9ec9e9cd4b 100644
--- a/model/histogram/generic.go
+++ b/model/histogram/generic.go
@@ -230,14 +230,29 @@ func (b *baseBucketIterator[BC, IBC]) strippedAt() strippedBucket[BC] {
// compactBuckets is a generic function used by both Histogram.Compact and
// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are
// deltas. Set it to false if the buckets contain absolute counts.
-func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
+// For float histograms, deltaBuckets is always false.
+// primaryBuckets hold the main histogram values, while compensationBuckets (if provided) store
+// Kahan compensation values. compensationBuckets can only be provided for float histograms
+// and are processed in parallel with primaryBuckets to maintain synchronization.
+func compactBuckets[IBC InternalBucketCount](
+ primaryBuckets []IBC, compensationBuckets []float64,
+ spans []Span, maxEmptyBuckets int, deltaBuckets bool,
+) (updatedPrimaryBuckets []IBC, updatedCompensationBuckets []float64, updatedSpans []Span) {
+ if deltaBuckets && compensationBuckets != nil {
+ panic("histogram type mismatch: deltaBuckets cannot be true when compensationBuckets is provided")
+ } else if compensationBuckets != nil && len(primaryBuckets) != len(compensationBuckets) {
+ panic(fmt.Errorf(
+ "primary buckets layout (%v) mismatch against associated compensation buckets layout (%v)",
+ primaryBuckets, compensationBuckets),
+ )
+ }
// Fast path: If there are no empty buckets AND no offset in any span is
// <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return
// immediately. We check that first because it's cheap and presumably
// common.
nothingToDo := true
var currentBucketAbsolute IBC
- for _, bucket := range buckets {
+ for _, bucket := range primaryBuckets {
if deltaBuckets {
currentBucketAbsolute += bucket
} else {
@@ -256,7 +271,7 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
}
}
if nothingToDo {
- return buckets, spans
+ return primaryBuckets, compensationBuckets, spans
}
}
@@ -268,12 +283,19 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
emptyBucketsHere := func() int {
i := 0
abs := currentBucketAbsolute
- for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 {
+ comp := float64(0)
+ if compensationBuckets != nil {
+ comp = compensationBuckets[iBucket]
+ }
+ for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 && comp == 0 {
i++
- if i+iBucket >= len(buckets) {
+ if i+iBucket >= len(primaryBuckets) {
break
}
- abs = buckets[i+iBucket]
+ abs = primaryBuckets[i+iBucket]
+ if compensationBuckets != nil {
+ comp = compensationBuckets[i+iBucket]
+ }
}
return i
}
@@ -313,11 +335,11 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
// Cut out empty buckets from start and end of spans, no matter
// what. Also cut out empty buckets from the middle of a span but only
// if there are more than maxEmptyBuckets consecutive empty buckets.
- for iBucket < len(buckets) {
+ for iBucket < len(primaryBuckets) {
if deltaBuckets {
- currentBucketAbsolute += buckets[iBucket]
+ currentBucketAbsolute += primaryBuckets[iBucket]
} else {
- currentBucketAbsolute = buckets[iBucket]
+ currentBucketAbsolute = primaryBuckets[iBucket]
}
if nEmpty := emptyBucketsHere(); nEmpty > 0 {
if posInSpan > 0 &&
@@ -334,11 +356,14 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
continue
}
// In all other cases, we cut out the empty buckets.
- if deltaBuckets && iBucket+nEmpty < len(buckets) {
- currentBucketAbsolute = -buckets[iBucket]
- buckets[iBucket+nEmpty] += buckets[iBucket]
+ if deltaBuckets && iBucket+nEmpty < len(primaryBuckets) {
+ currentBucketAbsolute = -primaryBuckets[iBucket]
+ primaryBuckets[iBucket+nEmpty] += primaryBuckets[iBucket]
+ }
+ primaryBuckets = append(primaryBuckets[:iBucket], primaryBuckets[iBucket+nEmpty:]...)
+ if compensationBuckets != nil {
+ compensationBuckets = append(compensationBuckets[:iBucket], compensationBuckets[iBucket+nEmpty:]...)
}
- buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...)
if posInSpan == 0 {
// Start of span.
if nEmpty == int(spans[iSpan].Length) {
@@ -388,8 +413,8 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
iSpan++
}
}
- if maxEmptyBuckets == 0 || len(buckets) == 0 {
- return buckets, spans
+ if maxEmptyBuckets == 0 || len(primaryBuckets) == 0 {
+ return primaryBuckets, compensationBuckets, spans
}
// Finally, check if any offsets between spans are small enough to merge
@@ -397,7 +422,7 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
iBucket = int(spans[0].Length)
if deltaBuckets {
currentBucketAbsolute = 0
- for _, bucket := range buckets[:iBucket] {
+ for _, bucket := range primaryBuckets[:iBucket] {
currentBucketAbsolute += bucket
}
}
@@ -406,7 +431,7 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
if int(spans[iSpan].Offset) > maxEmptyBuckets {
l := int(spans[iSpan].Length)
if deltaBuckets {
- for _, bucket := range buckets[iBucket : iBucket+l] {
+ for _, bucket := range primaryBuckets[iBucket : iBucket+l] {
currentBucketAbsolute += bucket
}
}
@@ -418,22 +443,28 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
offset := int(spans[iSpan].Offset)
spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length
spans = append(spans[:iSpan], spans[iSpan+1:]...)
- newBuckets := make([]IBC, len(buckets)+offset)
- copy(newBuckets, buckets[:iBucket])
- copy(newBuckets[iBucket+offset:], buckets[iBucket:])
+ newPrimaryBuckets := make([]IBC, len(primaryBuckets)+offset)
+ copy(newPrimaryBuckets, primaryBuckets[:iBucket])
+ copy(newPrimaryBuckets[iBucket+offset:], primaryBuckets[iBucket:])
if deltaBuckets {
- newBuckets[iBucket] = -currentBucketAbsolute
- newBuckets[iBucket+offset] += currentBucketAbsolute
+ newPrimaryBuckets[iBucket] = -currentBucketAbsolute
+ newPrimaryBuckets[iBucket+offset] += currentBucketAbsolute
+ }
+ primaryBuckets = newPrimaryBuckets
+ if compensationBuckets != nil {
+ newCompensationBuckets := make([]float64, len(compensationBuckets)+offset)
+ copy(newCompensationBuckets, compensationBuckets[:iBucket])
+ copy(newCompensationBuckets[iBucket+offset:], compensationBuckets[iBucket:])
+ compensationBuckets = newCompensationBuckets
}
iBucket += offset
- buckets = newBuckets
- currentBucketAbsolute = buckets[iBucket]
+ currentBucketAbsolute = primaryBuckets[iBucket]
// Note that with many merges, it would be more efficient to
// first record all the chunks of empty buckets to insert and
// then do it in one go through all the buckets.
}
- return buckets, spans
+ return primaryBuckets, compensationBuckets, spans
}
func checkHistogramSpans(spans []Span, numBuckets int) error {
diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go
index 5be60174fc..6ed02aed57 100644
--- a/model/histogram/histogram.go
+++ b/model/histogram/histogram.go
@@ -349,11 +349,11 @@ func allEmptySpans(s []Span) bool {
// Compact works like FloatHistogram.Compact. See there for detailed
// explanations.
func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
- h.PositiveBuckets, h.PositiveSpans = compactBuckets(
- h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true,
+ h.PositiveBuckets, _, h.PositiveSpans = compactBuckets(
+ h.PositiveBuckets, nil, h.PositiveSpans, maxEmptyBuckets, true,
)
- h.NegativeBuckets, h.NegativeSpans = compactBuckets(
- h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true,
+ h.NegativeBuckets, _, h.NegativeSpans = compactBuckets(
+ h.NegativeBuckets, nil, h.NegativeSpans, maxEmptyBuckets, true,
)
return h
}
diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go
index 6087253d11..4045cc65db 100644
--- a/model/relabel/relabel.go
+++ b/model/relabel/relabel.go
@@ -269,22 +269,8 @@ func (re Regexp) String() string {
return str[5 : len(str)-2]
}
-// Process returns a relabeled version of the given label set. The relabel configurations
-// are applied in order of input.
-// There are circumstances where Process will modify the input label.
-// If you want to avoid issues with the input label set being modified, at the cost of
-// higher memory usage, you can use lbls.Copy().
-// If a label set is dropped, EmptyLabels and false is returned.
-func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) {
- lb := labels.NewBuilder(lbls)
- if !ProcessBuilder(lb, cfgs...) {
- return labels.EmptyLabels(), false
- }
- return lb.Labels(), true
-}
-
-// ProcessBuilder is like Process, but the caller passes a labels.Builder
-// containing the initial set of labels, which is mutated by the rules.
+// ProcessBuilder applies relabeling configurations (rules) to the labels in lb.
+// The rules are applied in order of input. Returns false if the rule says to drop.
func ProcessBuilder(lb *labels.Builder, cfgs ...*Config) (keep bool) {
for _, cfg := range cfgs {
keep = relabel(cfg, lb)
diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go
index a3eb925995..8c2ba55ea7 100644
--- a/model/relabel/relabel_test.go
+++ b/model/relabel/relabel_test.go
@@ -751,10 +751,11 @@ func TestRelabel(t *testing.T) {
require.NoError(t, cfg.Validate(model.UTF8Validation))
}
- res, keep := Process(test.input, test.relabel...)
+ lb := labels.NewBuilder(test.input)
+ keep := ProcessBuilder(lb, test.relabel...)
require.Equal(t, !test.drop, keep)
if keep {
- testutil.RequireEqual(t, test.output, res)
+ testutil.RequireEqual(t, test.output, lb.Labels())
}
}
}
@@ -1064,9 +1065,11 @@ func BenchmarkRelabel(b *testing.B) {
require.NoError(b, err)
}
for _, tt := range tests {
+ lb := labels.NewBuilder(labels.EmptyLabels())
b.Run(tt.name, func(b *testing.B) {
for b.Loop() {
- _, _ = Process(tt.lbls, tt.cfgs...)
+ lb.Reset(tt.lbls)
+ _ = ProcessBuilder(lb, tt.cfgs...)
}
})
}
diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go
index 70541eb0d3..d284a14c40 100644
--- a/model/rulefmt/rulefmt.go
+++ b/model/rulefmt/rulefmt.go
@@ -24,7 +24,7 @@ import (
"time"
"github.com/prometheus/common/model"
- "gopkg.in/yaml.v3"
+ "go.yaml.in/yaml/v3"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
@@ -97,7 +97,7 @@ type ruleGroups struct {
}
// Validate validates all rules in the rule groups.
-func (g *RuleGroups) Validate(node ruleGroups, nameValidationScheme model.ValidationScheme) (errs []error) {
+func (g *RuleGroups) Validate(node ruleGroups, nameValidationScheme model.ValidationScheme, p parser.Parser) (errs []error) {
if err := namevalidationutil.CheckNameValidationScheme(nameValidationScheme); err != nil {
errs = append(errs, err)
return errs
@@ -134,7 +134,7 @@ func (g *RuleGroups) Validate(node ruleGroups, nameValidationScheme model.Valida
set[g.Name] = struct{}{}
for i, r := range g.Rules {
- for _, node := range r.Validate(node.Groups[j].Rules[i], nameValidationScheme) {
+ for _, node := range r.Validate(node.Groups[j].Rules[i], nameValidationScheme, p) {
var ruleName string
if r.Alert != "" {
ruleName = r.Alert
@@ -198,7 +198,7 @@ type RuleNode struct {
}
// Validate the rule and return a list of encountered errors.
-func (r *Rule) Validate(node RuleNode, nameValidationScheme model.ValidationScheme) (nodes []WrappedError) {
+func (r *Rule) Validate(node RuleNode, nameValidationScheme model.ValidationScheme, p parser.Parser) (nodes []WrappedError) {
if r.Record != "" && r.Alert != "" {
nodes = append(nodes, WrappedError{
err: errors.New("only one of 'record' and 'alert' must be set"),
@@ -219,7 +219,7 @@ func (r *Rule) Validate(node RuleNode, nameValidationScheme model.ValidationSche
err: errors.New("field 'expr' must be set in rule"),
node: &node.Expr,
})
- } else if _, err := parser.ParseExpr(r.Expr); err != nil {
+ } else if _, err := p.ParseExpr(r.Expr); err != nil {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("could not parse expression: %w", err),
node: &node.Expr,
@@ -339,7 +339,7 @@ func testTemplateParsing(rl *Rule) (errs []error) {
}
// Parse parses and validates a set of rules.
-func Parse(content []byte, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*RuleGroups, []error) {
+func Parse(content []byte, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme, p parser.Parser) (*RuleGroups, []error) {
var (
groups RuleGroups
node ruleGroups
@@ -364,16 +364,16 @@ func Parse(content []byte, ignoreUnknownFields bool, nameValidationScheme model.
return nil, errs
}
- return &groups, groups.Validate(node, nameValidationScheme)
+ return &groups, groups.Validate(node, nameValidationScheme, p)
}
// ParseFile reads and parses rules from a file.
-func ParseFile(file string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*RuleGroups, []error) {
+func ParseFile(file string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme, p parser.Parser) (*RuleGroups, []error) {
b, err := os.ReadFile(file)
if err != nil {
return nil, []error{fmt.Errorf("%s: %w", file, err)}
}
- rgs, errs := Parse(b, ignoreUnknownFields, nameValidationScheme)
+ rgs, errs := Parse(b, ignoreUnknownFields, nameValidationScheme, p)
for i := range errs {
errs[i] = fmt.Errorf("%s: %w", file, errs[i])
}
diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go
index ec16052bc0..071711319c 100644
--- a/model/rulefmt/rulefmt_test.go
+++ b/model/rulefmt/rulefmt_test.go
@@ -21,18 +21,22 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
+ "go.yaml.in/yaml/v3"
+
+ "github.com/prometheus/prometheus/promql/parser"
)
+var testParser = parser.NewParser(parser.Options{})
+
func TestParseFileSuccess(t *testing.T) {
- _, errs := ParseFile("testdata/test.yaml", false, model.UTF8Validation)
+ _, errs := ParseFile("testdata/test.yaml", false, model.UTF8Validation, testParser)
require.Empty(t, errs, "unexpected errors parsing file")
- _, errs = ParseFile("testdata/utf-8_lname.good.yaml", false, model.UTF8Validation)
+ _, errs = ParseFile("testdata/utf-8_lname.good.yaml", false, model.UTF8Validation, testParser)
require.Empty(t, errs, "unexpected errors parsing file")
- _, errs = ParseFile("testdata/utf-8_annotation.good.yaml", false, model.UTF8Validation)
+ _, errs = ParseFile("testdata/utf-8_annotation.good.yaml", false, model.UTF8Validation, testParser)
require.Empty(t, errs, "unexpected errors parsing file")
- _, errs = ParseFile("testdata/legacy_validation_annotation.good.yaml", false, model.LegacyValidation)
+ _, errs = ParseFile("testdata/legacy_validation_annotation.good.yaml", false, model.LegacyValidation, testParser)
require.Empty(t, errs, "unexpected errors parsing file")
}
@@ -41,7 +45,7 @@ func TestParseFileSuccessWithAliases(t *testing.T) {
/
sum without(instance) (rate(requests_total[5m]))
`
- rgs, errs := ParseFile("testdata/test_aliases.yaml", false, model.UTF8Validation)
+ rgs, errs := ParseFile("testdata/test_aliases.yaml", false, model.UTF8Validation, testParser)
require.Empty(t, errs, "unexpected errors parsing file")
for _, rg := range rgs.Groups {
require.Equal(t, "HighAlert", rg.Rules[0].Alert)
@@ -119,7 +123,7 @@ func TestParseFileFailure(t *testing.T) {
if c.nameValidationScheme == model.UnsetValidation {
c.nameValidationScheme = model.UTF8Validation
}
- _, errs := ParseFile(filepath.Join("testdata", c.filename), false, c.nameValidationScheme)
+ _, errs := ParseFile(filepath.Join("testdata", c.filename), false, c.nameValidationScheme, testParser)
require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename)
require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
})
@@ -215,7 +219,7 @@ groups:
}
for _, tst := range tests {
- rgs, errs := Parse([]byte(tst.ruleString), false, model.UTF8Validation)
+ rgs, errs := Parse([]byte(tst.ruleString), false, model.UTF8Validation, testParser)
require.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString)
passed := (tst.shouldPass && len(errs) == 0) || (!tst.shouldPass && len(errs) > 0)
require.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString)
@@ -242,7 +246,7 @@ groups:
annotations:
summary: "Instance {{ $labels.instance }} up"
`
- _, errs := Parse([]byte(group), false, model.UTF8Validation)
+ _, errs := Parse([]byte(group), false, model.UTF8Validation, testParser)
require.Len(t, errs, 2, "Expected two errors")
var err00 *Error
require.ErrorAs(t, errs[0], &err00)
diff --git a/notifier/alertmanager_test.go b/notifier/alertmanager_test.go
index 668271d267..ca2bd2f771 100644
--- a/notifier/alertmanager_test.go
+++ b/notifier/alertmanager_test.go
@@ -14,11 +14,15 @@
package notifier
import (
+ "log/slog"
"testing"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
)
func TestPostPath(t *testing.T) {
@@ -60,3 +64,89 @@ func TestLabelSetNotReused(t *testing.T) {
// Target modified during alertmanager extraction
require.Equal(t, tg, makeInputTargetGroup())
}
+
+// TestAlertmanagerSetSync verifies that sync properly manages sendloop lifecycle:
+// - Starts sendloops for new alertmanagers.
+// - Stops sendloops for removed alertmanagers.
+// - Does NOT stop sendloops that are still in use.
+// - Does NOT stop sendloops that were just created.
+func TestAlertmanagerSetSync(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ alertmanagersDiscoveredFunc := func() float64 { return 0 }
+ metrics := newAlertMetrics(reg, alertmanagersDiscoveredFunc)
+ logger := slog.New(slog.DiscardHandler)
+ opts := &Options{QueueCapacity: 10, MaxBatchSize: DefaultMaxBatchSize}
+
+ cfg := config.DefaultAlertmanagerConfig
+
+ // Create alertmanagerSet
+ ams, err := newAlertmanagerSet(&cfg, opts, logger, metrics)
+ require.NoError(t, err)
+
+ defer func() {
+ ams.sync([]*targetgroup.Group{})
+ require.Empty(t, ams.sendLoops, "All sendloops should be cleaned up")
+ }()
+
+ // First sync: Add AM1 and AM2
+ tgs1 := []*targetgroup.Group{
+ {
+ Targets: []model.LabelSet{
+ {model.AddressLabel: "am1.example.com:9093"},
+ {model.AddressLabel: "am2.example.com:9093"},
+ },
+ },
+ }
+
+ ams.sync(tgs1)
+
+ require.Len(t, ams.sendLoops, 2, "AM1 and AM2 sendloops should be created")
+ require.Contains(t, ams.sendLoops, "http://am1.example.com:9093/api/v2/alerts", "AM1 sendloop should be created")
+ require.Contains(t, ams.sendLoops, "http://am2.example.com:9093/api/v2/alerts", "AM2 sendloop should be created")
+
+ am1Loop := ams.sendLoops["http://am1.example.com:9093/api/v2/alerts"]
+ am2Loop := ams.sendLoops["http://am2.example.com:9093/api/v2/alerts"]
+ require.NotNil(t, am1Loop)
+ require.NotNil(t, am2Loop)
+
+ // Second sync: Keep AM2, remove AM1, add AM3
+ tgs2 := []*targetgroup.Group{
+ {
+ Targets: []model.LabelSet{
+ {model.AddressLabel: "am2.example.com:9093"},
+ {model.AddressLabel: "am3.example.com:9093"},
+ },
+ },
+ }
+
+ ams.sync(tgs2)
+
+ require.Len(t, ams.sendLoops, 2)
+ require.NotContains(t, ams.sendLoops, "http://am1.example.com:9093/api/v2/alerts", "AM1 sendloop should be removed")
+ require.Contains(t, ams.sendLoops, "http://am2.example.com:9093/api/v2/alerts", "AM2 sendloop should be kept")
+ require.Contains(t, ams.sendLoops, "http://am3.example.com:9093/api/v2/alerts", "AM3 sendloop should be created")
+
+ am2LoopAfter := ams.sendLoops["http://am2.example.com:9093/api/v2/alerts"]
+ require.Same(t, am2Loop, am2LoopAfter, "AM2 sendloop should not be recreated")
+
+ am3Loop := ams.sendLoops["http://am3.example.com:9093/api/v2/alerts"]
+ require.NotNil(t, am3Loop, "AM3 sendloop should be created")
+
+ // Third sync: Keep only AM3, remove AM2
+ tgs3 := []*targetgroup.Group{
+ {
+ Targets: []model.LabelSet{
+ {model.AddressLabel: "am3.example.com:9093"},
+ },
+ },
+ }
+
+ ams.sync(tgs3)
+
+ require.Len(t, ams.sendLoops, 1)
+ require.NotContains(t, ams.sendLoops, "http://am2.example.com:9093/api/v2/alerts", "AM2 sendloop should be removed")
+ require.Contains(t, ams.sendLoops, "http://am3.example.com:9093/api/v2/alerts", "AM3 sendloop should be kept")
+
+ am3LoopAfter := ams.sendLoops["http://am3.example.com:9093/api/v2/alerts"]
+ require.Same(t, am3Loop, am3LoopAfter, "AM3 sendloop should not be recreated")
+}
diff --git a/notifier/alertmanagerset.go b/notifier/alertmanagerset.go
index eca798e6f5..81565b5cf8 100644
--- a/notifier/alertmanagerset.go
+++ b/notifier/alertmanagerset.go
@@ -16,6 +16,7 @@ package notifier
import (
"crypto/md5"
"encoding/hex"
+ "fmt"
"log/slog"
"net/http"
"sync"
@@ -26,6 +27,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/model/labels"
)
// alertmanagerSet contains a set of Alertmanagers discovered via a group of service
@@ -33,16 +35,19 @@ import (
type alertmanagerSet struct {
cfg *config.AlertmanagerConfig
client *http.Client
+ opts *Options
metrics *alertMetrics
mtx sync.RWMutex
ams []alertmanager
droppedAms []alertmanager
- logger *slog.Logger
+ sendLoops map[string]*sendLoop
+
+ logger *slog.Logger
}
-func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
+func newAlertmanagerSet(cfg *config.AlertmanagerConfig, opts *Options, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager")
if err != nil {
return nil, err
@@ -59,10 +64,12 @@ func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, met
client.Transport = t
s := &alertmanagerSet{
- client: client,
- cfg: cfg,
- logger: logger,
- metrics: metrics,
+ client: client,
+ cfg: cfg,
+ opts: opts,
+ sendLoops: make(map[string]*sendLoop),
+ logger: logger,
+ metrics: metrics,
}
return s, nil
}
@@ -86,36 +93,32 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
s.mtx.Lock()
defer s.mtx.Unlock()
previousAms := s.ams
- // Set new Alertmanagers and deduplicate them along their unique URL.
s.ams = []alertmanager{}
s.droppedAms = []alertmanager{}
s.droppedAms = append(s.droppedAms, allDroppedAms...)
- seen := map[string]struct{}{}
+ // Deduplicate Alertmanagers and add sendloops for new Alertmanagers.
+ seen := map[string]struct{}{}
for _, am := range allAms {
us := am.url().String()
if _, ok := seen[us]; ok {
continue
}
- // This will initialize the Counters for the AM to 0.
- s.metrics.sent.WithLabelValues(us)
- s.metrics.errors.WithLabelValues(us)
-
seen[us] = struct{}{}
s.ams = append(s.ams, am)
}
- // Now remove counters for any removed Alertmanagers.
+ s.addSendLoops(s.ams)
+
+ // Populate a list of Alertmanagers to clean up,
+ // avoid cleaning up what we just added.
for _, am := range previousAms {
us := am.url().String()
if _, ok := seen[us]; ok {
continue
}
- s.metrics.latencySummary.DeleteLabelValues(us)
- s.metrics.latencyHistogram.DeleteLabelValues(us)
- s.metrics.sent.DeleteLabelValues(us)
- s.metrics.errors.DeleteLabelValues(us)
seen[us] = struct{}{}
+ s.cleanSendLoops(am)
}
}
@@ -127,3 +130,62 @@ func (s *alertmanagerSet) configHash() (string, error) {
hash := md5.Sum(b)
return hex.EncodeToString(hash[:]), nil
}
+
+func (s *alertmanagerSet) send(alerts ...*Alert) {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ if len(s.cfg.AlertRelabelConfigs) > 0 {
+ alerts = relabelAlerts(s.cfg.AlertRelabelConfigs, labels.Labels{}, alerts)
+ if len(alerts) == 0 {
+ return
+ }
+ }
+
+ for _, sendLoop := range s.sendLoops {
+ sendLoop.add(alerts...)
+ }
+}
+
+// addSendLoops creates and starts a send loop for newly discovered alertmanager.
+// This function expects the caller to acquire needed locks.
+func (s *alertmanagerSet) addSendLoops(ams []alertmanager) {
+ for _, am := range ams {
+ us := am.url().String()
+ // Only add if sendloop doesn't already exist
+ if loop, exists := s.sendLoops[us]; exists {
+ loop.logger.Debug("Alertmanager already has send loop running, skipping")
+ continue
+ }
+ sendLoop := newSendLoop(us, s.client, s.cfg, s.opts, s.logger.With("alertmanager", us), s.metrics)
+ go sendLoop.loop()
+ s.sendLoops[us] = sendLoop
+ }
+}
+
+// cleanSendLoops stops and cleans the send loops for each removed alertmanager.
+// This function expects the caller to acquire needed locks.
+func (s *alertmanagerSet) cleanSendLoops(ams ...alertmanager) {
+ for _, am := range ams {
+ us := am.url().String()
+ if sendLoop, ok := s.sendLoops[us]; ok {
+ sendLoop.stop()
+ delete(s.sendLoops, us)
+ }
+ }
+}
+
+// startSendLoops starts a send loop for newly discovered alertmanager.
+// This function expects the caller to acquire needed locks.
+// This is mainly needed for testing where the loops are added as part of the test setup.
+func (s *alertmanagerSet) startSendLoops(ams []alertmanager) {
+ for _, am := range ams {
+ us := am.url().String()
+
+ if l, ok := s.sendLoops[us]; ok {
+ go l.loop()
+ continue
+ }
+ panic(fmt.Sprintf("send loop not found for %s", us))
+ }
+}
diff --git a/notifier/manager.go b/notifier/manager.go
index a835cccffd..7eeed79b79 100644
--- a/notifier/manager.go
+++ b/notifier/manager.go
@@ -14,16 +14,12 @@
package notifier
import (
- "bytes"
"context"
- "encoding/json"
"fmt"
- "io"
"log/slog"
"net/http"
"net/url"
"sync"
- "time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -55,13 +51,11 @@ var userAgent = version.PrometheusUserAgent()
// Manager is responsible for dispatching alert notifications to an
// alert manager service.
type Manager struct {
- queue []*Alert
- opts *Options
+ opts *Options
metrics *alertMetrics
- more chan struct{}
- mtx sync.RWMutex
+ mtx sync.RWMutex
stopOnce *sync.Once
stopRequested chan struct{}
@@ -114,23 +108,16 @@ func NewManager(o *Options, nameValidationScheme model.ValidationScheme, logger
}
n := &Manager{
- queue: make([]*Alert, 0, o.QueueCapacity),
- more: make(chan struct{}, 1),
stopRequested: make(chan struct{}),
stopOnce: &sync.Once{},
opts: o,
logger: logger,
}
- queueLenFunc := func() float64 { return float64(n.queueLen()) }
alertmanagersDiscoveredFunc := func() float64 { return float64(len(n.Alertmanagers())) }
- n.metrics = newAlertMetrics(
- o.Registerer,
- o.QueueCapacity,
- queueLenFunc,
- alertmanagersDiscoveredFunc,
- )
+ n.metrics = newAlertMetrics(o.Registerer, alertmanagersDiscoveredFunc)
+ n.metrics.queueCapacity.Set(float64(o.QueueCapacity))
return n
}
@@ -163,7 +150,7 @@ func (n *Manager) ApplyConfig(conf *config.Config) error {
}
for k, cfg := range conf.AlertingConfig.AlertmanagerConfigs.ToMap() {
- ams, err := newAlertmanagerSet(cfg, n.logger, n.metrics)
+ ams, err := newAlertmanagerSet(cfg, n.opts, n.logger, n.metrics)
if err != nil {
return err
}
@@ -176,86 +163,54 @@ func (n *Manager) ApplyConfig(conf *config.Config) error {
if oldAmSet, ok := configToAlertmanagers[hash]; ok {
ams.ams = oldAmSet.ams
ams.droppedAms = oldAmSet.droppedAms
+ // Only transfer sendLoops to the first new config with this hash.
+ // Subsequent configs with the same hash should not share the sendLoops
+ // map reference, as that would cause shared mutable state between
+ // alertmanagerSets (cleanup in one would affect the other).
+ oldAmSet.mtx.Lock()
+ if oldAmSet.sendLoops != nil {
+ ams.mtx.Lock()
+ ams.sendLoops = oldAmSet.sendLoops
+ oldAmSet.sendLoops = nil
+ ams.mtx.Unlock()
+ }
+ oldAmSet.mtx.Unlock()
}
amSets[k] = ams
}
+ // Clean up sendLoops that weren't transferred to new config.
+ // This happens when: (1) key was removed, or (2) key exists but hash changed.
+ // After the transfer loop above, any oldAmSet with non-nil sendLoops
+ // had its sendLoops NOT transferred (since we set it to nil on transfer).
+ for _, oldAmSet := range n.alertmanagers {
+ oldAmSet.mtx.Lock()
+ if oldAmSet.sendLoops != nil {
+ oldAmSet.cleanSendLoops(oldAmSet.ams...)
+ }
+ oldAmSet.mtx.Unlock()
+ }
+
n.alertmanagers = amSets
return nil
}
-func (n *Manager) queueLen() int {
- n.mtx.RLock()
- defer n.mtx.RUnlock()
-
- return len(n.queue)
-}
-
-func (n *Manager) nextBatch() []*Alert {
- n.mtx.Lock()
- defer n.mtx.Unlock()
-
- var alerts []*Alert
-
- if maxBatchSize := n.opts.MaxBatchSize; len(n.queue) > maxBatchSize {
- alerts = append(make([]*Alert, 0, maxBatchSize), n.queue[:maxBatchSize]...)
- n.queue = n.queue[maxBatchSize:]
- } else {
- alerts = append(make([]*Alert, 0, len(n.queue)), n.queue...)
- n.queue = n.queue[:0]
- }
-
- return alerts
-}
-
// Run dispatches notifications continuously, returning once Stop has been called and all
// pending notifications have been drained from the queue (if draining is enabled).
//
// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other.
// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details.
func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
- wg := sync.WaitGroup{}
- wg.Add(2)
+ n.targetUpdateLoop(tsets)
- go func() {
- defer wg.Done()
- n.targetUpdateLoop(tsets)
- }()
-
- go func() {
- defer wg.Done()
- n.sendLoop()
- n.drainQueue()
- }()
-
- wg.Wait()
- n.logger.Info("Notification manager stopped")
-}
-
-// sendLoop continuously consumes the notifications queue and sends alerts to
-// the configured Alertmanagers.
-func (n *Manager) sendLoop() {
- for {
- // If we've been asked to stop, that takes priority over sending any further notifications.
- select {
- case <-n.stopRequested:
- return
- default:
- select {
- case <-n.stopRequested:
- return
-
- case <-n.more:
- n.sendOneBatch()
-
- // If the queue still has items left, kick off the next iteration.
- if n.queueLen() > 0 {
- n.setMore()
- }
- }
- }
+ n.mtx.Lock()
+ defer n.mtx.Unlock()
+ for _, ams := range n.alertmanagers {
+ ams.mtx.Lock()
+ ams.cleanSendLoops(ams.ams...)
+ ams.mtx.Unlock()
}
}
@@ -280,33 +235,6 @@ func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group)
}
}
-func (n *Manager) sendOneBatch() {
- alerts := n.nextBatch()
-
- if !n.sendAll(alerts...) {
- n.metrics.dropped.Add(float64(len(alerts)))
- }
-}
-
-func (n *Manager) drainQueue() {
- if !n.opts.DrainOnShutdown {
- if n.queueLen() > 0 {
- n.logger.Warn("Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen())
- n.metrics.dropped.Add(float64(n.queueLen()))
- }
-
- return
- }
-
- n.logger.Info("Draining any remaining notifications...")
-
- for n.queueLen() > 0 {
- n.sendOneBatch()
- }
-
- n.logger.Info("Remaining notifications drained")
-}
-
func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
n.mtx.Lock()
defer n.mtx.Unlock()
@@ -324,44 +252,23 @@ func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
// Send queues the given notification requests for processing.
// Panics if called on a handler that is not running.
func (n *Manager) Send(alerts ...*Alert) {
- n.mtx.Lock()
- defer n.mtx.Unlock()
+ // If we've been asked to stop, that takes priority over accepting new alerts.
+ select {
+ case <-n.stopRequested:
+ return
+ default:
+ }
+
+ n.mtx.RLock()
+ defer n.mtx.RUnlock()
alerts = relabelAlerts(n.opts.RelabelConfigs, n.opts.ExternalLabels, alerts)
if len(alerts) == 0 {
return
}
- // Queue capacity should be significantly larger than a single alert
- // batch could be.
- if d := len(alerts) - n.opts.QueueCapacity; d > 0 {
- alerts = alerts[d:]
-
- n.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "num_dropped", d)
- n.metrics.dropped.Add(float64(d))
- }
-
- // If the queue is full, remove the oldest alerts in favor
- // of newer ones.
- if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 {
- n.queue = n.queue[d:]
-
- n.logger.Warn("Alert notification queue full, dropping alerts", "num_dropped", d)
- n.metrics.dropped.Add(float64(d))
- }
- n.queue = append(n.queue, alerts...)
-
- // Notify sending goroutine that there are alerts to be processed.
- n.setMore()
-}
-
-// setMore signals that the alert queue has items.
-func (n *Manager) setMore() {
- // If we cannot send on the channel, it means the signal already exists
- // and has not been consumed yet.
- select {
- case n.more <- struct{}{}:
- default:
+ for _, ams := range n.alertmanagers {
+ ams.send(alerts...)
}
}
@@ -403,158 +310,11 @@ func (n *Manager) DroppedAlertmanagers() []*url.URL {
return res
}
-// sendAll sends the alerts to all configured Alertmanagers concurrently.
-// It returns true if the alerts could be sent successfully to at least one Alertmanager.
-func (n *Manager) sendAll(alerts ...*Alert) bool {
- if len(alerts) == 0 {
- return true
- }
-
- begin := time.Now()
-
- // cachedPayload represent 'alerts' marshaled for Alertmanager API v2.
- // Marshaling happens below. Reference here is for caching between
- // for loop iterations.
- var cachedPayload []byte
-
- n.mtx.RLock()
- amSets := n.alertmanagers
- n.mtx.RUnlock()
-
- var (
- wg sync.WaitGroup
- amSetCovered sync.Map
- )
- for k, ams := range amSets {
- var (
- payload []byte
- err error
- amAlerts = alerts
- )
-
- ams.mtx.RLock()
-
- if len(ams.ams) == 0 {
- ams.mtx.RUnlock()
- continue
- }
-
- if len(ams.cfg.AlertRelabelConfigs) > 0 {
- amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts)
- if len(amAlerts) == 0 {
- ams.mtx.RUnlock()
- continue
- }
- // We can't use the cached values from previous iteration.
- cachedPayload = nil
- }
-
- switch ams.cfg.APIVersion {
- case config.AlertmanagerAPIVersionV2:
- {
- if cachedPayload == nil {
- openAPIAlerts := alertsToOpenAPIAlerts(amAlerts)
-
- cachedPayload, err = json.Marshal(openAPIAlerts)
- if err != nil {
- n.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err)
- ams.mtx.RUnlock()
- return false
- }
- }
-
- payload = cachedPayload
- }
- default:
- {
- n.logger.Error(
- fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions),
- "err", err,
- )
- ams.mtx.RUnlock()
- return false
- }
- }
-
- if len(ams.cfg.AlertRelabelConfigs) > 0 {
- // We can't use the cached values on the next iteration.
- cachedPayload = nil
- }
-
- // Being here means len(ams.ams) > 0
- amSetCovered.Store(k, false)
- for _, am := range ams.ams {
- wg.Add(1)
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout))
- defer cancel()
-
- go func(ctx context.Context, k string, client *http.Client, url string, payload []byte, count int) {
- err := n.sendOne(ctx, client, url, payload)
- if err != nil {
- n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err)
- n.metrics.errors.WithLabelValues(url).Add(float64(count))
- } else {
- amSetCovered.CompareAndSwap(k, false, true)
- }
-
- durationSeconds := time.Since(begin).Seconds()
- n.metrics.latencySummary.WithLabelValues(url).Observe(durationSeconds)
- n.metrics.latencyHistogram.WithLabelValues(url).Observe(durationSeconds)
- n.metrics.sent.WithLabelValues(url).Add(float64(count))
-
- wg.Done()
- }(ctx, k, ams.client, am.url().String(), payload, len(amAlerts))
- }
-
- ams.mtx.RUnlock()
- }
-
- wg.Wait()
-
- // Return false if there are any sets which were attempted (e.g. not filtered
- // out) but have no successes.
- allAmSetsCovered := true
- amSetCovered.Range(func(_, value any) bool {
- if !value.(bool) {
- allAmSetsCovered = false
- return false
- }
- return true
- })
-
- return allAmSetsCovered
-}
-
-func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error {
- req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(b))
- if err != nil {
- return err
- }
- req.Header.Set("User-Agent", userAgent)
- req.Header.Set("Content-Type", contentTypeJSON)
- resp, err := n.opts.Do(ctx, c, req)
- if err != nil {
- return err
- }
- defer func() {
- io.Copy(io.Discard, resp.Body)
- resp.Body.Close()
- }()
-
- // Any HTTP status 2xx is OK.
- if resp.StatusCode/100 != 2 {
- return fmt.Errorf("bad response status %s", resp.Status)
- }
-
- return nil
-}
-
// Stop signals the notification manager to shut down and immediately returns.
//
// Run will return once the notification manager has successfully shut down.
//
-// The manager will optionally drain any queued notifications before shutting down.
+// The manager will optionally drain send loops before shutting down.
//
// Stop is safe to call multiple times.
func (n *Manager) Stop() {
diff --git a/notifier/manager_test.go b/notifier/manager_test.go
index 21ab0b28a1..d7108c1628 100644
--- a/notifier/manager_test.go
+++ b/notifier/manager_test.go
@@ -19,14 +19,17 @@ import (
"encoding/json"
"fmt"
"io"
+ "log/slog"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
+ "strings"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
+ prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
@@ -40,27 +43,9 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
+ "github.com/prometheus/prometheus/util/testutil/synctest"
)
-const maxBatchSize = 256
-
-func TestHandlerNextBatch(t *testing.T) {
- h := NewManager(&Options{}, model.UTF8Validation, nil)
-
- for i := range make([]struct{}, 2*maxBatchSize+1) {
- h.queue = append(h.queue, &Alert{
- Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
- })
- }
-
- expected := append([]*Alert{}, h.queue...)
-
- require.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch()))
- require.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch()))
- require.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch()))
- require.Empty(t, h.queue, "Expected queue to be empty but got %d alerts", len(h.queue))
-}
-
func alertsEqual(a, b []*Alert) error {
if len(a) != len(b) {
return fmt.Errorf("length mismatch: %v != %v", a, b)
@@ -108,10 +93,37 @@ func newTestHTTPServerBuilder(expected *[]*Alert, errc chan<- error, u, p string
}))
}
+func newTestAlertmanagerSet(
+ cfg *config.AlertmanagerConfig,
+ client *http.Client,
+ opts *Options,
+ metrics *alertMetrics,
+ alertmanagerURLs ...string,
+) *alertmanagerSet {
+ ams := make([]alertmanager, len(alertmanagerURLs))
+ for i, am := range alertmanagerURLs {
+ ams[i] = alertmanagerMock{urlf: func() string { return am }}
+ }
+ logger := slog.New(slog.DiscardHandler)
+ sendLoops := make(map[string]*sendLoop)
+ for _, am := range alertmanagerURLs {
+ sendLoops[am] = newSendLoop(am, client, cfg, opts, logger, metrics)
+ }
+ return &alertmanagerSet{
+ ams: ams,
+ cfg: cfg,
+ client: client,
+ logger: logger,
+ metrics: metrics,
+ opts: opts,
+ sendLoops: sendLoops,
+ }
+}
+
func TestHandlerSendAll(t *testing.T) {
var (
errc = make(chan error, 1)
- expected = make([]*Alert, 0, maxBatchSize)
+ expected = make([]*Alert, 0)
status1, status2, status3 atomic.Int32
)
status1.Store(int32(http.StatusOK))
@@ -125,7 +137,8 @@ func TestHandlerSendAll(t *testing.T) {
defer server2.Close()
defer server3.Close()
- h := NewManager(&Options{}, model.UTF8Validation, nil)
+ reg := prometheus.NewRegistry()
+ h := NewManager(&Options{Registerer: reg}, model.UTF8Validation, nil)
authClient, _ := config_util.NewClientFromConfig(
config_util.HTTPClientConfig{
@@ -146,35 +159,15 @@ func TestHandlerSendAll(t *testing.T) {
am3Cfg := config.DefaultAlertmanagerConfig
am3Cfg.Timeout = model.Duration(time.Second)
- h.alertmanagers["1"] = &alertmanagerSet{
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server1.URL },
- },
- },
- cfg: &am1Cfg,
- client: authClient,
- }
+ opts := &Options{Do: do, QueueCapacity: 10_000, MaxBatchSize: DefaultMaxBatchSize}
- h.alertmanagers["2"] = &alertmanagerSet{
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server2.URL },
- },
- alertmanagerMock{
- urlf: func() string { return server3.URL },
- },
- },
- cfg: &am2Cfg,
- }
+ h.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, authClient, opts, h.metrics, server1.URL)
+ h.alertmanagers["2"] = newTestAlertmanagerSet(&am2Cfg, nil, opts, h.metrics, server2.URL, server3.URL)
+ h.alertmanagers["3"] = newTestAlertmanagerSet(&am3Cfg, nil, opts, h.metrics)
- h.alertmanagers["3"] = &alertmanagerSet{
- ams: []alertmanager{}, // empty set
- cfg: &am3Cfg,
- }
-
- for i := range make([]struct{}, maxBatchSize) {
- h.queue = append(h.queue, &Alert{
+ var alerts []*Alert
+ for i := range DefaultMaxBatchSize {
+ alerts = append(alerts, &Alert{
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
expected = append(expected, &Alert{
@@ -191,34 +184,62 @@ func TestHandlerSendAll(t *testing.T) {
}
}
- // all ams in all sets are up
- require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+ // Start send loops.
+ for _, ams := range h.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
+ defer func() {
+ for _, ams := range h.alertmanagers {
+ ams.cleanSendLoops(ams.ams...)
+ }
+ }()
+
+ h.Send(alerts...)
+ require.Eventually(t, func() bool {
+ return prom_testutil.ToFloat64(h.metrics.sent.WithLabelValues(server1.URL)) == DefaultMaxBatchSize
+ }, time.Second*2, time.Millisecond*10)
checkNoErr()
- // the only am in set 1 is down
+ // The only am in set 1 is down.
status1.Store(int32(http.StatusNotFound))
- require.False(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+ h.Send(alerts...)
+ // Wait for all send loops to process before changing any status.
+ require.Eventually(t, func() bool {
+ return prom_testutil.ToFloat64(h.metrics.errors.WithLabelValues(server1.URL)) == DefaultMaxBatchSize &&
+ prom_testutil.ToFloat64(h.metrics.sent.WithLabelValues(server2.URL)) == DefaultMaxBatchSize*2 &&
+ prom_testutil.ToFloat64(h.metrics.sent.WithLabelValues(server3.URL)) == DefaultMaxBatchSize*2
+ }, time.Second*2, time.Millisecond*10)
checkNoErr()
- // reset it
+ // Fix the am.
status1.Store(int32(http.StatusOK))
- // only one of the ams in set 2 is down
+ // Only one of the ams in set 2 is down.
status2.Store(int32(http.StatusInternalServerError))
- require.True(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
+ h.Send(alerts...)
+ // Wait for all send loops to either send or fail with errors depending on their status.
+ require.Eventually(t, func() bool {
+ return prom_testutil.ToFloat64(h.metrics.errors.WithLabelValues(server2.URL)) == DefaultMaxBatchSize &&
+ prom_testutil.ToFloat64(h.metrics.sent.WithLabelValues(server1.URL)) == DefaultMaxBatchSize*2 &&
+ prom_testutil.ToFloat64(h.metrics.sent.WithLabelValues(server3.URL)) == DefaultMaxBatchSize*3
+ }, time.Second*2, time.Millisecond*10)
checkNoErr()
- // both ams in set 2 are down
+ // Both ams in set 2 are down.
status3.Store(int32(http.StatusInternalServerError))
- require.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
+ h.Send(alerts...)
+ require.Eventually(t, func() bool {
+ return prom_testutil.ToFloat64(h.metrics.errors.WithLabelValues(server2.URL)) == DefaultMaxBatchSize*2 &&
+ prom_testutil.ToFloat64(h.metrics.errors.WithLabelValues(server3.URL)) == DefaultMaxBatchSize
+ }, time.Second*2, time.Millisecond*10)
checkNoErr()
}
func TestHandlerSendAllRemapPerAm(t *testing.T) {
var (
errc = make(chan error, 1)
- expected1 = make([]*Alert, 0, maxBatchSize)
- expected2 = make([]*Alert, 0, maxBatchSize)
+ expected1 = make([]*Alert, 0)
+ expected2 = make([]*Alert, 0)
expected3 = make([]*Alert, 0)
status1, status2, status3 atomic.Int32
@@ -235,7 +256,8 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
defer server2.Close()
defer server3.Close()
- h := NewManager(&Options{}, model.UTF8Validation, nil)
+ reg := prometheus.NewRegistry()
+ h := NewManager(&Options{QueueCapacity: 10_000, Registerer: reg}, model.UTF8Validation, nil)
h.alertmanagers = make(map[string]*alertmanagerSet)
am1Cfg := config.DefaultAlertmanagerConfig
@@ -263,43 +285,18 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
},
}
- h.alertmanagers = map[string]*alertmanagerSet{
- // Drop no alerts.
- "1": {
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server1.URL },
- },
- },
- cfg: &am1Cfg,
- },
- // Drop only alerts with the "alertnamedrop" label.
- "2": {
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server2.URL },
- },
- },
- cfg: &am2Cfg,
- },
- // Drop all alerts.
- "3": {
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server3.URL },
- },
- },
- cfg: &am3Cfg,
- },
- // Empty list of Alertmanager endpoints.
- "4": {
- ams: []alertmanager{},
- cfg: &config.DefaultAlertmanagerConfig,
- },
- }
+ // Drop no alerts.
+ h.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, nil, h.opts, h.metrics, server1.URL)
+ // Drop only alerts with the "alertnamedrop" label.
+ h.alertmanagers["2"] = newTestAlertmanagerSet(&am2Cfg, nil, h.opts, h.metrics, server2.URL)
+ // Drop all alerts.
+ h.alertmanagers["3"] = newTestAlertmanagerSet(&am3Cfg, nil, h.opts, h.metrics, server3.URL)
+ // Empty list of Alertmanager endpoints.
+ h.alertmanagers["4"] = newTestAlertmanagerSet(&config.DefaultAlertmanagerConfig, nil, h.opts, h.metrics)
- for i := range make([]struct{}, maxBatchSize/2) {
- h.queue = append(h.queue,
+ var alerts []*Alert
+ for i := range make([]struct{}, DefaultMaxBatchSize/2) {
+ alerts = append(alerts,
&Alert{
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
},
@@ -330,63 +327,48 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
}
}
- // all ams are up
- require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+ // Start send loops.
+ for _, ams := range h.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
+ defer func() {
+ // Stop send loops.
+ for _, ams := range h.alertmanagers {
+ ams.cleanSendLoops(ams.ams...)
+ }
+ }()
+
+ // All ams are up.
+ h.Send(alerts...)
+ require.Eventually(t, func() bool {
+ return prom_testutil.ToFloat64(h.metrics.sent.WithLabelValues(server1.URL)) == DefaultMaxBatchSize
+ }, time.Second*2, time.Millisecond*10)
checkNoErr()
- // the only am in set 1 goes down
+ // The only am in set 1 goes down.
status1.Store(int32(http.StatusInternalServerError))
- require.False(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+ h.Send(alerts...)
+ // Wait for metrics to update.
+ require.Eventually(t, func() bool {
+ return prom_testutil.ToFloat64(h.metrics.errors.WithLabelValues(server1.URL)) == DefaultMaxBatchSize
+ }, time.Second*2, time.Millisecond*10)
checkNoErr()
- // reset set 1
+ // Reset set 1.
status1.Store(int32(http.StatusOK))
- // set 3 loses its only am, but all alerts were dropped
- // so there was nothing to send, keeping sendAll true
+ // Set 3 loses its only am, but all alerts were dropped
+ // so there was nothing to send, keeping sendAll true.
status3.Store(int32(http.StatusInternalServerError))
- require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+ h.Send(alerts...)
checkNoErr()
-
- // Verify that individual locks are released.
- for k := range h.alertmanagers {
- h.alertmanagers[k].mtx.Lock()
- h.alertmanagers[k].ams = nil
- h.alertmanagers[k].mtx.Unlock()
- }
-}
-
-func TestCustomDo(t *testing.T) {
- const testURL = "http://testurl.com/"
- const testBody = "testbody"
-
- var received bool
- h := NewManager(&Options{
- Do: func(_ context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
- received = true
- body, err := io.ReadAll(req.Body)
-
- require.NoError(t, err)
-
- require.Equal(t, testBody, string(body))
-
- require.Equal(t, testURL, req.URL.String())
-
- return &http.Response{
- Body: io.NopCloser(bytes.NewBuffer(nil)),
- }, nil
- },
- }, model.UTF8Validation, nil)
-
- h.sendOne(context.Background(), nil, testURL, []byte(testBody))
-
- require.True(t, received, "Expected to receive an alert, but didn't")
}
func TestExternalLabels(t *testing.T) {
+ reg := prometheus.NewRegistry()
h := NewManager(&Options{
- QueueCapacity: 3 * maxBatchSize,
- MaxBatchSize: maxBatchSize,
+ QueueCapacity: 3 * DefaultMaxBatchSize,
+ MaxBatchSize: DefaultMaxBatchSize,
ExternalLabels: labels.FromStrings("a", "b"),
RelabelConfigs: []*relabel.Config{
{
@@ -398,8 +380,14 @@ func TestExternalLabels(t *testing.T) {
NameValidationScheme: model.UTF8Validation,
},
},
+ Registerer: reg,
}, model.UTF8Validation, nil)
+ cfg := config.DefaultAlertmanagerConfig
+ h.alertmanagers = map[string]*alertmanagerSet{
+ "test": newTestAlertmanagerSet(&cfg, nil, h.opts, h.metrics, "test"),
+ }
+
// This alert should get the external label attached.
h.Send(&Alert{
Labels: labels.FromStrings("alertname", "test"),
@@ -416,13 +404,14 @@ func TestExternalLabels(t *testing.T) {
{Labels: labels.FromStrings("alertname", "externalrelabelthis", "a", "c")},
}
- require.NoError(t, alertsEqual(expected, h.queue))
+ require.NoError(t, alertsEqual(expected, h.alertmanagers["test"].sendLoops["test"].queue))
}
func TestHandlerRelabel(t *testing.T) {
+ reg := prometheus.NewRegistry()
h := NewManager(&Options{
- QueueCapacity: 3 * maxBatchSize,
- MaxBatchSize: maxBatchSize,
+ QueueCapacity: 3 * DefaultMaxBatchSize,
+ MaxBatchSize: DefaultMaxBatchSize,
RelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"alertname"},
@@ -439,8 +428,14 @@ func TestHandlerRelabel(t *testing.T) {
NameValidationScheme: model.UTF8Validation,
},
},
+ Registerer: reg,
}, model.UTF8Validation, nil)
+ cfg := config.DefaultAlertmanagerConfig
+ h.alertmanagers = map[string]*alertmanagerSet{
+ "test": newTestAlertmanagerSet(&cfg, nil, h.opts, h.metrics, "test"),
+ }
+
// This alert should be dropped due to the configuration
h.Send(&Alert{
Labels: labels.FromStrings("alertname", "drop"),
@@ -455,7 +450,7 @@ func TestHandlerRelabel(t *testing.T) {
{Labels: labels.FromStrings("alertname", "renamed")},
}
- require.NoError(t, alertsEqual(expected, h.queue))
+ require.NoError(t, alertsEqual(expected, h.alertmanagers["test"].sendLoops["test"].queue))
}
func TestHandlerQueuing(t *testing.T) {
@@ -500,10 +495,12 @@ func TestHandlerQueuing(t *testing.T) {
server.Close()
}()
+ reg := prometheus.NewRegistry()
h := NewManager(
&Options{
- QueueCapacity: 3 * maxBatchSize,
- MaxBatchSize: maxBatchSize,
+ QueueCapacity: 3 * DefaultMaxBatchSize,
+ MaxBatchSize: DefaultMaxBatchSize,
+ Registerer: reg,
},
model.UTF8Validation,
nil,
@@ -513,20 +510,18 @@ func TestHandlerQueuing(t *testing.T) {
am1Cfg := config.DefaultAlertmanagerConfig
am1Cfg.Timeout = model.Duration(time.Second)
+ h.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, nil, h.opts, h.metrics, server.URL)
- h.alertmanagers["1"] = &alertmanagerSet{
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server.URL },
- },
- },
- cfg: &am1Cfg,
- }
go h.Run(nil)
defer h.Stop()
+ // Start send loops.
+ for _, ams := range h.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
+
var alerts []*Alert
- for i := range make([]struct{}, 20*maxBatchSize) {
+ for i := range make([]struct{}, 20*DefaultMaxBatchSize) {
alerts = append(alerts, &Alert{
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
@@ -547,29 +542,22 @@ func TestHandlerQueuing(t *testing.T) {
}
}
- // If the batch is larger than the queue capacity, it should be truncated
- // from the front.
- h.Send(alerts[:4*maxBatchSize]...)
- for i := 1; i < 4; i++ {
- assertAlerts(alerts[i*maxBatchSize : (i+1)*maxBatchSize])
- }
-
// Send one batch, wait for it to arrive and block the server so the queue fills up.
- h.Send(alerts[:maxBatchSize]...)
+ h.Send(alerts[:DefaultMaxBatchSize]...)
<-called
// Send several batches while the server is still blocked so the queue
- // fills up to its maximum capacity (3*maxBatchSize). Then check that the
+ // fills up to its maximum capacity (3*DefaultMaxBatchSize). Then check that the
// queue is truncated in the front.
- h.Send(alerts[1*maxBatchSize : 2*maxBatchSize]...) // this batch should be dropped.
- h.Send(alerts[2*maxBatchSize : 3*maxBatchSize]...)
- h.Send(alerts[3*maxBatchSize : 4*maxBatchSize]...)
+ h.Send(alerts[1*DefaultMaxBatchSize : 2*DefaultMaxBatchSize]...) // This batch should be dropped.
+ h.Send(alerts[2*DefaultMaxBatchSize : 3*DefaultMaxBatchSize]...)
+ h.Send(alerts[3*DefaultMaxBatchSize : 4*DefaultMaxBatchSize]...)
// Send the batch that drops the first one.
- h.Send(alerts[4*maxBatchSize : 5*maxBatchSize]...)
+ h.Send(alerts[4*DefaultMaxBatchSize : 5*DefaultMaxBatchSize]...)
// Unblock the server.
- expectedc <- alerts[:maxBatchSize]
+ expectedc <- alerts[:DefaultMaxBatchSize]
select {
case err := <-errc:
require.NoError(t, err)
@@ -579,7 +567,7 @@ func TestHandlerQueuing(t *testing.T) {
// Verify that we receive the last 3 batches.
for i := 2; i < 5; i++ {
- assertAlerts(alerts[i*maxBatchSize : (i+1)*maxBatchSize])
+ assertAlerts(alerts[i*DefaultMaxBatchSize : (i+1)*DefaultMaxBatchSize])
}
}
@@ -713,319 +701,321 @@ func makeInputTargetGroup() *targetgroup.Group {
// queued alerts. This test reproduces the issue described in https://github.com/prometheus/prometheus/issues/13676.
// and https://github.com/prometheus/prometheus/issues/8768.
func TestHangingNotifier(t *testing.T) {
- const (
- batches = 100
- alertsCount = maxBatchSize * batches
- )
+ synctest.Test(t, func(t *testing.T) {
+ const (
+ batches = 100
+ alertsCount = DefaultMaxBatchSize * batches
- var (
- sendTimeout = 100 * time.Millisecond
- sdUpdatert = sendTimeout / 2
+ faultyURL = "http://faulty:9093/api/v2/alerts"
+ functionalURL = "http://functional:9093/api/v2/alerts"
+ )
- done = make(chan struct{})
- )
+ var (
+ sendTimeout = 100 * time.Millisecond
+ sdUpdatert = sendTimeout / 2
+ )
- defer func() {
- close(done)
- }()
+ // Track which alertmanagers have been called.
+ var faultyCalled, functionalCalled atomic.Bool
- // Set up a faulty Alertmanager.
- var faultyCalled atomic.Bool
- faultyServer := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
- faultyCalled.Store(true)
- select {
- case <-done:
- case <-time.After(time.Hour):
- }
- }))
- faultyURL, err := url.Parse(faultyServer.URL)
- require.NoError(t, err)
-
- // Set up a functional Alertmanager.
- var functionalCalled atomic.Bool
- functionalServer := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
- functionalCalled.Store(true)
- }))
- functionalURL, err := url.Parse(functionalServer.URL)
- require.NoError(t, err)
-
- // Initialize the discovery manager
- // This is relevant as the updates aren't sent continually in real life, but only each updatert.
- // The old implementation of TestHangingNotifier didn't take that into account.
- ctx := t.Context()
- reg := prometheus.NewRegistry()
- sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
- require.NoError(t, err)
- sdManager := discovery.NewManager(
- ctx,
- promslog.NewNopLogger(),
- reg,
- sdMetrics,
- discovery.Name("sd-manager"),
- discovery.Updatert(sdUpdatert),
- )
- go sdManager.Run()
-
- // Set up the notifier with both faulty and functional Alertmanagers.
- notifier := NewManager(
- &Options{
- QueueCapacity: alertsCount,
- },
- model.UTF8Validation,
- nil,
- )
- notifier.alertmanagers = make(map[string]*alertmanagerSet)
- amCfg := config.DefaultAlertmanagerConfig
- amCfg.Timeout = model.Duration(sendTimeout)
- notifier.alertmanagers["config-0"] = &alertmanagerSet{
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return faultyURL.String() },
- },
- alertmanagerMock{
- urlf: func() string { return functionalURL.String() },
- },
- },
- cfg: &amCfg,
- metrics: notifier.metrics,
- }
- go notifier.Run(sdManager.SyncCh())
- defer notifier.Stop()
-
- require.Len(t, notifier.Alertmanagers(), 2)
-
- // Enqueue the alerts.
- var alerts []*Alert
- for i := range make([]struct{}, alertsCount) {
- alerts = append(alerts, &Alert{
- Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
- })
- }
- notifier.Send(alerts...)
-
- // Wait for the Alertmanagers to start receiving alerts.
- // 10*sdUpdatert is used as an arbitrary timeout here.
- timeout := time.After(10 * sdUpdatert)
-loop1:
- for {
- select {
- case <-timeout:
- t.Fatalf("Timeout waiting for the alertmanagers to be reached for the first time.")
- default:
- if faultyCalled.Load() && functionalCalled.Load() {
- break loop1
+ // Fake Do function that simulates alertmanager behavior in-process.
+ // This runs within the synctest bubble, so time.Sleep uses fake time.
+ fakeDo := func(ctx context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
+ url := req.URL.String()
+ if strings.Contains(url, "faulty") {
+ faultyCalled.Store(true)
+ // Faulty alertmanager hangs until context is canceled (by timeout).
+ <-ctx.Done()
+ return nil, ctx.Err()
}
+ // Functional alertmanager responds successfully.
+ // Sleep simulates network latency that real HTTP would have—without it,
+ // the queue drains instantly and the final queueLen() assertion fails.
+ functionalCalled.Store(true)
+ time.Sleep(sendTimeout / 2)
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: io.NopCloser(bytes.NewBuffer(nil)),
+ }, nil
}
- }
- // Request to remove the faulty Alertmanager.
- c := map[string]discovery.Configs{
- "config-0": {
- discovery.StaticConfig{
- &targetgroup.Group{
- Targets: []model.LabelSet{
- {
- model.AddressLabel: model.LabelValue(functionalURL.Host),
+ // Initialize the discovery manager
+ // This is relevant as the updates aren't sent continually in real life, but only each updatert.
+ // The old implementation of TestHangingNotifier didn't take that into account.
+ ctx, cancelSdManager := context.WithCancel(t.Context())
+ defer cancelSdManager()
+ reg := prometheus.NewRegistry()
+ sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
+ require.NoError(t, err)
+ sdManager := discovery.NewManager(
+ ctx,
+ promslog.NewNopLogger(),
+ reg,
+ sdMetrics,
+ discovery.Name("sd-manager"),
+ discovery.Updatert(sdUpdatert),
+ )
+ go sdManager.Run()
+
+ // Set up the notifier with both faulty and functional Alertmanagers.
+ notifier := NewManager(
+ &Options{
+ QueueCapacity: alertsCount,
+ Registerer: reg,
+ Do: fakeDo,
+ },
+ model.UTF8Validation,
+ nil,
+ )
+
+ notifier.alertmanagers = make(map[string]*alertmanagerSet)
+ amCfg := config.DefaultAlertmanagerConfig
+ amCfg.Timeout = model.Duration(sendTimeout)
+ notifier.alertmanagers["config-0"] = newTestAlertmanagerSet(&amCfg, nil, notifier.opts, notifier.metrics, faultyURL, functionalURL)
+
+ for _, ams := range notifier.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
+
+ go notifier.Run(sdManager.SyncCh())
+ t.Cleanup(func() {
+ notifier.Stop()
+ // Advance time so in-flight request timeouts fire.
+ time.Sleep(sendTimeout * 2)
+ })
+
+ require.Len(t, notifier.Alertmanagers(), 2)
+
+ // Enqueue the alerts.
+ var alerts []*Alert
+ for i := range make([]struct{}, alertsCount) {
+ alerts = append(alerts, &Alert{
+ Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
+ })
+ }
+ notifier.Send(alerts...)
+
+ // Wait for the Alertmanagers to start receiving alerts.
+ // Use a polling loop since we need to wait for goroutines to process.
+ for !faultyCalled.Load() || !functionalCalled.Load() {
+ time.Sleep(sdUpdatert)
+ synctest.Wait()
+ }
+
+ // Request to remove the faulty Alertmanager.
+ c := map[string]discovery.Configs{
+ "config-0": {
+ discovery.StaticConfig{
+ &targetgroup.Group{
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: "functional:9093",
+ },
},
},
},
},
- },
- }
- require.NoError(t, sdManager.ApplyConfig(c))
-
- // The notifier should not wait until the alerts queue is empty to apply the discovery changes
- // A faulty Alertmanager could cause each alert sending cycle to take up to AlertmanagerConfig.Timeout
- // The queue may never be emptied, as the arrival rate could be larger than the departure rate
- // It could even overflow and alerts could be dropped.
- timeout = time.After(batches * sendTimeout)
-loop2:
- for {
- select {
- case <-timeout:
- t.Fatalf("Timeout, the faulty alertmanager not removed on time.")
- default:
- // The faulty alertmanager was dropped.
- if len(notifier.Alertmanagers()) == 1 {
- // Prevent from TOCTOU.
- require.Positive(t, notifier.queueLen())
- break loop2
- }
- require.Positive(t, notifier.queueLen(), "The faulty alertmanager wasn't dropped before the alerts queue was emptied.")
}
- }
+ require.NoError(t, sdManager.ApplyConfig(c))
+
+ // Wait for the discovery update to be processed.
+ // Advance time to trigger the discovery manager's update interval.
+ // The faulty alertmanager should be dropped without waiting for its queue to drain.
+ for len(notifier.Alertmanagers()) != 1 {
+ time.Sleep(sdUpdatert)
+ synctest.Wait()
+ }
+ // The notifier should not wait until the alerts queue of the functional am is empty to apply the discovery changes.
+ require.NotZero(t, notifier.alertmanagers["config-0"].sendLoops[functionalURL].queueLen())
+ })
}
func TestStop_DrainingDisabled(t *testing.T) {
- releaseReceiver := make(chan struct{})
- receiverReceivedRequest := make(chan struct{}, 2)
- alertsReceived := atomic.NewInt64(0)
+ synctest.Test(t, func(t *testing.T) {
+ const alertmanagerURL = "http://alertmanager:9093/api/v2/alerts"
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Let the test know we've received a request.
- receiverReceivedRequest <- struct{}{}
+ handlerStarted := make(chan struct{})
+ alertsReceived := atomic.NewInt64(0)
- var alerts []*Alert
+ // Fake Do function that simulates a hanging alertmanager that times out.
+ fakeDo := func(ctx context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
+ var alerts []*Alert
+ b, err := io.ReadAll(req.Body)
+ if err != nil {
+ return nil, fmt.Errorf("read request body: %w", err)
+ }
+ if err := json.Unmarshal(b, &alerts); err != nil {
+ return nil, fmt.Errorf("unmarshal request body: %w", err)
+ }
+ alertsReceived.Add(int64(len(alerts)))
- b, err := io.ReadAll(r.Body)
- require.NoError(t, err)
+ // Signal arrival, then block until context times out.
+ handlerStarted <- struct{}{}
+ <-ctx.Done()
- err = json.Unmarshal(b, &alerts)
- require.NoError(t, err)
+ return nil, ctx.Err()
+ }
- alertsReceived.Add(int64(len(alerts)))
-
- // Wait for the test to release us.
- <-releaseReceiver
-
- w.WriteHeader(http.StatusOK)
- }))
- defer func() {
- server.Close()
- }()
-
- m := NewManager(
- &Options{
- QueueCapacity: 10,
- DrainOnShutdown: false,
- },
- model.UTF8Validation,
- nil,
- )
-
- m.alertmanagers = make(map[string]*alertmanagerSet)
-
- am1Cfg := config.DefaultAlertmanagerConfig
- am1Cfg.Timeout = model.Duration(time.Second)
-
- m.alertmanagers["1"] = &alertmanagerSet{
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server.URL },
+ reg := prometheus.NewRegistry()
+ m := NewManager(
+ &Options{
+ QueueCapacity: 10,
+ DrainOnShutdown: false,
+ Registerer: reg,
+ Do: fakeDo,
},
- },
- cfg: &am1Cfg,
- }
+ model.UTF8Validation,
+ nil,
+ )
- notificationManagerStopped := make(chan struct{})
+ m.alertmanagers = make(map[string]*alertmanagerSet)
- go func() {
- defer close(notificationManagerStopped)
- m.Run(nil)
- }()
+ am1Cfg := config.DefaultAlertmanagerConfig
+ am1Cfg.Timeout = model.Duration(time.Second)
+ m.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, nil, m.opts, m.metrics, alertmanagerURL)
- // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
- m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+ for _, ams := range m.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
- select {
- case <-receiverReceivedRequest:
- // Nothing more to do.
- case <-time.After(time.Second):
- require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
- }
+ // This will be waited on automatically when synctest.Test exits.
+ go m.Run(nil)
- m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
+ // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
+ m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
- // Stop the notification manager, pause to allow the shutdown to be observed, and then allow the receiver to proceed.
- m.Stop()
- time.Sleep(time.Second)
- close(releaseReceiver)
+ // Wait for receiver to get the request.
+ <-handlerStarted
- // Wait for the notification manager to stop and confirm only the first notification was sent.
- // The second notification should be dropped.
- select {
- case <-notificationManagerStopped:
- // Nothing more to do.
- case <-time.After(time.Second):
- require.FailNow(t, "gave up waiting for notification manager to stop")
- }
+ m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
- require.Equal(t, int64(1), alertsReceived.Load())
+ // Stop the notification manager, then advance time to trigger the request timeout.
+ m.Stop()
+ time.Sleep(time.Second)
+
+ // Allow goroutines to finish.
+ synctest.Wait()
+
+ // Confirm only the first notification was sent. The second notification should be dropped.
+ require.Equal(t, int64(1), alertsReceived.Load())
+ })
}
func TestStop_DrainingEnabled(t *testing.T) {
- releaseReceiver := make(chan struct{})
- receiverReceivedRequest := make(chan struct{}, 2)
- alertsReceived := atomic.NewInt64(0)
+ synctest.Test(t, func(t *testing.T) {
+ const alertmanagerURL = "http://alertmanager:9093/api/v2/alerts"
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Let the test know we've received a request.
- receiverReceivedRequest <- struct{}{}
+ handlerStarted := make(chan struct{}, 1)
+ alertsReceived := atomic.NewInt64(0)
- var alerts []*Alert
+ // Fake Do function that simulates alertmanager responding slowly but successfully.
+ fakeDo := func(_ context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
+ var alerts []*Alert
+ b, err := io.ReadAll(req.Body)
+ if err != nil {
+ return nil, fmt.Errorf("read request body: %w", err)
+ }
+ if err := json.Unmarshal(b, &alerts); err != nil {
+ return nil, fmt.Errorf("unmarshal request body: %w", err)
+ }
+ alertsReceived.Add(int64(len(alerts)))
- b, err := io.ReadAll(r.Body)
- require.NoError(t, err)
+ // Signal arrival.
+ handlerStarted <- struct{}{}
- err = json.Unmarshal(b, &alerts)
- require.NoError(t, err)
+ // Block to allow for alert-2 to be queued while this request is in-flight.
+ time.Sleep(100 * time.Millisecond)
- alertsReceived.Add(int64(len(alerts)))
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: io.NopCloser(bytes.NewBuffer(nil)),
+ }, nil
+ }
- // Wait for the test to release us.
- <-releaseReceiver
-
- w.WriteHeader(http.StatusOK)
- }))
- defer func() {
- server.Close()
- }()
-
- m := NewManager(
- &Options{
- QueueCapacity: 10,
- DrainOnShutdown: true,
- },
- model.UTF8Validation,
- nil,
- )
-
- m.alertmanagers = make(map[string]*alertmanagerSet)
-
- am1Cfg := config.DefaultAlertmanagerConfig
- am1Cfg.Timeout = model.Duration(time.Second)
-
- m.alertmanagers["1"] = &alertmanagerSet{
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server.URL },
+ reg := prometheus.NewRegistry()
+ m := NewManager(
+ &Options{
+ QueueCapacity: 10,
+ DrainOnShutdown: true,
+ Registerer: reg,
+ Do: fakeDo,
},
- },
- cfg: &am1Cfg,
+ model.UTF8Validation,
+ nil,
+ )
+
+ m.alertmanagers = make(map[string]*alertmanagerSet)
+
+ am1Cfg := config.DefaultAlertmanagerConfig
+ am1Cfg.Timeout = model.Duration(time.Second)
+ m.alertmanagers["1"] = newTestAlertmanagerSet(&am1Cfg, nil, m.opts, m.metrics, alertmanagerURL)
+
+ for _, ams := range m.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
+
+ go m.Run(nil)
+
+ // Queue two alerts. The first should be immediately sent to the receiver.
+ m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+
+ // Wait for receiver to get the first request.
+ <-handlerStarted
+
+ // Send second alert while first is still being processed (fakeDo has 100ms delay).
+ m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
+
+ // Stop the notification manager. With DrainOnShutdown=true, this should wait
+ // for the queue to drain, ensuring both alerts are sent.
+ m.Stop()
+
+ // Advance time so in-flight requests complete.
+ time.Sleep(time.Second)
+
+ // Allow goroutines to finish.
+ synctest.Wait()
+
+ // Confirm both notifications were sent.
+ require.Equal(t, int64(2), alertsReceived.Load())
+ })
+}
+
+// TestQueuesDrainingOnApplyConfig ensures that when an alertmanagerSet disappears after an ApplyConfig(), its
+// sendLoops queues are drained only when DrainOnShutdown is set.
+func TestQueuesDrainingOnApplyConfig(t *testing.T) {
+ for _, drainOnShutDown := range []bool{false, true} {
+ t.Run(strconv.FormatBool(drainOnShutDown), func(t *testing.T) {
+ t.Parallel()
+ alertSent := make(chan struct{})
+
+ server := newImmediateAlertManager(alertSent)
+ defer server.Close()
+
+ h := NewManager(&Options{QueueCapacity: 10, DrainOnShutdown: drainOnShutDown}, model.UTF8Validation, nil)
+ h.alertmanagers = make(map[string]*alertmanagerSet)
+
+ amCfg := config.DefaultAlertmanagerConfig
+ amCfg.Timeout = model.Duration(time.Second)
+ h.alertmanagers["1"] = newTestAlertmanagerSet(&amCfg, nil, h.opts, h.metrics, server.URL)
+
+ // The send loops were not started, nothing will be sent.
+ h.Send([]*Alert{{Labels: labels.FromStrings("alertname", "foo")}}...)
+
+ // Remove the alertmanagerSet.
+ h.ApplyConfig(&config.Config{})
+
+ select {
+ case <-alertSent:
+ if !drainOnShutDown {
+ require.FailNow(t, "no alert should be sent")
+ }
+ case <-time.After(100 * time.Millisecond):
+ if drainOnShutDown {
+ require.FailNow(t, "alert wasn't received")
+ }
+ }
+ })
}
-
- notificationManagerStopped := make(chan struct{})
-
- go func() {
- defer close(notificationManagerStopped)
- m.Run(nil)
- }()
-
- // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
- m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
-
- select {
- case <-receiverReceivedRequest:
- // Nothing more to do.
- case <-time.After(time.Second):
- require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
- }
-
- m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
-
- // Stop the notification manager and allow the receiver to proceed.
- m.Stop()
- close(releaseReceiver)
-
- // Wait for the notification manager to stop and confirm both notifications were sent.
- select {
- case <-notificationManagerStopped:
- // Nothing more to do.
- case <-time.After(200 * time.Millisecond):
- require.FailNow(t, "gave up waiting for notification manager to stop")
- }
-
- require.Equal(t, int64(2), alertsReceived.Load())
}
func TestApplyConfig(t *testing.T) {
@@ -1152,7 +1142,7 @@ func TestAlerstRelabelingIsIsolated(t *testing.T) {
defer server1.Close()
defer server2.Close()
- h := NewManager(&Options{}, model.UTF8Validation, nil)
+ h := NewManager(&Options{QueueCapacity: 10}, model.UTF8Validation, nil)
h.alertmanagers = make(map[string]*alertmanagerSet)
am1Cfg := config.DefaultAlertmanagerConfig
@@ -1172,34 +1162,29 @@ func TestAlerstRelabelingIsIsolated(t *testing.T) {
am2Cfg.Timeout = model.Duration(time.Second)
h.alertmanagers = map[string]*alertmanagerSet{
- "am1": {
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server1.URL },
- },
- },
- cfg: &am1Cfg,
- },
- "am2": {
- ams: []alertmanager{
- alertmanagerMock{
- urlf: func() string { return server2.URL },
- },
- },
- cfg: &am2Cfg,
- },
+ "am1": newTestAlertmanagerSet(&am1Cfg, nil, h.opts, h.metrics, server1.URL),
+ "am2": newTestAlertmanagerSet(&am2Cfg, nil, h.opts, h.metrics, server2.URL),
}
+ // Start send loops.
+ for _, ams := range h.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
+ defer func() {
+ for _, ams := range h.alertmanagers {
+ ams.cleanSendLoops(ams.ams...)
+ }
+ }()
+
testAlert := &Alert{
Labels: labels.FromStrings("alertname", "test"),
}
- h.queue = []*Alert{testAlert}
expected1 = append(expected1, &Alert{
Labels: labels.FromStrings("alertname", "test", "parasite", "yes"),
})
- // am2 shouldn't get the parasite label.
+ // Am2 shouldn't get the parasite label.
expected2 = append(expected2, &Alert{
Labels: labels.FromStrings("alertname", "test"),
})
@@ -1213,6 +1198,363 @@ func TestAlerstRelabelingIsIsolated(t *testing.T) {
}
}
- require.True(t, h.sendAll(h.queue...))
+ h.Send(testAlert)
checkNoErr()
}
+
+// Regression test for https://github.com/prometheus/prometheus/issues/7676
+// The test creates a black hole alertmanager that never responds to any requests.
+// The alertmanager_config.timeout is set to infinite (1 year).
+// We check that the notifier does not hang and throughput is not affected.
+func TestNotifierQueueIndependentOfFailedAlertmanager(t *testing.T) {
+ stopBlackHole := make(chan struct{})
+ blackHoleAM := newBlackHoleAlertmanager(stopBlackHole)
+ defer func() {
+ close(stopBlackHole)
+ blackHoleAM.Close()
+ }()
+
+ doneAlertReceive := make(chan struct{})
+ immediateAM := newImmediateAlertManager(doneAlertReceive)
+ defer immediateAM.Close()
+
+ do := func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ return client.Do(req.WithContext(ctx))
+ }
+
+ reg := prometheus.NewRegistry()
+ h := NewManager(&Options{
+ Do: do,
+ QueueCapacity: 10,
+ MaxBatchSize: DefaultMaxBatchSize,
+ Registerer: reg,
+ }, model.UTF8Validation, nil)
+
+ h.alertmanagers = make(map[string]*alertmanagerSet)
+
+ amCfg := config.DefaultAlertmanagerConfig
+ amCfg.Timeout = model.Duration(time.Hour * 24 * 365)
+ h.alertmanagers["1"] = newTestAlertmanagerSet(&amCfg, http.DefaultClient, h.opts, h.metrics, blackHoleAM.URL)
+ h.alertmanagers["2"] = newTestAlertmanagerSet(&amCfg, http.DefaultClient, h.opts, h.metrics, immediateAM.URL)
+
+ doneSendAll := make(chan struct{})
+ for _, ams := range h.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
+ defer func() {
+ for _, ams := range h.alertmanagers {
+ ams.cleanSendLoops(ams.ams...)
+ }
+ }()
+
+ go func() {
+ h.Send(&Alert{
+ Labels: labels.FromStrings("alertname", "test"),
+ })
+ close(doneSendAll)
+ }()
+
+ select {
+ case <-doneAlertReceive:
+ // This is the happy case, the alert was received by the immediate alertmanager.
+ case <-time.After(2 * time.Second):
+ t.Fatal("Timeout waiting for alert to be received by immediate alertmanager")
+ }
+
+ select {
+ case <-doneSendAll:
+ // This is the happy case, the sendAll function returned.
+ case <-time.After(2 * time.Second):
+ t.Fatal("Timeout waiting for sendAll to return")
+ }
+}
+
+// TestApplyConfigSendLoopsNotStoppedOnKeyChange reproduces a bug where sendLoops
+// are incorrectly stopped when the alertmanager config key changes but the config
+// content (and thus its hash) remains the same.
+//
+// The bug scenario:
+// 1. Old config has alertmanager set with key "config-0" and config hash X
+// 2. New config has TWO alertmanager sets where the SECOND one ("config-1") has hash X
+// 3. sendLoops are transferred from old "config-0" to new "config-1" (hash match)
+// 4. Cleanup checks if key "config-0" exists in new config — it does (different config)
+// 5. No cleanup happens for old "config-0", sendLoops work correctly
+//
+// However, there's a variant where the key disappears completely:
+// 1. Old config: "config-0" with hash X, "config-1" with hash Y
+// 2. New config: "config-0" with hash Y (was "config-1"), no "config-1"
+// 3. sendLoops from old "config-0" (hash X) have nowhere to go
+// 4. Cleanup sees "config-1" doesn't exist, tries to clean up old "config-1"
+//
+// This test verifies that when config keys change, sendLoops are correctly preserved.
+func TestApplyConfigSendLoopsNotStoppedOnKeyChange(t *testing.T) {
+ alertReceived := make(chan struct{}, 10)
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ select {
+ case alertReceived <- struct{}{}:
+ default:
+ }
+ }))
+ defer server.Close()
+
+ targetURL := server.Listener.Addr().String()
+ targetGroup := &targetgroup.Group{
+ Targets: []model.LabelSet{
+ {
+ "__address__": model.LabelValue(targetURL),
+ },
+ },
+ }
+
+ n := NewManager(&Options{QueueCapacity: 10}, model.UTF8Validation, nil)
+ cfg := &config.Config{}
+
+ // Initial config with TWO alertmanager configs.
+ // "config-0" uses file_sd_configs with foo.json (hash X)
+ // "config-1" uses file_sd_configs with bar.json (hash Y)
+ s := `
+alerting:
+ alertmanagers:
+ - file_sd_configs:
+ - files:
+ - foo.json
+ - file_sd_configs:
+ - files:
+ - bar.json
+`
+ require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg))
+ require.NoError(t, n.ApplyConfig(cfg))
+
+ // Reload with target groups to discover alertmanagers.
+ tgs := map[string][]*targetgroup.Group{
+ "config-0": {targetGroup},
+ "config-1": {targetGroup},
+ }
+ n.reload(tgs)
+ require.Len(t, n.Alertmanagers(), 2)
+
+ // Verify sendLoops exist for both configs.
+ require.Len(t, n.alertmanagers["config-0"].sendLoops, 1)
+ require.Len(t, n.alertmanagers["config-1"].sendLoops, 1)
+
+ // Start the send loops.
+ for _, ams := range n.alertmanagers {
+ ams.startSendLoops(ams.ams)
+ }
+ defer func() {
+ for _, ams := range n.alertmanagers {
+ ams.mtx.Lock()
+ ams.cleanSendLoops(ams.ams...)
+ ams.mtx.Unlock()
+ }
+ }()
+
+ // Send an alert and verify it's received (twice, once per alertmanager set).
+ n.Send(&Alert{Labels: labels.FromStrings("alertname", "test1")})
+ for range 2 {
+ select {
+ case <-alertReceived:
+ // Good, alert was sent.
+ case <-time.After(2 * time.Second):
+ require.FailNow(t, "timeout waiting for first alert")
+ }
+ }
+
+ // Apply a new config that REVERSES the order of alertmanager configs.
+ // Now "config-0" has hash Y (was bar.json) and "config-1" has hash X (was foo.json).
+ // The sendLoops should be transferred based on hash matching.
+ s = `
+alerting:
+ alertmanagers:
+ - file_sd_configs:
+ - files:
+ - bar.json
+ - file_sd_configs:
+ - files:
+ - foo.json
+`
+ require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg))
+ require.NoError(t, n.ApplyConfig(cfg))
+
+ // CRITICAL CHECK: After ApplyConfig but BEFORE reload, the sendLoops should
+ // have been transferred based on hash matching and NOT stopped.
+ // - Old "config-0" (foo.json, hash X) -> New "config-1" (foo.json, hash X)
+ // - Old "config-1" (bar.json, hash Y) -> New "config-0" (bar.json, hash Y)
+ // Both old keys exist in new config, so no cleanup should happen.
+ require.Len(t, n.alertmanagers["config-0"].sendLoops, 1, "sendLoops should be transferred to config-0")
+ require.Len(t, n.alertmanagers["config-1"].sendLoops, 1, "sendLoops should be transferred to config-1")
+
+ // Reload with target groups for the new config.
+ tgs = map[string][]*targetgroup.Group{
+ "config-0": {targetGroup},
+ "config-1": {targetGroup},
+ }
+ n.reload(tgs)
+
+ // The alertmanagers should still be discoverable.
+ require.Len(t, n.Alertmanagers(), 2)
+
+ // The critical test: send another alert and verify it's received by both.
+ n.Send(&Alert{Labels: labels.FromStrings("alertname", "test2")})
+ for range 2 {
+ select {
+ case <-alertReceived:
+ // Good, alert was sent - sendLoops are still working.
+ case <-time.After(2 * time.Second):
+ require.FailNow(t, "timeout waiting for second alert - sendLoops may have been incorrectly stopped")
+ }
+ }
+}
+
+// TestApplyConfigDuplicateHashSharesSendLoops tests a bug where multiple new
+// alertmanager configs with identical content (same hash) all receive the same
+// sendLoops map reference, causing shared mutable state between alertmanagerSets.
+//
+// Bug scenario:
+// 1. Old config: "config-0" with hash X
+// 2. New config: "config-0" AND "config-1" both with hash X (identical configs)
+// 3. Both new sets get `sendLoops = oldAmSet.sendLoops` (same map reference!)
+// 4. Now config-0 and config-1 share the same sendLoops map
+// 5. When config-1's alertmanager is removed via sync(), it cleans up the shared
+// sendLoops, breaking config-0's ability to send alerts
+func TestApplyConfigDuplicateHashSharesSendLoops(t *testing.T) {
+ n := NewManager(&Options{QueueCapacity: 10}, model.UTF8Validation, nil)
+ cfg := &config.Config{}
+
+ // Initial config with ONE alertmanager.
+ s := `
+alerting:
+ alertmanagers:
+ - file_sd_configs:
+ - files:
+ - foo.json
+`
+ require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg))
+ require.NoError(t, n.ApplyConfig(cfg))
+
+ targetGroup := &targetgroup.Group{
+ Targets: []model.LabelSet{
+ {"__address__": "alertmanager:9093"},
+ },
+ }
+ tgs := map[string][]*targetgroup.Group{"config-0": {targetGroup}}
+ n.reload(tgs)
+
+ require.Len(t, n.alertmanagers["config-0"].sendLoops, 1)
+
+ // Apply a new config with TWO IDENTICAL alertmanager configs.
+ // Both have the same hash, so both will receive sendLoops from the same old set.
+ s = `
+alerting:
+ alertmanagers:
+ - file_sd_configs:
+ - files:
+ - foo.json
+ - file_sd_configs:
+ - files:
+ - foo.json
+`
+ require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg))
+ require.NoError(t, n.ApplyConfig(cfg))
+
+ // Reload with target groups for both configs - same alertmanager URL for both.
+ tgs = map[string][]*targetgroup.Group{
+ "config-0": {targetGroup},
+ "config-1": {targetGroup},
+ }
+ n.reload(tgs)
+
+ // Both alertmanagerSets should have independent sendLoops.
+ sendLoops0 := n.alertmanagers["config-0"].sendLoops
+ sendLoops1 := n.alertmanagers["config-1"].sendLoops
+
+ require.Len(t, sendLoops0, 1, "config-0 should have sendLoops")
+ require.Len(t, sendLoops1, 1, "config-1 should have sendLoops")
+
+ // Verify that the two alertmanagerSets have INDEPENDENT sendLoops maps.
+ // They should NOT share the same sendLoop objects.
+ for k := range sendLoops0 {
+ if loop1, ok := sendLoops1[k]; ok {
+ require.NotSame(t, sendLoops0[k], loop1,
+ "config-0 and config-1 should have independent sendLoop instances, not shared references")
+ }
+ }
+}
+
+// TestApplyConfigHashChangeLeaksSendLoops tests a bug where sendLoops goroutines
+// are leaked when the config key remains the same but the config hash changes.
+//
+// Bug scenario:
+// 1. Old config has "config-0" with hash H1 and running sendLoops
+// 2. New config has "config-0" with hash H2 (modified config)
+// 3. Since hash differs, sendLoops are NOT transferred to the new alertmanagerSet
+// 4. Cleanup only checks if key exists in amSets - it does, so no cleanup
+// 5. Old sendLoops goroutines continue running and are never stopped
+func TestApplyConfigHashChangeLeaksSendLoops(t *testing.T) {
+ n := NewManager(&Options{QueueCapacity: 10}, model.UTF8Validation, nil)
+ cfg := &config.Config{}
+
+ // Initial config with one alertmanager.
+ s := `
+alerting:
+ alertmanagers:
+ - file_sd_configs:
+ - files:
+ - foo.json
+`
+ require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg))
+ require.NoError(t, n.ApplyConfig(cfg))
+
+ targetGroup := &targetgroup.Group{
+ Targets: []model.LabelSet{
+ {"__address__": "alertmanager:9093"},
+ },
+ }
+ tgs := map[string][]*targetgroup.Group{"config-0": {targetGroup}}
+ n.reload(tgs)
+
+ // Capture the old sendLoop.
+ oldSendLoops := n.alertmanagers["config-0"].sendLoops
+ require.Len(t, oldSendLoops, 1)
+ var oldSendLoop *sendLoop
+ for _, sl := range oldSendLoops {
+ oldSendLoop = sl
+ }
+
+ // Apply a new config with DIFFERENT hash (added path_prefix).
+ s = `
+alerting:
+ alertmanagers:
+ - file_sd_configs:
+ - files:
+ - foo.json
+ path_prefix: /changed
+`
+ require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg))
+ require.NoError(t, n.ApplyConfig(cfg))
+
+ // The old sendLoop should have been stopped since hash changed.
+ // Check that the stopped channel is closed.
+ select {
+ case <-oldSendLoop.stopped:
+ // Good - sendLoop was properly stopped
+ default:
+ t.Fatal("BUG: old sendLoop was not stopped when config hash changed - goroutine leak")
+ }
+}
+
+func newBlackHoleAlertmanager(stop <-chan struct{}) *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ // Do nothing, wait to be canceled.
+ <-stop
+ w.WriteHeader(http.StatusOK)
+ }))
+}
+
+func newImmediateAlertManager(done chan<- struct{}) *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ close(done)
+ }))
+}
diff --git a/notifier/metric.go b/notifier/metric.go
index d10a02614c..a150331ab1 100644
--- a/notifier/metric.go
+++ b/notifier/metric.go
@@ -24,17 +24,13 @@ type alertMetrics struct {
latencyHistogram *prometheus.HistogramVec
errors *prometheus.CounterVec
sent *prometheus.CounterVec
- dropped prometheus.Counter
- queueLength prometheus.GaugeFunc
+ dropped *prometheus.CounterVec
+ queueLength *prometheus.GaugeVec
queueCapacity prometheus.Gauge
alertmanagersDiscovered prometheus.GaugeFunc
}
-func newAlertMetrics(
- r prometheus.Registerer,
- queueCap int,
- queueLen, alertmanagersDiscovered func() float64,
-) *alertMetrics {
+func newAlertMetrics(r prometheus.Registerer, alertmanagersDiscovered func() float64) *alertMetrics {
m := &alertMetrics{
latencySummary: prometheus.NewSummaryVec(prometheus.SummaryOpts{
Namespace: namespace,
@@ -74,18 +70,18 @@ func newAlertMetrics(
},
[]string{alertmanagerLabel},
),
- dropped: prometheus.NewCounter(prometheus.CounterOpts{
+ dropped: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dropped_total",
Help: "Total number of alerts dropped due to errors when sending to Alertmanager.",
- }),
- queueLength: prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ }, []string{alertmanagerLabel}),
+ queueLength: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_length",
Help: "The number of alert notifications in the queue.",
- }, queueLen),
+ }, []string{alertmanagerLabel}),
queueCapacity: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
@@ -98,8 +94,6 @@ func newAlertMetrics(
}, alertmanagersDiscovered),
}
- m.queueCapacity.Set(float64(queueCap))
-
if r != nil {
r.MustRegister(
m.latencySummary,
diff --git a/notifier/sendloop.go b/notifier/sendloop.go
new file mode 100644
index 0000000000..0413390265
--- /dev/null
+++ b/notifier/sendloop.go
@@ -0,0 +1,273 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package notifier
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log/slog"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/prometheus/prometheus/config"
+)
+
+type sendLoop struct {
+ alertmanagerURL string
+
+ cfg *config.AlertmanagerConfig
+ client *http.Client
+ opts *Options
+
+ metrics *alertMetrics
+
+ mtx sync.RWMutex
+ queue []*Alert
+ hasWork chan struct{}
+ stopped chan struct{}
+ stopOnce sync.Once
+
+ logger *slog.Logger
+}
+
+func newSendLoop(
+ alertmanagerURL string,
+ client *http.Client,
+ cfg *config.AlertmanagerConfig,
+ opts *Options,
+ logger *slog.Logger,
+ metrics *alertMetrics,
+) *sendLoop {
+ // This will initialize the Counters for the AM to 0 and set the static queue capacity gauge.
+ metrics.dropped.WithLabelValues(alertmanagerURL)
+ metrics.errors.WithLabelValues(alertmanagerURL)
+ metrics.sent.WithLabelValues(alertmanagerURL)
+ metrics.queueLength.WithLabelValues(alertmanagerURL)
+
+ return &sendLoop{
+ alertmanagerURL: alertmanagerURL,
+ client: client,
+ cfg: cfg,
+ opts: opts,
+ logger: logger,
+ metrics: metrics,
+ queue: make([]*Alert, 0, opts.QueueCapacity),
+ hasWork: make(chan struct{}, 1),
+ stopped: make(chan struct{}),
+ }
+}
+
+func (s *sendLoop) add(alerts ...*Alert) {
+ select {
+ case <-s.stopped:
+ return
+ default:
+ }
+
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ var dropped int
+ // Queue capacity should be significantly larger than a single alert
+ // batch could be.
+ if d := len(alerts) - s.opts.QueueCapacity; d > 0 {
+ s.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "count", d)
+ dropped += d
+ alerts = alerts[d:]
+ }
+
+ // If the queue is full, remove the oldest alerts in favor
+ // of newer ones.
+ if d := (len(s.queue) + len(alerts)) - s.opts.QueueCapacity; d > 0 {
+ s.logger.Warn("Alert notification queue full, dropping alerts", "count", d)
+ dropped += d
+ s.queue = s.queue[d:]
+ }
+
+ s.queue = append(s.queue, alerts...)
+
+ // Notify sending goroutine that there are alerts to be processed.
+ // If we cannot send on the channel, it means the signal already exists
+ // and has not been consumed yet.
+ s.notifyWork()
+
+ s.metrics.queueLength.WithLabelValues(s.alertmanagerURL).Set(float64(len(s.queue)))
+ if dropped > 0 {
+ s.metrics.dropped.WithLabelValues(s.alertmanagerURL).Add(float64(dropped))
+ }
+}
+
+func (s *sendLoop) notifyWork() {
+ select {
+ case <-s.stopped:
+ return
+ case s.hasWork <- struct{}{}:
+ default:
+ }
+}
+
+func (s *sendLoop) stop() {
+ s.stopOnce.Do(func() {
+ s.logger.Debug("Stopping send loop")
+ close(s.stopped)
+
+ if s.opts.DrainOnShutdown {
+ s.drainQueue()
+ } else {
+ ql := s.queueLen()
+ s.logger.Warn("Alert notification queue not drained on shutdown, dropping alerts", "count", ql)
+ s.metrics.dropped.WithLabelValues(s.alertmanagerURL).Add(float64(ql))
+ }
+
+ s.metrics.latencySummary.DeleteLabelValues(s.alertmanagerURL)
+ s.metrics.latencyHistogram.DeleteLabelValues(s.alertmanagerURL)
+ s.metrics.sent.DeleteLabelValues(s.alertmanagerURL)
+ s.metrics.dropped.DeleteLabelValues(s.alertmanagerURL)
+ s.metrics.errors.DeleteLabelValues(s.alertmanagerURL)
+ s.metrics.queueLength.DeleteLabelValues(s.alertmanagerURL)
+ })
+}
+
+func (s *sendLoop) drainQueue() {
+ for s.queueLen() > 0 {
+ s.sendOneBatch()
+ }
+}
+
+func (s *sendLoop) queueLen() int {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+
+ return len(s.queue)
+}
+
+func (s *sendLoop) nextBatch() []*Alert {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ var alerts []*Alert
+ if maxBatchSize := s.opts.MaxBatchSize; len(s.queue) > maxBatchSize {
+ alerts = append(make([]*Alert, 0, maxBatchSize), s.queue[:maxBatchSize]...)
+ s.queue = s.queue[maxBatchSize:]
+ } else {
+ alerts = append(make([]*Alert, 0, len(s.queue)), s.queue...)
+ s.queue = s.queue[:0]
+ }
+ s.metrics.queueLength.WithLabelValues(s.alertmanagerURL).Set(float64(len(s.queue)))
+
+ return alerts
+}
+
+func (s *sendLoop) sendOneBatch() {
+ alerts := s.nextBatch()
+
+ if !s.sendAll(alerts) {
+ s.metrics.dropped.WithLabelValues(s.alertmanagerURL).Add(float64(len(alerts)))
+ }
+}
+
+// loop continuously consumes the notifications queue and sends alerts to
+// the Alertmanager.
+func (s *sendLoop) loop() {
+ s.logger.Debug("Starting send loop")
+ for {
+ // If we've been asked to stop, that takes priority over sending any further notifications.
+ select {
+ case <-s.stopped:
+ return
+ default:
+ select {
+ case <-s.stopped:
+ return
+ case <-s.hasWork:
+ s.sendOneBatch()
+
+ // If the queue still has items left, kick off the next iteration.
+ if s.queueLen() > 0 {
+ s.notifyWork()
+ }
+ }
+ }
+ }
+}
+
+func (s *sendLoop) sendAll(alerts []*Alert) bool {
+ if len(alerts) == 0 {
+ return true
+ }
+
+ begin := time.Now()
+
+ var payload []byte
+ var err error
+ switch s.cfg.APIVersion {
+ case config.AlertmanagerAPIVersionV2:
+ openAPIAlerts := alertsToOpenAPIAlerts(alerts)
+ payload, err = json.Marshal(openAPIAlerts)
+ if err != nil {
+ s.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err)
+ return false
+ }
+
+ default:
+ s.logger.Error(
+ fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", s.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions),
+ "err", err,
+ )
+ return false
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.cfg.Timeout))
+ defer cancel()
+
+ if err := s.sendOne(ctx, s.client, s.alertmanagerURL, payload); err != nil {
+ s.logger.Error("Error sending alerts", "count", len(alerts), "err", err)
+ s.metrics.errors.WithLabelValues(s.alertmanagerURL).Add(float64(len(alerts)))
+ return false
+ }
+ durationSeconds := time.Since(begin).Seconds()
+ s.metrics.latencySummary.WithLabelValues(s.alertmanagerURL).Observe(durationSeconds)
+ s.metrics.latencyHistogram.WithLabelValues(s.alertmanagerURL).Observe(durationSeconds)
+ s.metrics.sent.WithLabelValues(s.alertmanagerURL).Add(float64(len(alerts)))
+
+ return true
+}
+
+func (s *sendLoop) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error {
+ req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(b))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ req.Header.Set("Content-Type", contentTypeJSON)
+ resp, err := s.opts.Do(ctx, c, req)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ }()
+
+ // Any HTTP status 2xx is OK.
+ if resp.StatusCode/100 != 2 {
+ return fmt.Errorf("bad response status %s", resp.Status)
+ }
+
+ return nil
+}
diff --git a/notifier/sendloop_test.go b/notifier/sendloop_test.go
new file mode 100644
index 0000000000..1e04c0d9a0
--- /dev/null
+++ b/notifier/sendloop_test.go
@@ -0,0 +1,187 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package notifier
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "log/slog"
+ "net/http"
+ "strconv"
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+ prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+func TestCustomDo(t *testing.T) {
+ const testURL = "http://testurl.com/"
+ const testBody = "testbody"
+
+ var received bool
+ h := sendLoop{
+ opts: &Options{
+ Do: func(_ context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
+ received = true
+ body, err := io.ReadAll(req.Body)
+
+ require.NoError(t, err)
+
+ require.Equal(t, testBody, string(body))
+
+ require.Equal(t, testURL, req.URL.String())
+
+ return &http.Response{
+ Body: io.NopCloser(bytes.NewBuffer(nil)),
+ }, nil
+ },
+ },
+ }
+
+ h.sendOne(context.Background(), nil, testURL, []byte(testBody))
+
+ require.True(t, received)
+}
+
+func TestHandlerNextBatch(t *testing.T) {
+ sendLoop := newSendLoop("http://mock", nil, &config.DefaultAlertmanagerConfig, &Options{MaxBatchSize: DefaultMaxBatchSize}, slog.New(slog.DiscardHandler), newAlertMetrics(prometheus.NewRegistry(), nil))
+
+ for i := range make([]struct{}, 2*DefaultMaxBatchSize+1) {
+ sendLoop.queue = append(sendLoop.queue, &Alert{
+ Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
+ })
+ }
+ expected := append([]*Alert{}, sendLoop.queue...)
+
+ require.NoError(t, alertsEqual(expected[0:DefaultMaxBatchSize], sendLoop.nextBatch()))
+ require.NoError(t, alertsEqual(expected[DefaultMaxBatchSize:2*DefaultMaxBatchSize], sendLoop.nextBatch()))
+ require.NoError(t, alertsEqual(expected[2*DefaultMaxBatchSize:], sendLoop.nextBatch()))
+ require.Empty(t, sendLoop.queue)
+}
+
+func TestAddAlertsToQueue(t *testing.T) {
+ alert1 := &Alert{Labels: labels.FromStrings("alertname", "existing1")}
+ alert2 := &Alert{Labels: labels.FromStrings("alertname", "existing2")}
+
+ s := newSendLoop("http://foo.bar/", nil, nil, &Options{QueueCapacity: 5}, slog.New(slog.DiscardHandler), newAlertMetrics(prometheus.NewRegistry(), nil))
+ s.add(alert1, alert2)
+ require.Equal(t, []*Alert{alert1, alert2}, s.queue)
+ require.Len(t, s.queue, 2)
+
+ alert3 := &Alert{Labels: labels.FromStrings("alertname", "new1")}
+ alert4 := &Alert{Labels: labels.FromStrings("alertname", "new2")}
+
+ // Add new alerts to the queue, expect 0 dropped
+ s.add(alert3, alert4)
+ require.Zero(t, prom_testutil.ToFloat64(s.metrics.dropped.WithLabelValues(s.alertmanagerURL)))
+
+ // Verify all new alerts were added to the queue
+ require.Equal(t, []*Alert{alert1, alert2, alert3, alert4}, s.queue)
+ require.Len(t, s.queue, 4)
+}
+
+func TestAddAlertsToQueueExceedingCapacity(t *testing.T) {
+ alert1 := &Alert{Labels: labels.FromStrings("alertname", "alert1")}
+ alert2 := &Alert{Labels: labels.FromStrings("alertname", "alert2")}
+
+ s := newSendLoop("http://foo.bar/", nil, nil, &Options{QueueCapacity: 3}, slog.New(slog.DiscardHandler), newAlertMetrics(prometheus.NewRegistry(), nil))
+ s.add(alert1, alert2)
+
+ alert3 := &Alert{Labels: labels.FromStrings("alertname", "alert3")}
+ alert4 := &Alert{Labels: labels.FromStrings("alertname", "alert4")}
+
+ // Add new alerts to queue, expect 1 dropped
+ s.add(alert3, alert4)
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(s.metrics.dropped.WithLabelValues(s.alertmanagerURL)))
+
+ // Verify all new alerts were added to the queue
+ require.Equal(t, []*Alert{alert2, alert3, alert4}, s.queue)
+}
+
+func TestAddAlertsToQueueExceedingTotalCapacity(t *testing.T) {
+ alert1 := &Alert{Labels: labels.FromStrings("alertname", "alert1")}
+ alert2 := &Alert{Labels: labels.FromStrings("alertname", "alert2")}
+
+ s := newSendLoop("http://foo.bar/", nil, nil, &Options{QueueCapacity: 3}, slog.New(slog.DiscardHandler), newAlertMetrics(prometheus.NewRegistry(), nil))
+ s.add(alert1, alert2)
+
+ alert3 := &Alert{Labels: labels.FromStrings("alertname", "alert3")}
+ alert4 := &Alert{Labels: labels.FromStrings("alertname", "alert4")}
+ alert5 := &Alert{Labels: labels.FromStrings("alertname", "alert5")}
+ alert6 := &Alert{Labels: labels.FromStrings("alertname", "alert6")}
+
+ // Add new alerts to queue, expect 3 dropped: 1 from new batch + 2 from existing queued items
+ s.add(alert3, alert4, alert5, alert6)
+ require.Equal(t, 3.0, prom_testutil.ToFloat64(s.metrics.dropped.WithLabelValues(s.alertmanagerURL)))
+
+ // Verify all new alerts were added to the queue
+ require.Equal(t, []*Alert{alert4, alert5, alert6}, s.queue)
+}
+
+func TestNextBatchAlertsFromQueue(t *testing.T) {
+ s := newSendLoop("http://foo.bar/", nil, nil, &Options{QueueCapacity: 5, MaxBatchSize: 3}, slog.New(slog.DiscardHandler), newAlertMetrics(prometheus.NewRegistry(), nil))
+
+ alert1 := &Alert{Labels: labels.FromStrings("alertname", "alert1")}
+ alert2 := &Alert{Labels: labels.FromStrings("alertname", "alert2")}
+ alert3 := &Alert{Labels: labels.FromStrings("alertname", "alert3")}
+ s.add(alert1, alert2, alert3)
+
+ // Test batch-size alerts in the queue
+ require.Equal(t, []*Alert{alert1, alert2, alert3}, s.nextBatch())
+ require.Empty(t, s.nextBatch())
+
+ // Test full queue
+ alert4 := &Alert{Labels: labels.FromStrings("alertname", "alert4")}
+ alert5 := &Alert{Labels: labels.FromStrings("alertname", "alert5")}
+ s.add(alert1, alert2, alert3, alert4, alert5)
+ require.Equal(t, []*Alert{alert1, alert2, alert3}, s.nextBatch())
+ require.Equal(t, []*Alert{alert4, alert5}, s.nextBatch())
+ require.Empty(t, s.nextBatch())
+}
+
+func TestMetrics(t *testing.T) {
+ const alertmanagerURL = "http://alertmanager:9093"
+
+ // Use a single registry throughout the test - this is critical to catch registry conflicts
+ reg := prometheus.NewRegistry()
+ alertmanagersDiscoveredFunc := func() float64 { return 0 }
+ metrics := newAlertMetrics(reg, alertmanagersDiscoveredFunc)
+
+ logger := slog.New(slog.DiscardHandler)
+ opts := &Options{QueueCapacity: 10, MaxBatchSize: DefaultMaxBatchSize}
+
+ // Create first sendLoop - this initializes metrics with the alertmanager URL label
+ sendLoop1 := newSendLoop(alertmanagerURL, nil, &config.DefaultAlertmanagerConfig, opts, logger, metrics)
+
+ // Verify metrics are initialized
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(metrics.dropped.WithLabelValues(alertmanagerURL)))
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(metrics.sent.WithLabelValues(alertmanagerURL)))
+
+ // Stop the sendLoop - this should clean up all metrics
+ sendLoop1.stop()
+
+ // Create second sendLoop with the same URL - this should NOT panic or conflict
+ // because metrics were properly cleaned up
+ sendLoop2 := newSendLoop(alertmanagerURL, nil, &config.DefaultAlertmanagerConfig, opts, logger, metrics)
+ defer sendLoop2.stop()
+
+ // Verify metrics are re-initialized correctly
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(metrics.dropped.WithLabelValues(alertmanagerURL)))
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(metrics.sent.WithLabelValues(alertmanagerURL)))
+}
diff --git a/notifier/util_test.go b/notifier/util_test.go
index a9f0509ba1..78f45ba85c 100644
--- a/notifier/util_test.go
+++ b/notifier/util_test.go
@@ -15,6 +15,7 @@ package notifier
import (
"testing"
+ "time"
"github.com/prometheus/alertmanager/api/v2/models"
"github.com/stretchr/testify/require"
@@ -25,3 +26,99 @@ import (
func TestLabelsToOpenAPILabelSet(t *testing.T) {
require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222")))
}
+
+// Edge case tests for utility functions
+
+func TestLabelsToOpenAPILabelSetEmpty(t *testing.T) {
+ result := labelsToOpenAPILabelSet(labels.EmptyLabels())
+ require.Empty(t, result)
+}
+
+func TestLabelsToOpenAPILabelSetSpecialCharacters(t *testing.T) {
+ result := labelsToOpenAPILabelSet(labels.FromStrings(
+ "special/chars", "value with spaces",
+ "unicode", "αβγ",
+ "empty", "",
+ ))
+
+ expected := models.LabelSet{
+ "special/chars": "value with spaces",
+ "unicode": "αβγ",
+ "empty": "",
+ }
+ require.Equal(t, expected, result)
+}
+
+func TestAlertsToOpenAPIAlertsEmpty(t *testing.T) {
+ result := alertsToOpenAPIAlerts([]*Alert{})
+ require.Empty(t, result)
+}
+
+func TestAlertsToOpenAPIAlertsNil(t *testing.T) {
+ result := alertsToOpenAPIAlerts(nil)
+ require.Empty(t, result)
+}
+
+func TestAlertsToOpenAPIAlertsSingle(t *testing.T) {
+ now := time.Now()
+ alert := &Alert{
+ Labels: labels.FromStrings("alertname", "test", "severity", "critical"),
+ Annotations: labels.FromStrings("summary", "Test alert"),
+ StartsAt: now,
+ EndsAt: now.Add(time.Hour),
+ GeneratorURL: "http://prometheus:9090/graph",
+ }
+
+ result := alertsToOpenAPIAlerts([]*Alert{alert})
+ require.Len(t, result, 1)
+
+ apiAlert := result[0]
+ require.Equal(t, "test", apiAlert.Labels["alertname"])
+ require.Equal(t, "critical", apiAlert.Labels["severity"])
+ require.Equal(t, "Test alert", apiAlert.Annotations["summary"])
+ require.Equal(t, "http://prometheus:9090/graph", string(apiAlert.GeneratorURL))
+}
+
+func TestAlertsToOpenAPIAlertsMultiple(t *testing.T) {
+ now := time.Now()
+ alerts := []*Alert{
+ {
+ Labels: labels.FromStrings("alertname", "alert1"),
+ Annotations: labels.FromStrings("desc", "First alert"),
+ StartsAt: now,
+ EndsAt: now.Add(time.Hour),
+ },
+ {
+ Labels: labels.FromStrings("alertname", "alert2"),
+ Annotations: labels.FromStrings("desc", "Second alert"),
+ StartsAt: now.Add(time.Minute),
+ EndsAt: now.Add(2 * time.Hour),
+ },
+ }
+
+ result := alertsToOpenAPIAlerts(alerts)
+ require.Len(t, result, 2)
+
+ require.Equal(t, "alert1", result[0].Labels["alertname"])
+ require.Equal(t, "alert2", result[1].Labels["alertname"])
+ require.Equal(t, "First alert", result[0].Annotations["desc"])
+ require.Equal(t, "Second alert", result[1].Annotations["desc"])
+}
+
+func TestAlertsToOpenAPIAlertsEmptyFields(t *testing.T) {
+ alert := &Alert{
+ Labels: labels.EmptyLabels(),
+ Annotations: labels.EmptyLabels(),
+ StartsAt: time.Time{},
+ EndsAt: time.Time{},
+ GeneratorURL: "",
+ }
+
+ result := alertsToOpenAPIAlerts([]*Alert{alert})
+ require.Len(t, result, 1)
+
+ apiAlert := result[0]
+ require.Empty(t, apiAlert.Labels)
+ require.Empty(t, apiAlert.Annotations)
+ require.Empty(t, string(apiAlert.GeneratorURL))
+}
diff --git a/promql/bench_test.go b/promql/bench_test.go
index f647b03600..9f0de52ec8 100644
--- a/promql/bench_test.go
+++ b/promql/bench_test.go
@@ -36,6 +36,8 @@ import (
"github.com/prometheus/prometheus/util/teststorage"
)
+var testParser = parser.NewParser(parser.Options{})
+
func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, interval, numIntervals int) error {
ctx := context.Background()
@@ -332,18 +334,15 @@ func rangeQueryCases() []benchCase {
}
func BenchmarkRangeQuery(b *testing.B) {
- parser.EnableExtendedRangeSelectors = true
- b.Cleanup(func() {
- parser.EnableExtendedRangeSelectors = false
- })
stor := teststorage.New(b)
stor.DisableCompactions() // Don't want auto-compaction disrupting timings.
- defer stor.Close()
+
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
+ Parser: parser.NewParser(parser.Options{EnableExtendedRangeSelectors: true, EnableExperimentalFunctions: true}),
}
engine := promqltest.NewTestEngineWithOpts(b, opts)
@@ -383,7 +382,6 @@ func BenchmarkRangeQuery(b *testing.B) {
func BenchmarkJoinQuery(b *testing.B) {
stor := teststorage.New(b)
stor.DisableCompactions() // Don't want auto-compaction disrupting timings.
- defer stor.Close()
opts := promql.EngineOpts{
Logger: nil,
@@ -393,40 +391,44 @@ func BenchmarkJoinQuery(b *testing.B) {
}
engine := promqltest.NewTestEngineWithOpts(b, opts)
- const interval = 10000 // 10s interval.
+ const (
+ interval = 10000 // 10s interval.
+ steps = 5000
+ numInstances = 1000
+ )
- // A day of data plus 10k steps.
- numIntervals := 8640 + 10000
+ // A day of data plus steps.
+ numIntervals := 8640 + steps
- require.NoError(b, setupJoinQueryTestData(stor, engine, interval, numIntervals, 1000))
+ require.NoError(b, setupJoinQueryTestData(stor, engine, interval, numIntervals, numInstances))
for _, c := range []benchCase{
{
expr: `rpc_request_success_total + rpc_request_error_total`,
- steps: 10000,
+ steps: steps,
},
{
expr: `rpc_request_success_total + ON (job, instance) GROUP_LEFT rpc_request_error_total`,
- steps: 10000,
+ steps: steps,
},
{
expr: `rpc_request_success_total AND rpc_request_error_total{instance=~"0.*"}`, // 0.* keeps 1/16 of UUID values
- steps: 10000,
+ steps: steps,
},
{
expr: `rpc_request_success_total OR rpc_request_error_total{instance=~"0.*"}`, // 0.* keeps 1/16 of UUID values
- steps: 10000,
+ steps: steps,
},
{
expr: `rpc_request_success_total UNLESS rpc_request_error_total{instance=~"0.*"}`, // 0.* keeps 1/16 of UUID values
- steps: 10000,
+ steps: steps,
},
} {
name := fmt.Sprintf("expr=%s/steps=%d", c.expr, c.steps)
b.Run(name, func(b *testing.B) {
ctx := context.Background()
- b.ReportAllocs()
- for b.Loop() {
+
+ queryFn := func() {
qry, err := engine.NewRangeQuery(
ctx, stor, nil, c.expr,
timestamp.Time(int64((numIntervals-c.steps)*10_000)),
@@ -439,13 +441,20 @@ func BenchmarkJoinQuery(b *testing.B) {
qry.Close()
}
+
+ queryFn() // Warm up run.
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ for b.Loop() {
+ queryFn()
+ }
})
}
}
func BenchmarkNativeHistograms(b *testing.B) {
testStorage := teststorage.New(b)
- defer testStorage.Close()
app := testStorage.Appender(context.TODO())
if err := generateNativeHistogramSeries(app, 3000); err != nil {
@@ -523,7 +532,6 @@ func BenchmarkNativeHistograms(b *testing.B) {
func BenchmarkNativeHistogramsCustomBuckets(b *testing.B) {
testStorage := teststorage.New(b)
- defer testStorage.Close()
app := testStorage.Appender(context.TODO())
if err := generateNativeHistogramCustomBucketsSeries(app, 3000); err != nil {
@@ -594,7 +602,6 @@ func BenchmarkNativeHistogramsCustomBuckets(b *testing.B) {
func BenchmarkInfoFunction(b *testing.B) {
// Initialize test storage and generate test series data.
testStorage := teststorage.New(b)
- defer testStorage.Close()
start := time.Unix(0, 0)
end := start.Add(2 * time.Hour)
@@ -636,6 +643,7 @@ func BenchmarkInfoFunction(b *testing.B) {
Timeout: 100 * time.Second,
EnableAtModifier: true,
EnableNegativeOffset: true,
+ Parser: parser.NewParser(parser.Options{EnableExperimentalFunctions: true}),
}
engine := promql.NewEngine(opts)
b.Run(tc.name, func(b *testing.B) {
@@ -796,13 +804,13 @@ func BenchmarkParser(b *testing.B) {
b.Run(c, func(b *testing.B) {
b.ReportAllocs()
for b.Loop() {
- parser.ParseExpr(c)
+ testParser.ParseExpr(c)
}
})
}
for _, c := range cases {
b.Run("preprocess "+c, func(b *testing.B) {
- expr, _ := parser.ParseExpr(c)
+ expr, _ := testParser.ParseExpr(c)
start, end := time.Now().Add(-time.Hour), time.Now()
for b.Loop() {
promql.PreprocessExpr(expr, start, end, 0)
@@ -814,7 +822,7 @@ func BenchmarkParser(b *testing.B) {
b.Run(name, func(b *testing.B) {
b.ReportAllocs()
for b.Loop() {
- parser.ParseExpr(c)
+ testParser.ParseExpr(c)
}
})
}
diff --git a/promql/durations_test.go b/promql/durations_test.go
index e9759af0dd..103c068dc1 100644
--- a/promql/durations_test.go
+++ b/promql/durations_test.go
@@ -23,11 +23,7 @@ import (
)
func TestDurationVisitor(t *testing.T) {
- // Enable experimental duration expression parsing.
- parser.ExperimentalDurationExpr = true
- t.Cleanup(func() {
- parser.ExperimentalDurationExpr = false
- })
+ p := parser.NewParser(parser.Options{ExperimentalDurationExpr: true})
complexExpr := `sum_over_time(
rate(metric[5m] offset 1h)[10m:30s] offset 2h
) +
@@ -38,7 +34,7 @@ func TestDurationVisitor(t *testing.T) {
metric[2h * 0.5]
)`
- expr, err := parser.ParseExpr(complexExpr)
+ expr, err := p.ParseExpr(complexExpr)
require.NoError(t, err)
err = parser.Walk(&durationVisitor{}, expr, nil)
diff --git a/promql/engine.go b/promql/engine.go
index 57a1f41bb8..eb41e40605 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -50,6 +50,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/features"
+ "github.com/prometheus/prometheus/util/kahansum"
"github.com/prometheus/prometheus/util/logging"
"github.com/prometheus/prometheus/util/stats"
"github.com/prometheus/prometheus/util/zeropool"
@@ -334,6 +335,9 @@ type EngineOpts struct {
// FeatureRegistry is the registry for tracking enabled/disabled features.
FeatureRegistry features.Collector
+
+ // Parser is the PromQL parser instance used for parsing expressions.
+ Parser parser.Parser
}
// Engine handles the lifetime of queries from beginning to end.
@@ -353,6 +357,7 @@ type Engine struct {
enablePerStepStats bool
enableDelayedNameRemoval bool
enableTypeAndUnitLabels bool
+ parser parser.Parser
}
// NewEngine returns a new engine.
@@ -431,6 +436,10 @@ func NewEngine(opts EngineOpts) *Engine {
metrics.maxConcurrentQueries.Set(-1)
}
+ if opts.Parser == nil {
+ opts.Parser = parser.NewParser(parser.Options{})
+ }
+
if opts.LookbackDelta == 0 {
opts.LookbackDelta = defaultLookbackDelta
if l := opts.Logger; l != nil {
@@ -459,7 +468,9 @@ func NewEngine(opts EngineOpts) *Engine {
r.Enable(features.PromQL, "per_query_lookback_delta")
r.Enable(features.PromQL, "subqueries")
- parser.RegisterFeatures(r)
+ if opts.Parser != nil {
+ opts.Parser.RegisterFeatures(r)
+ }
}
return &Engine{
@@ -475,6 +486,7 @@ func NewEngine(opts EngineOpts) *Engine {
enablePerStepStats: opts.EnablePerStepStats,
enableDelayedNameRemoval: opts.EnableDelayedNameRemoval,
enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels,
+ parser: opts.Parser,
}
}
@@ -523,7 +535,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts
return nil, err
}
defer finishQueue()
- expr, err := parser.ParseExpr(qs)
+ expr, err := ng.parser.ParseExpr(qs)
if err != nil {
return nil, err
}
@@ -544,7 +556,7 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts Q
return nil, err
}
defer finishQueue()
- expr, err := parser.ParseExpr(qs)
+ expr, err := ng.parser.ParseExpr(qs)
if err != nil {
return nil, err
}
@@ -1667,7 +1679,7 @@ func (ev *evaluator) smoothSeries(series []storage.Series, offset time.Duration)
// Interpolate between prev and next.
// TODO: detect if the sample is a counter, based on __type__ or metadata.
prev, next := floats[i-1], floats[i]
- val := interpolate(prev, next, ts, false, false)
+ val := interpolate(prev, next, ts, false)
ss.Floats = append(ss.Floats, FPoint{F: val, T: ts})
case i > 0:
@@ -2862,7 +2874,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
if matching.Card == parser.CardManyToMany {
panic("many-to-many only allowed for set operators")
}
- if len(lhs) == 0 || len(rhs) == 0 {
+ if (len(lhs) == 0 && len(rhs) == 0) ||
+ ((len(lhs) == 0 || len(rhs) == 0) && matching.FillValues.RHS == nil && matching.FillValues.LHS == nil) {
return nil, nil // Short-circuit: nothing is going to match.
}
@@ -2910,17 +2923,9 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
}
matchedSigs := enh.matchedSigs
- // For all lhs samples find a respective rhs sample and perform
- // the binary operation.
var lastErr error
- for i, ls := range lhs {
- sigOrd := lhsh[i].sigOrdinal
-
- rs, found := rightSigs[sigOrd] // Look for a match in the rhs Vector.
- if !found {
- continue
- }
+ doBinOp := func(ls, rs Sample, sigOrd int) {
// Account for potentially swapped sidedness.
fl, fr := ls.F, rs.F
hl, hr := ls.H, rs.H
@@ -2931,7 +2936,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
floatValue, histogramValue, keep, info, err := vectorElemBinop(op, fl, fr, hl, hr, pos)
if err != nil {
lastErr = err
- continue
+ return
}
if info != nil {
lastErr = info
@@ -2971,7 +2976,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
}
if !keep && !returnBool {
- continue
+ return
}
enh.Out = append(enh.Out, Sample{
@@ -2981,6 +2986,43 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
DropName: returnBool,
})
}
+
+ // For all lhs samples, find a respective rhs sample and perform
+ // the binary operation.
+ for i, ls := range lhs {
+ sigOrd := lhsh[i].sigOrdinal
+
+ rs, found := rightSigs[sigOrd] // Look for a match in the rhs Vector.
+ if !found {
+ fill := matching.FillValues.RHS
+ if fill == nil {
+ continue
+ }
+ rs = Sample{
+ Metric: ls.Metric.MatchLabels(matching.On, matching.MatchingLabels...),
+ F: *fill,
+ }
+ }
+
+ doBinOp(ls, rs, sigOrd)
+ }
+
+ // For any rhs samples which have not been matched, check if we need to
+ // perform the operation with a fill value from the lhs.
+ if fill := matching.FillValues.LHS; fill != nil {
+ for sigOrd, rs := range rightSigs {
+ if _, matched := matchedSigs[sigOrd]; matched {
+ continue // Already matched.
+ }
+ ls := Sample{
+ Metric: rs.Metric.MatchLabels(matching.On, matching.MatchingLabels...),
+ F: *fill,
+ }
+
+ doBinOp(ls, rs, sigOrd)
+ }
+ }
+
return enh.Out, lastErr
}
@@ -3209,23 +3251,26 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
}
type groupedAggregation struct {
- floatValue float64
- histogramValue *histogram.FloatHistogram
- floatMean float64
- floatKahanC float64 // "Compensating value" for Kahan summation.
- groupCount float64
- heap vectorByValueHeap
+ floatValue float64
+ floatMean float64
+ floatKahanC float64 // Compensation float for Kahan summation.
+ histogramValue *histogram.FloatHistogram
+ histogramMean *histogram.FloatHistogram
+ histogramKahanC *histogram.FloatHistogram // Compensation histogram for Kahan summation.
+ groupCount float64
+ heap vectorByValueHeap
// All bools together for better packing within the struct.
- seen bool // Was this output groups seen in the input at this timestamp.
- hasFloat bool // Has at least 1 float64 sample aggregated.
- hasHistogram bool // Has at least 1 histogram sample aggregated.
- incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets.
- groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
- incrementalMean bool // True after reverting to incremental calculation of the mean value.
- counterResetSeen bool // Counter reset hint CounterReset seen. Currently only used for histogram samples.
- notCounterResetSeen bool // Counter reset hint NotCounterReset seen. Currently only used for histogram samples.
- dropName bool // True if any sample in this group has DropName set.
+ seen bool // Was this output groups seen in the input at this timestamp.
+ hasFloat bool // Has at least 1 float64 sample aggregated.
+ hasHistogram bool // Has at least 1 histogram sample aggregated.
+ incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets.
+ groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
+ floatIncrementalMean bool // True after reverting to incremental calculation for float-based mean value.
+ histogramIncrementalMean bool // True after reverting to incremental calculation for histogram-based mean value.
+ counterResetSeen bool // Counter reset hint CounterReset seen. Currently only used for histogram samples.
+ notCounterResetSeen bool // Counter reset hint NotCounterReset seen. Currently only used for histogram samples.
+ dropName bool // True if any sample in this group has DropName set.
}
// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
@@ -3315,6 +3360,11 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
group.dropName = true
}
+ var (
+ nhcbBoundsReconciled bool
+ err error
+ )
+
switch op {
case parser.SUM:
if h != nil {
@@ -3326,7 +3376,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case histogram.NotCounterReset:
group.notCounterResetSeen = true
}
- _, _, nhcbBoundsReconciled, err := group.histogramValue.Add(h)
+ group.histogramKahanC, _, nhcbBoundsReconciled, err = group.histogramValue.KahanAdd(h, group.histogramKahanC)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
@@ -3340,18 +3390,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
- group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC)
+ group.floatValue, group.floatKahanC = kahansum.Inc(f, group.floatValue, group.floatKahanC)
}
case parser.AVG:
- // For the average calculation of histograms, we use
- // incremental mean calculation without the help of
- // Kahan summation (but this should change, see
- // https://github.com/prometheus/prometheus/issues/14105
- // ). For floats, we improve the accuracy with the help
- // of Kahan summation. For a while, we assumed that
- // incremental mean calculation combined with Kahan
- // summation (see
+ // We improve the accuracy with the help of Kahan summation.
+ // For a while, we assumed that incremental mean calculation
+ // combined with Kahan summation (see
// https://stackoverflow.com/questions/61665473/is-it-beneficial-for-precision-to-calculate-the-incremental-mean-average
// for inspiration) is generally the preferred solution.
// However, it then turned out that direct mean
@@ -3386,20 +3431,37 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case histogram.NotCounterReset:
group.notCounterResetSeen = true
}
- left := h.Copy().Div(group.groupCount)
- right := group.histogramValue.Copy().Div(group.groupCount)
-
- toAdd, _, nhcbBoundsReconciled, err := left.Sub(right)
- if err != nil {
- handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
- group.incompatibleHistograms = true
- continue
+ if !group.histogramIncrementalMean {
+ v := group.histogramValue.Copy()
+ var c *histogram.FloatHistogram
+ if group.histogramKahanC != nil {
+ c = group.histogramKahanC.Copy()
+ }
+ c, _, nhcbBoundsReconciled, err = v.KahanAdd(h, c)
+ if err != nil {
+ handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+ group.incompatibleHistograms = true
+ continue
+ }
+ if nhcbBoundsReconciled {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(e.Expr.PositionRange(), annotations.HistogramAgg))
+ }
+ if !v.HasOverflow() {
+ group.histogramValue, group.histogramKahanC = v, c
+ break
+ }
+ group.histogramIncrementalMean = true
+ group.histogramMean = group.histogramValue.Copy().Div(group.groupCount - 1)
+ if group.histogramKahanC != nil {
+ group.histogramKahanC.Div(group.groupCount - 1)
+ }
}
- if nhcbBoundsReconciled {
- annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(e.Expr.PositionRange(), annotations.HistogramAgg))
+ q := (group.groupCount - 1) / group.groupCount
+ if group.histogramKahanC != nil {
+ group.histogramKahanC.Mul(q)
}
-
- _, _, nhcbBoundsReconciled, err = group.histogramValue.Add(toAdd)
+ toAdd := h.Copy().Div(group.groupCount)
+ group.histogramKahanC, _, nhcbBoundsReconciled, err = group.histogramMean.Mul(q).KahanAdd(toAdd, group.histogramKahanC)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
@@ -3414,8 +3476,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
- if !group.incrementalMean {
- newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC)
+ if !group.floatIncrementalMean {
+ newV, newC := kahansum.Inc(f, group.floatValue, group.floatKahanC)
if !math.IsInf(newV, 0) {
// The sum doesn't overflow, so we propagate it to the
// group struct and continue with the regular
@@ -3426,12 +3488,12 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// If we are here, we know that the sum _would_ overflow. So
// instead of continue to sum up, we revert to incremental
// calculation of the mean value from here on.
- group.incrementalMean = true
+ group.floatIncrementalMean = true
group.floatMean = group.floatValue / (group.groupCount - 1)
group.floatKahanC /= group.groupCount - 1
}
q := (group.groupCount - 1) / group.groupCount
- group.floatMean, group.floatKahanC = kahanSumInc(
+ group.floatMean, group.floatKahanC = kahansum.Inc(
f/group.groupCount,
q*group.floatMean,
q*group.floatKahanC,
@@ -3506,8 +3568,24 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case aggr.incompatibleHistograms:
continue
case aggr.hasHistogram:
+ if aggr.histogramIncrementalMean {
+ if aggr.histogramKahanC != nil {
+ aggr.histogramValue, _, _, _ = aggr.histogramMean.Add(aggr.histogramKahanC)
+ // Add can theoretically return ErrHistogramsIncompatibleSchema, but at
+ // this stage errors should not occur if earlier KahanAdd calls succeeded.
+ } else {
+ aggr.histogramValue = aggr.histogramMean
+ }
+ } else {
+ aggr.histogramValue.Div(aggr.groupCount)
+ if aggr.histogramKahanC != nil {
+ aggr.histogramValue, _, _, _ = aggr.histogramValue.Add(aggr.histogramKahanC.Div(aggr.groupCount))
+ // Add can theoretically return ErrHistogramsIncompatibleSchema, but at
+ // this stage errors should not occur if earlier KahanAdd calls succeeded.
+ }
+ }
aggr.histogramValue = aggr.histogramValue.Compact(0)
- case aggr.incrementalMean:
+ case aggr.floatIncrementalMean:
aggr.floatValue = aggr.floatMean + aggr.floatKahanC
default:
aggr.floatValue = aggr.floatValue/aggr.groupCount + aggr.floatKahanC/aggr.groupCount
@@ -3535,6 +3613,11 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case aggr.incompatibleHistograms:
continue
case aggr.hasHistogram:
+ if aggr.histogramKahanC != nil {
+ aggr.histogramValue, _, _, _ = aggr.histogramValue.Add(aggr.histogramKahanC)
+ // Add can theoretically return ErrHistogramsIncompatibleSchema, but at
+ // this stage errors should not occur if earlier KahanAdd calls succeeded.
+ }
aggr.histogramValue.Compact(0)
default:
aggr.floatValue += aggr.floatKahanC
diff --git a/promql/engine_internal_test.go b/promql/engine_internal_test.go
index f040f53e61..27bf5503f4 100644
--- a/promql/engine_internal_test.go
+++ b/promql/engine_internal_test.go
@@ -27,12 +27,14 @@ import (
"github.com/prometheus/prometheus/util/annotations"
)
+var testParser = parser.NewParser(parser.Options{})
+
func TestRecoverEvaluatorRuntime(t *testing.T) {
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
ev := &evaluator{logger: logger}
- expr, _ := parser.ParseExpr("sum(up)")
+ expr, _ := testParser.ParseExpr("sum(up)")
var err error
diff --git a/promql/engine_test.go b/promql/engine_test.go
index 7b7a67a54b..f911419c62 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -52,8 +52,6 @@ const (
)
func TestMain(m *testing.M) {
- // Enable experimental functions testing
- parser.EnableExperimentalFunctions = true
testutil.TolerantVerifyLeak(m)
}
@@ -676,7 +674,6 @@ func TestEngineEvalStmtTimestamps(t *testing.T) {
load 10s
metric 1 2
`)
- t.Cleanup(func() { storage.Close() })
cases := []struct {
Query string
@@ -789,7 +786,6 @@ load 10s
metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100
metricWith1HistogramEvery10Seconds {{schema:1 count:5 sum:20 buckets:[1 2 1 1]}}+{{schema:1 count:10 sum:5 buckets:[1 2 3 4]}}x100
`)
- t.Cleanup(func() { storage.Close() })
cases := []struct {
Query string
@@ -1339,7 +1335,6 @@ load 10s
bigmetric{a="1"} 1+1x100
bigmetric{a="2"} 1+1x100
`)
- t.Cleanup(func() { storage.Close() })
// These test cases should be touching the limit exactly (hence no exceeding).
// Exceeding the limit will be tested by doing -1 to the MaxSamples.
@@ -1511,11 +1506,6 @@ load 10s
}
func TestExtendedRangeSelectors(t *testing.T) {
- parser.EnableExtendedRangeSelectors = true
- t.Cleanup(func() {
- parser.EnableExtendedRangeSelectors = false
- })
-
engine := newTestEngine(t)
storage := promqltest.LoadedStorage(t, `
load 10s
@@ -1523,7 +1513,6 @@ func TestExtendedRangeSelectors(t *testing.T) {
withreset 1+1x4 1+1x5
notregular 0 5 100 2 8
`)
- t.Cleanup(func() { storage.Close() })
tc := []struct {
query string
@@ -1664,6 +1653,40 @@ func TestExtendedRangeSelectors(t *testing.T) {
}
}
+// TestParserConfigIsolation ensures the engine's parser configuration is respected.
+func TestParserConfigIsolation(t *testing.T) {
+ ctx := context.Background()
+ storage := promqltest.LoadedStorage(t, `
+ load 10s
+ metric 1+1x10
+ `)
+ t.Cleanup(func() { storage.Close() })
+
+ query := "metric[10s] smoothed"
+ t.Run("engine_with_feature_disabled_rejects", func(t *testing.T) {
+ engine := promql.NewEngine(promql.EngineOpts{
+ MaxSamples: 1000, Timeout: 10 * time.Second,
+ Parser: parser.NewParser(parser.Options{EnableExtendedRangeSelectors: false}),
+ })
+ t.Cleanup(func() { _ = engine.Close() })
+ _, err := engine.NewInstantQuery(ctx, storage, nil, query, time.Unix(10, 0))
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "parse")
+ })
+ t.Run("engine_with_feature_enabled_accepts", func(t *testing.T) {
+ engine := promql.NewEngine(promql.EngineOpts{
+ MaxSamples: 1000, Timeout: 10 * time.Second,
+ Parser: parser.NewParser(parser.Options{EnableExtendedRangeSelectors: true}),
+ })
+ t.Cleanup(func() { _ = engine.Close() })
+ q, err := engine.NewInstantQuery(ctx, storage, nil, query, time.Unix(10, 0))
+ require.NoError(t, err)
+ defer q.Close()
+ res := q.Exec(ctx)
+ require.NoError(t, res.Err)
+ })
+}
+
func TestAtModifier(t *testing.T) {
engine := newTestEngine(t)
storage := promqltest.LoadedStorage(t, `
@@ -1677,7 +1700,6 @@ load 10s
load 1ms
metric_ms 0+1x10000
`)
- t.Cleanup(func() { storage.Close() })
lbls1 := labels.FromStrings("__name__", "metric", "job", "1")
lbls2 := labels.FromStrings("__name__", "metric", "job", "2")
@@ -2283,7 +2305,6 @@ func TestSubquerySelector(t *testing.T) {
t.Run("", func(t *testing.T) {
engine := newTestEngine(t)
storage := promqltest.LoadedStorage(t, tst.loadString)
- t.Cleanup(func() { storage.Close() })
for _, c := range tst.cases {
t.Run(c.Query, func(t *testing.T) {
@@ -3239,7 +3260,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
for _, test := range testCases {
t.Run(test.input, func(t *testing.T) {
- expr, err := parser.ParseExpr(test.input)
+ expr, err := testParser.ParseExpr(test.input)
require.NoError(t, err)
expr, err = promql.PreprocessExpr(expr, startTime, endTime, 0)
require.NoError(t, err)
@@ -3410,7 +3431,6 @@ metric 0 1 2
t.Run(c.name, func(t *testing.T) {
engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
storage := promqltest.LoadedStorage(t, load)
- t.Cleanup(func() { storage.Close() })
opts := promql.NewPrometheusQueryOpts(false, c.queryLookback)
qry, err := engine.NewInstantQuery(context.Background(), storage, opts, query, c.ts)
@@ -3444,7 +3464,7 @@ func TestHistogramCopyFromIteratorRegression(t *testing.T) {
histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}}
`
storage := promqltest.LoadedStorage(t, load)
- t.Cleanup(func() { storage.Close() })
+
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) {
@@ -3747,12 +3767,12 @@ func TestHistogramRateWithFloatStaleness(t *testing.T) {
recoded bool
)
- newc, recoded, app, err = app.AppendHistogram(nil, 0, h1.Copy(), false)
+ newc, recoded, app, err = app.AppendHistogram(nil, 0, 0, h1.Copy(), false)
require.NoError(t, err)
require.False(t, recoded)
require.Nil(t, newc)
- newc, recoded, _, err = app.AppendHistogram(nil, 10, h1.Copy(), false)
+ newc, recoded, _, err = app.AppendHistogram(nil, 0, 10, h1.Copy(), false)
require.NoError(t, err)
require.False(t, recoded)
require.Nil(t, newc)
@@ -3762,7 +3782,7 @@ func TestHistogramRateWithFloatStaleness(t *testing.T) {
app, err = c2.Appender()
require.NoError(t, err)
- app.Append(20, math.Float64frombits(value.StaleNaN))
+ app.Append(0, 20, math.Float64frombits(value.StaleNaN))
// Make a chunk with two normal histograms that have zero value.
h2 := histogram.Histogram{
@@ -3773,12 +3793,12 @@ func TestHistogramRateWithFloatStaleness(t *testing.T) {
app, err = c3.Appender()
require.NoError(t, err)
- newc, recoded, app, err = app.AppendHistogram(nil, 30, h2.Copy(), false)
+ newc, recoded, app, err = app.AppendHistogram(nil, 0, 30, h2.Copy(), false)
require.NoError(t, err)
require.False(t, recoded)
require.Nil(t, newc)
- newc, recoded, _, err = app.AppendHistogram(nil, 40, h2.Copy(), false)
+ newc, recoded, _, err = app.AppendHistogram(nil, 0, 40, h2.Copy(), false)
require.NoError(t, err)
require.False(t, recoded)
require.Nil(t, newc)
@@ -3849,6 +3869,7 @@ func TestEvaluationWithDelayedNameRemovalDisabled(t *testing.T) {
MaxSamples: 10000,
Timeout: 10 * time.Second,
EnableDelayedNameRemoval: false,
+ Parser: parser.NewParser(promqltest.TestParserOpts),
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
diff --git a/promql/functions.go b/promql/functions.go
index 9c04392232..2cb90a9b6c 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/util/annotations"
+ "github.com/prometheus/prometheus/util/kahansum"
)
// FunctionCall is the type of a PromQL function implementation
@@ -70,7 +71,7 @@ func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (
// it returns the interpolated value at the left boundary; otherwise, it returns the first sample's value.
func pickOrInterpolateLeft(floats []FPoint, first int, rangeStart int64, smoothed, isCounter bool) float64 {
if smoothed && floats[first].T < rangeStart {
- return interpolate(floats[first], floats[first+1], rangeStart, isCounter, true)
+ return interpolate(floats[first], floats[first+1], rangeStart, isCounter)
}
return floats[first].F
}
@@ -80,25 +81,20 @@ func pickOrInterpolateLeft(floats []FPoint, first int, rangeStart int64, smoothe
// it returns the interpolated value at the right boundary; otherwise, it returns the last sample's value.
func pickOrInterpolateRight(floats []FPoint, last int, rangeEnd int64, smoothed, isCounter bool) float64 {
if smoothed && last > 0 && floats[last].T > rangeEnd {
- return interpolate(floats[last-1], floats[last], rangeEnd, isCounter, false)
+ return interpolate(floats[last-1], floats[last], rangeEnd, isCounter)
}
return floats[last].F
}
// interpolate performs linear interpolation between two points.
-// If isCounter is true and there is a counter reset:
-// - on the left edge, it sets the value to 0.
-// - on the right edge, it adds the left value to the right value.
+// If isCounter is true and there is a counter reset, it models the counter
+// as starting from 0 (post-reset) by setting y1 to 0.
// It then calculates the interpolated value at the given timestamp.
-func interpolate(p1, p2 FPoint, t int64, isCounter, leftEdge bool) float64 {
+func interpolate(p1, p2 FPoint, t int64, isCounter bool) float64 {
y1 := p1.F
y2 := p2.F
if isCounter && y2 < y1 {
- if leftEdge {
- y1 = 0
- } else {
- y2 += y1
- }
+ y1 = 0
}
return y1 + (y2-y1)*float64(t-p1.T)/float64(p2.T-p1.T)
@@ -562,6 +558,9 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
// trend factor increases the influence. of trends. Algorithm taken from
// https://en.wikipedia.org/wiki/Exponential_smoothing .
func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) < 2 || len(vectorVals[0]) == 0 || len(vectorVals[1]) == 0 || len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
// The smoothing factor argument.
sf := vectorVals[0][0].F
@@ -776,12 +775,18 @@ func funcScalar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNo
}
func aggrOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector {
+ if len(matrixVal) == 0 {
+ return enh.Out
+ }
el := matrixVal[0]
return append(enh.Out, Sample{F: aggrFn(el)})
}
func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
res, err := aggrFn(el)
@@ -790,14 +795,14 @@ func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series)
// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
firstSeries := matrixVal[0]
if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
}
- // For the average calculation of histograms, we use incremental mean
- // calculation without the help of Kahan summation (but this should
- // change, see https://github.com/prometheus/prometheus/issues/14105 ).
- // For floats, we improve the accuracy with the help of Kahan summation.
+ // We improve the accuracy with the help of Kahan summation.
// For a while, we assumed that incremental mean calculation combined
// with Kahan summation (see
// https://stackoverflow.com/questions/61665473/is-it-beneficial-for-precision-to-calculate-the-incremental-mean-average
@@ -840,23 +845,47 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
}
}()
- mean := s.Histograms[0].H.Copy()
- trackCounterReset(mean)
+ var (
+ sum = s.Histograms[0].H.Copy()
+ mean, kahanC *histogram.FloatHistogram
+ count = 1.
+ incrementalMean bool
+ nhcbBoundsReconciled bool
+ err error
+ )
+ trackCounterReset(sum)
for i, h := range s.Histograms[1:] {
trackCounterReset(h.H)
- count := float64(i + 2)
- left := h.H.Copy().Div(count)
- right := mean.Copy().Div(count)
-
- toAdd, _, nhcbBoundsReconciled, err := left.Sub(right)
- if err != nil {
- return mean, err
+ count = float64(i + 2)
+ if !incrementalMean {
+ sumCopy := sum.Copy()
+ var cCopy *histogram.FloatHistogram
+ if kahanC != nil {
+ cCopy = kahanC.Copy()
+ }
+ cCopy, _, nhcbBoundsReconciled, err = sumCopy.KahanAdd(h.H, cCopy)
+ if err != nil {
+ return sumCopy.Div(count), err
+ }
+ if nhcbBoundsReconciled {
+ nhcbBoundsReconciledSeen = true
+ }
+ if !sumCopy.HasOverflow() {
+ sum, kahanC = sumCopy, cCopy
+ continue
+ }
+ incrementalMean = true
+ mean = sum.Copy().Div(count - 1)
+ if kahanC != nil {
+ kahanC.Div(count - 1)
+ }
}
- if nhcbBoundsReconciled {
- nhcbBoundsReconciledSeen = true
+ q := (count - 1) / count
+ if kahanC != nil {
+ kahanC.Mul(q)
}
-
- _, _, nhcbBoundsReconciled, err = mean.Add(toAdd)
+ toAdd := h.H.Copy().Div(count)
+ kahanC, _, nhcbBoundsReconciled, err = mean.Mul(q).KahanAdd(toAdd, kahanC)
if err != nil {
return mean, err
}
@@ -864,7 +893,18 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
nhcbBoundsReconciledSeen = true
}
}
- return mean, nil
+ if incrementalMean {
+ if kahanC != nil {
+ _, _, _, err := mean.Add(kahanC)
+ return mean, err
+ }
+ return mean, nil
+ }
+ if kahanC != nil {
+ _, _, _, err := sum.Div(count).Add(kahanC.Div(count))
+ return sum, err
+ }
+ return sum.Div(count), nil
})
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
@@ -883,7 +923,7 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
for i, f := range s.Floats[1:] {
count = float64(i + 2)
if !incrementalMean {
- newSum, newC := kahanSumInc(f.F, sum, kahanC)
+ newSum, newC := kahansum.Inc(f.F, sum, kahanC)
// Perform regular mean calculation as long as
// the sum doesn't overflow.
if !math.IsInf(newSum, 0) {
@@ -897,7 +937,7 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
kahanC /= (count - 1)
}
q := (count - 1) / count
- mean, kahanC = kahanSumInc(f.F/count, q*mean, q*kahanC)
+ mean, kahanC = kahansum.Inc(f.F/count, q*mean, q*kahanC)
}
if incrementalMean {
return mean + kahanC
@@ -915,6 +955,9 @@ func funcCountOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh
// === first_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
var f FPoint
@@ -943,6 +986,9 @@ func funcFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *
// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
var f FPoint
@@ -969,6 +1015,9 @@ func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *E
// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
var annos annotations.Annotations
if len(samples.Floats) == 0 {
@@ -993,6 +1042,9 @@ func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
// === ts_of_first_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcTsOfFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
var tf int64 = math.MaxInt64
@@ -1013,6 +1065,9 @@ func funcTsOfFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, e
// === ts_of_last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcTsOfLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
el := matrixVal[0]
var tf int64
@@ -1047,6 +1102,9 @@ func funcTsOfMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions,
// compareOverTime is a helper used by funcMaxOverTime and funcMinOverTime.
func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
var annos annotations.Annotations
if len(samples.Floats) == 0 {
@@ -1087,6 +1145,9 @@ func funcMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh
// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
firstSeries := matrixVal[0]
if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
@@ -1117,9 +1178,14 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
sum := s.Histograms[0].H.Copy()
trackCounterReset(sum)
+ var (
+ comp *histogram.FloatHistogram
+ nhcbBoundsReconciled bool
+ err error
+ )
for _, h := range s.Histograms[1:] {
trackCounterReset(h.H)
- _, _, nhcbBoundsReconciled, err := sum.Add(h.H)
+ comp, _, nhcbBoundsReconciled, err = sum.KahanAdd(h.H, comp)
if err != nil {
return sum, err
}
@@ -1127,7 +1193,16 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
nhcbBoundsReconciledSeen = true
}
}
- return sum, nil
+ if comp != nil {
+ sum, _, nhcbBoundsReconciled, err = sum.Add(comp)
+ if err != nil {
+ return sum, err
+ }
+ if nhcbBoundsReconciled {
+ nhcbBoundsReconciledSeen = true
+ }
+ }
+ return sum, err
})
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
@@ -1139,7 +1214,7 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
return aggrOverTime(matrixVal, enh, func(s Series) float64 {
var sum, c float64
for _, f := range s.Floats {
- sum, c = kahanSumInc(f.F, sum, c)
+ sum, c = kahansum.Inc(f.F, sum, c)
}
if math.IsInf(sum, 0) {
return sum
@@ -1150,6 +1225,9 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) == 0 || len(vectorVals[0]) == 0 || len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
q := vectorVals[0][0].F
el := matrixVal[0]
if len(el.Floats) == 0 {
@@ -1171,6 +1249,9 @@ func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Exp
}
func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
var annos annotations.Annotations
if len(samples.Floats) == 0 {
@@ -1186,8 +1267,8 @@ func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHe
for _, f := range s.Floats {
count++
delta := f.F - (mean + cMean)
- mean, cMean = kahanSumInc(delta/count, mean, cMean)
- aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
+ mean, cMean = kahansum.Inc(delta/count, mean, cMean)
+ aux, cAux = kahansum.Inc(delta*(f.F-(mean+cMean)), aux, cAux)
}
variance := (aux + cAux) / count
if varianceToResult == nil {
@@ -1400,24 +1481,6 @@ func funcTimestamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *Eva
return enh.Out, nil
}
-// We get incorrect results if this function is inlined; see https://github.com/prometheus/prometheus/issues/16714.
-//
-//go:noinline
-func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
- t := sum + inc
- switch {
- case math.IsInf(t, 0):
- c = 0
-
- // Using Neumaier improvement, swap if next term larger than sum.
- case math.Abs(sum) >= math.Abs(inc):
- c += (sum - t) + inc
- default:
- c += (inc - t) + sum
- }
- return t, c
-}
-
// linearRegression performs a least-square linear regression analysis on the
// provided SamplePairs. It returns the slope, and the intercept value at the
// provided time.
@@ -1440,10 +1503,10 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
}
n += 1.0
x := float64(sample.T-interceptTime) / 1e3
- sumX, cX = kahanSumInc(x, sumX, cX)
- sumY, cY = kahanSumInc(sample.F, sumY, cY)
- sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY)
- sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2)
+ sumX, cX = kahansum.Inc(x, sumX, cX)
+ sumY, cY = kahansum.Inc(sample.F, sumY, cY)
+ sumXY, cXY = kahansum.Inc(x*sample.F, sumXY, cXY)
+ sumX2, cX2 = kahansum.Inc(x*x, sumX2, cX2)
}
if constY {
if math.IsInf(initY, 0) {
@@ -1466,6 +1529,9 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
// No sense in trying to compute a derivative without at least two float points.
@@ -1490,6 +1556,9 @@ func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalN
// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) ===
func funcPredictLinear(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) == 0 || len(vectorVals[0]) == 0 || len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
samples := matrixVal[0]
duration := vectorVals[0][0].F
@@ -1573,7 +1642,7 @@ func histogramVariance(vectorVals []Vector, enh *EvalNodeHelper, varianceToResul
}
}
delta := val - mean
- variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
+ variance, cVariance = kahansum.Inc(bucket.Count*delta*delta, variance, cVariance)
}
variance += cVariance
variance /= h.Count
@@ -1596,6 +1665,9 @@ func funcHistogramStdVar(vectorVals []Vector, _ Matrix, _ parser.Expressions, en
// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) < 3 || len(vectorVals[0]) == 0 || len(vectorVals[1]) == 0 {
+ return enh.Out, nil
+ }
lower := vectorVals[0][0].F
upper := vectorVals[1][0].F
inVec := vectorVals[2]
@@ -1641,6 +1713,9 @@ func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expression
// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(vectorVals) < 2 || len(vectorVals[0]) == 0 {
+ return enh.Out, nil
+ }
q := vectorVals[0][0].F
inVec := vectorVals[1]
var annos annotations.Annotations
@@ -1671,13 +1746,13 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
// Deal with classic histograms that have already been filtered for conflicting native histograms.
for _, mb := range enh.signatureToMetricWithBuckets {
if len(mb.buckets) > 0 {
- res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets)
+ quantile, forcedMonotonicity, _, minBucket, maxBucket, maxDiff := BucketQuantile(q, mb.buckets)
if forcedMonotonicity {
+ metricName := ""
if enh.enableDelayedNameRemoval {
- annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(getMetricName(mb.metric), args[1].PositionRange()))
- } else {
- annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo("", args[1].PositionRange()))
+ metricName = getMetricName(mb.metric)
}
+ annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(metricName, args[1].PositionRange(), enh.Ts, minBucket, maxBucket, maxDiff))
}
if !enh.enableDelayedNameRemoval {
@@ -1686,7 +1761,7 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
enh.Out = append(enh.Out, Sample{
Metric: mb.metric,
- F: res,
+ F: quantile,
DropName: true,
})
}
@@ -1714,6 +1789,9 @@ func pickFirstSampleIndex(floats []FPoint, args parser.Expressions, enh *EvalNod
// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcResets(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
floats := matrixVal[0].Floats
histograms := matrixVal[0].Histograms
resets := 0
@@ -1763,6 +1841,9 @@ func funcResets(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *Eval
// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcChanges(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ if len(matrixVal) == 0 {
+ return enh.Out, nil
+ }
floats := matrixVal[0].Floats
histograms := matrixVal[0].Histograms
changes := 0
diff --git a/promql/functions_internal_test.go b/promql/functions_internal_test.go
index bb52e4976b..cd170823a8 100644
--- a/promql/functions_internal_test.go
+++ b/promql/functions_internal_test.go
@@ -24,6 +24,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser/posrange"
+ "github.com/prometheus/prometheus/util/kahansum"
)
func TestHistogramRateCounterResetHint(t *testing.T) {
@@ -79,7 +80,7 @@ func TestKahanSumInc(t *testing.T) {
runTest := func(t *testing.T, a, b, expected float64) {
t.Run(fmt.Sprintf("%v + %v = %v", a, b, expected), func(t *testing.T) {
- sum, c := kahanSumInc(b, a, 0)
+ sum, c := kahansum.Inc(b, a, 0)
result := sum + c
if math.IsNaN(expected) {
@@ -108,13 +109,13 @@ func TestInterpolate(t *testing.T) {
{FPoint{T: 1, F: 100}, FPoint{T: 2, F: 200}, 1, false, 100},
{FPoint{T: 0, F: 100}, FPoint{T: 2, F: 200}, 1, false, 150},
{FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, false, 150},
- {FPoint{T: 0, F: 200}, FPoint{T: 2, F: 0}, 1, true, 200},
- {FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, true, 250},
- {FPoint{T: 0, F: 500}, FPoint{T: 2, F: 100}, 1, true, 550},
- {FPoint{T: 0, F: 500}, FPoint{T: 10, F: 0}, 1, true, 500},
+ {FPoint{T: 0, F: 200}, FPoint{T: 2, F: 0}, 1, true, 0},
+ {FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, true, 50},
+ {FPoint{T: 0, F: 500}, FPoint{T: 2, F: 100}, 1, true, 50},
+ {FPoint{T: 0, F: 500}, FPoint{T: 10, F: 0}, 1, true, 0},
}
for _, test := range tests {
- result := interpolate(test.p1, test.p2, test.t, test.isCounter, false)
+ result := interpolate(test.p1, test.p2, test.t, test.isCounter)
require.Equal(t, test.expected, result)
}
}
diff --git a/promql/functions_test.go b/promql/functions_test.go
index 2566843092..023417bfc2 100644
--- a/promql/functions_test.go
+++ b/promql/functions_test.go
@@ -33,7 +33,7 @@ func TestDeriv(t *testing.T) {
// This requires more precision than the usual test system offers,
// so we test it by hand.
storage := teststorage.New(t)
- defer storage.Close()
+
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
diff --git a/promql/fuzz.go b/promql/fuzz.go
index f9cc4794a6..3fa28abe48 100644
--- a/promql/fuzz.go
+++ b/promql/fuzz.go
@@ -60,6 +60,8 @@ const (
// Use package-scope symbol table to avoid memory allocation on every fuzzing operation.
var symbolTable = labels.NewSymbolTable()
+var fuzzParser = parser.NewParser(parser.Options{})
+
func fuzzParseMetricWithContentType(in []byte, contentType string) int {
p, warning := textparse.New(in, contentType, symbolTable, textparse.ParserOptions{})
if p == nil || warning != nil {
@@ -103,7 +105,7 @@ func FuzzParseMetricSelector(in []byte) int {
if len(in) > maxInputSize {
return fuzzMeh
}
- _, err := parser.ParseMetricSelector(string(in))
+ _, err := fuzzParser.ParseMetricSelector(string(in))
if err == nil {
return fuzzInteresting
}
@@ -116,7 +118,7 @@ func FuzzParseExpr(in []byte) int {
if len(in) > maxInputSize {
return fuzzMeh
}
- _, err := parser.ParseExpr(string(in))
+ _, err := fuzzParser.ParseExpr(string(in))
if err == nil {
return fuzzInteresting
}
diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go
index cfea8a568e..d3a76820da 100644
--- a/promql/histogram_stats_iterator_test.go
+++ b/promql/histogram_stats_iterator_test.go
@@ -235,4 +235,6 @@ func (h *histogramIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64,
func (*histogramIterator) AtT() int64 { return 0 }
+func (*histogramIterator) AtST() int64 { return 0 }
+
func (*histogramIterator) Err() error { return nil }
diff --git a/promql/info.go b/promql/info.go
index 204ac44b40..97a79cd0f1 100644
--- a/promql/info.go
+++ b/promql/info.go
@@ -21,6 +21,7 @@ import (
"strings"
"github.com/grafana/regexp"
+ "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
@@ -46,24 +47,20 @@ func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (par
labelSelector := args[1].(*parser.VectorSelector)
for _, m := range labelSelector.LabelMatchers {
dataLabelMatchers[m.Name] = append(dataLabelMatchers[m.Name], m)
- if m.Name == labels.MetricName {
+ if m.Name == model.MetricNameLabel {
infoNameMatchers = append(infoNameMatchers, m)
}
}
} else {
- infoNameMatchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}
+ infoNameMatchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, targetInfo)}
}
// Don't try to enrich info series.
ignoreSeries := map[uint64]struct{}{}
-loop:
for _, s := range mat {
- name := s.Metric.Get(labels.MetricName)
- for _, m := range infoNameMatchers {
- if m.Matches(name) {
- ignoreSeries[s.Metric.Hash()] = struct{}{}
- continue loop
- }
+ name := s.Metric.Get(model.MetricNameLabel)
+ if len(infoNameMatchers) > 0 && matchersMatch(infoNameMatchers, name) {
+ ignoreSeries[s.Metric.Hash()] = struct{}{}
}
}
@@ -79,6 +76,15 @@ loop:
return res, annots
}
+func matchersMatch(matchers []*labels.Matcher, value string) bool {
+ for _, m := range matchers {
+ if !m.Matches(value) {
+ return false
+ }
+ }
+ return true
+}
+
// infoSelectHints calculates the storage.SelectHints for selecting info series, given expr (first argument to info call).
func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
var nodeTimestamp *int64
@@ -122,6 +128,19 @@ func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
// Series in ignoreSeries are not fetched.
// dataLabelMatchers may be mutated.
func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[uint64]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints) (Matrix, annotations.Annotations, error) {
+ removeNameFromDataLabelMatchers := func() {
+ for name, ms := range dataLabelMatchers {
+ ms = slices.DeleteFunc(ms, func(m *labels.Matcher) bool {
+ return m.Name == model.MetricNameLabel
+ })
+ if len(ms) > 0 {
+ dataLabelMatchers[name] = ms
+ } else {
+ delete(dataLabelMatchers, name)
+ }
+ }
+ }
+
// A map of values for all identifying labels we are interested in.
idLblValues := map[string]map[string]struct{}{}
for _, s := range mat {
@@ -143,6 +162,11 @@ func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeri
}
}
if len(idLblValues) == 0 {
+ // Even when returning early, we need to remove __name__ from dataLabelMatchers
+ // since it's not a data label selector (it's used to select which info metrics
+ // to consider). Without this, combineWithInfoVector would incorrectly exclude
+ // series when only __name__ is specified in the selector.
+ removeNameFromDataLabelMatchers()
return nil, nil, nil
}
@@ -166,24 +190,19 @@ func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeri
for name, re := range idLblRegexps {
infoLabelMatchers = append(infoLabelMatchers, labels.MustNewMatcher(labels.MatchRegexp, name, re))
}
- var nameMatcher *labels.Matcher
- for name, ms := range dataLabelMatchers {
- for i, m := range ms {
- if m.Name == labels.MetricName {
- nameMatcher = m
- ms = slices.Delete(ms, i, i+1)
+ hasNameMatcher := false
+ for _, ms := range dataLabelMatchers {
+ for _, m := range ms {
+ if m.Name == model.MetricNameLabel {
+ hasNameMatcher = true
}
infoLabelMatchers = append(infoLabelMatchers, m)
}
- if len(ms) > 0 {
- dataLabelMatchers[name] = ms
- } else {
- delete(dataLabelMatchers, name)
- }
}
- if nameMatcher == nil {
+ removeNameFromDataLabelMatchers()
+ if !hasNameMatcher {
// Default to using the target_info metric.
- infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}, infoLabelMatchers...)
+ infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, targetInfo)}, infoLabelMatchers...)
}
infoIt := ev.querier.Select(ctx, false, &selectHints, infoLabelMatchers...)
@@ -203,7 +222,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat
sigFunction := func(name string) func(labels.Labels) string {
return func(lset labels.Labels) string {
lb.Reset()
- lb.Add(labels.MetricName, name)
+ lb.Add(model.MetricNameLabel, name)
lset.MatchLabels(true, identifyingLabels...).Range(func(l labels.Label) {
lb.Add(l.Name, l.Value)
})
@@ -215,7 +234,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat
infoMetrics := map[string]struct{}{}
for _, is := range infoMat {
lblMap := is.Metric.Map()
- infoMetrics[lblMap[labels.MetricName]] = struct{}{}
+ infoMetrics[lblMap[model.MetricNameLabel]] = struct{}{}
}
sigfs := make(map[string]func(labels.Labels) string, len(infoMetrics))
for name := range infoMetrics {
@@ -260,7 +279,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat
infoSigs := make(map[uint64]string, len(infoMat))
for _, s := range infoMat {
- name := s.Metric.Map()[labels.MetricName]
+ name := s.Metric.Map()[model.MetricNameLabel]
infoSigs[s.Metric.Hash()] = sigfs[name](s.Metric)
}
@@ -398,7 +417,7 @@ func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[u
}
err := is.Metric.Validate(func(l labels.Label) error {
- if l.Name == labels.MetricName {
+ if l.Name == model.MetricNameLabel {
return nil
}
if _, exists := dataLabelMatchers[l.Name]; len(dataLabelMatchers) > 0 && !exists {
diff --git a/promql/parser/ast.go b/promql/parser/ast.go
index 130f9aefb7..6496095287 100644
--- a/promql/parser/ast.go
+++ b/promql/parser/ast.go
@@ -318,6 +318,19 @@ type VectorMatching struct {
// Include contains additional labels that should be included in
// the result from the side with the lower cardinality.
Include []string
+ // Fill-in values to use when a series from one side does not find a match on the other side.
+ FillValues VectorMatchFillValues
+}
+
+// VectorMatchFillValues contains the fill values to use for Vector matching
+// when one side does not find a match on the other side.
+// When a fill value is nil, no fill is applied for that side, and there
+// is no output for the match group if there is no match.
+type VectorMatchFillValues struct {
+ // RHS is the fill value to use for the right-hand side.
+ RHS *float64
+ // LHS is the fill value to use for the left-hand side.
+ LHS *float64
}
// Visitor allows visiting a Node and its child nodes. The Visit method is
diff --git a/promql/parser/features.go b/promql/parser/features.go
index ec64678237..3bd3c493f5 100644
--- a/promql/parser/features.go
+++ b/promql/parser/features.go
@@ -18,14 +18,15 @@ import "github.com/prometheus/prometheus/util/features"
// RegisterFeatures registers all PromQL features with the feature registry.
// This includes operators (arithmetic and comparison/set), aggregators (standard
// and experimental), and functions.
-func RegisterFeatures(r features.Collector) {
+func (pql *promQLParser) RegisterFeatures(r features.Collector) {
// Register core PromQL language keywords.
for keyword, itemType := range key {
if itemType.IsKeyword() {
- // Handle experimental keywords separately.
switch keyword {
case "anchored", "smoothed":
- r.Set(features.PromQL, keyword, EnableExtendedRangeSelectors)
+ r.Set(features.PromQL, keyword, pql.options.EnableExtendedRangeSelectors)
+ case "fill", "fill_left", "fill_right":
+ r.Set(features.PromQL, keyword, pql.options.EnableBinopFillModifiers)
default:
r.Enable(features.PromQL, keyword)
}
@@ -42,16 +43,16 @@ func RegisterFeatures(r features.Collector) {
// Register aggregators.
for a := ItemType(aggregatorsStart + 1); a < aggregatorsEnd; a++ {
if a.IsAggregator() {
- experimental := a.IsExperimentalAggregator() && !EnableExperimentalFunctions
+ experimental := a.IsExperimentalAggregator() && !pql.options.EnableExperimentalFunctions
r.Set(features.PromQLOperators, a.String(), !experimental)
}
}
// Register functions.
for f, fc := range Functions {
- r.Set(features.PromQLFunctions, f, !fc.Experimental || EnableExperimentalFunctions)
+ r.Set(features.PromQLFunctions, f, !fc.Experimental || pql.options.EnableExperimentalFunctions)
}
// Register experimental parser features.
- r.Set(features.PromQL, "duration_expr", ExperimentalDurationExpr)
+ r.Set(features.PromQL, "duration_expr", pql.options.ExperimentalDurationExpr)
}
diff --git a/promql/parser/functions.go b/promql/parser/functions.go
index 2f2b1c68e4..c7c7332305 100644
--- a/promql/parser/functions.go
+++ b/promql/parser/functions.go
@@ -23,9 +23,6 @@ type Function struct {
Experimental bool
}
-// EnableExperimentalFunctions controls whether experimentalFunctions are enabled.
-var EnableExperimentalFunctions bool
-
// Functions is a list of all functions supported by PromQL, including their types.
var Functions = map[string]*Function{
"abs": {
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index 47776f53d0..1196002b76 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -139,6 +139,9 @@ BOOL
BY
GROUP_LEFT
GROUP_RIGHT
+FILL
+FILL_LEFT
+FILL_RIGHT
IGNORING
OFFSET
SMOOTHED
@@ -190,7 +193,7 @@ START_METRIC_SELECTOR
%type int
%type uint
%type number series_value signed_number signed_or_unsigned_number
-%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr anchored_expr smoothed_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr offset_duration_expr
+%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier fill_modifiers binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers fill_value label_matchers matrix_selector number_duration_literal offset_expr anchored_expr smoothed_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr offset_duration_expr
%start start
@@ -302,7 +305,7 @@ binary_expr : expr ADD bin_modifier expr { $$ = yylex.(*parser).newBinar
// Using left recursion for the modifier rules, helps to keep the parser stack small and
// reduces allocations.
-bin_modifier : group_modifiers;
+bin_modifier : fill_modifiers;
bool_modifier : /* empty */
{ $$ = &BinaryExpr{
@@ -346,6 +349,47 @@ group_modifiers: bool_modifier /* empty */
}
;
+fill_modifiers: group_modifiers /* empty */
+ /* Only fill() */
+ | group_modifiers FILL fill_value
+ {
+ $$ = $1
+ fill := $3.(*NumberLiteral).Val
+ $$.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill
+ $$.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill
+ }
+ /* Only fill_left() */
+ | group_modifiers FILL_LEFT fill_value
+ {
+ $$ = $1
+ fill := $3.(*NumberLiteral).Val
+ $$.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill
+ }
+ /* Only fill_right() */
+ | group_modifiers FILL_RIGHT fill_value
+ {
+ $$ = $1
+ fill := $3.(*NumberLiteral).Val
+ $$.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill
+ }
+ /* fill_left() fill_right() */
+ | group_modifiers FILL_LEFT fill_value FILL_RIGHT fill_value
+ {
+ $$ = $1
+ fill_left := $3.(*NumberLiteral).Val
+ fill_right := $5.(*NumberLiteral).Val
+ $$.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill_left
+ $$.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill_right
+ }
+ /* fill_right() fill_left() */
+ | group_modifiers FILL_RIGHT fill_value FILL_LEFT fill_value
+ {
+ fill_right := $3.(*NumberLiteral).Val
+ fill_left := $5.(*NumberLiteral).Val
+ $$.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill_left
+ $$.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill_right
+ }
+ ;
grouping_labels : LEFT_PAREN grouping_label_list RIGHT_PAREN
{ $$ = $2 }
@@ -387,6 +431,21 @@ grouping_label : maybe_label
{ yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} }
;
+fill_value : LEFT_PAREN number_duration_literal RIGHT_PAREN
+ {
+ $$ = $2.(*NumberLiteral)
+ }
+ | LEFT_PAREN unary_op number_duration_literal RIGHT_PAREN
+ {
+ nl := $3.(*NumberLiteral)
+ if $2.Typ == SUB {
+ nl.Val *= -1
+ }
+ nl.PosRange.Start = $2.Pos
+ $$ = nl
+ }
+ ;
+
/*
* Function calls.
*/
@@ -397,7 +456,7 @@ function_call : IDENTIFIER function_call_body
if !exist{
yylex.(*parser).addParseErrf($1.PositionRange(),"unknown function with name %q", $1.Val)
}
- if fn != nil && fn.Experimental && !EnableExperimentalFunctions {
+ if fn != nil && fn.Experimental && !yylex.(*parser).options.EnableExperimentalFunctions {
yylex.(*parser).addParseErrf($1.PositionRange(),"function %q is not enabled", $1.Val)
}
$$ = &Call{
@@ -697,7 +756,7 @@ metric : metric_identifier label_set
;
-metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP | RANGE | ANCHORED | SMOOTHED;
+metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | FILL | FILL_LEFT | FILL_RIGHT | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP | RANGE | ANCHORED | SMOOTHED;
label_set : LEFT_BRACE label_set_list RIGHT_BRACE
{ $$ = labels.New($2...) }
@@ -791,14 +850,15 @@ series_item : BLANK
// Histogram descriptions (part of unit testing).
| histogram_series_value
{
- $$ = []SequenceValue{{Histogram:$1}}
+ $$ = []SequenceValue{yylex.(*parser).newHistogramSequenceValue($1)}
}
| histogram_series_value TIMES uint
{
$$ = []SequenceValue{}
// Add an additional value for time 0, which we ignore in tests.
+ sv := yylex.(*parser).newHistogramSequenceValue($1)
for i:=uint64(0); i <= $3; i++{
- $$ = append($$, SequenceValue{Histogram:$1})
+ $$ = append($$, sv)
//$1 += $2
}
}
@@ -954,7 +1014,7 @@ counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET |
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO;
// Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
-maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP | RANGE | ANCHORED | SMOOTHED;
+maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | FILL | FILL_LEFT | FILL_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP | RANGE | ANCHORED | SMOOTHED;
unary_op : ADD | SUB;
@@ -1162,7 +1222,7 @@ offset_duration_expr : number_duration_literal
}
| duration_expr
;
-
+
min_max: MIN | MAX ;
duration_expr : number_duration_literal
@@ -1277,14 +1337,14 @@ duration_expr : number_duration_literal
;
paren_duration_expr : LEFT_PAREN duration_expr RIGHT_PAREN
- {
+ {
yylex.(*parser).experimentalDurationExpr($2.(Expr))
if durationExpr, ok := $2.(*DurationExpr); ok {
durationExpr.Wrapped = true
$$ = durationExpr
break
}
- $$ = $2
+ $$ = $2
}
;
diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go
index f5feec0b55..3a69f55516 100644
--- a/promql/parser/generated_parser.y.go
+++ b/promql/parser/generated_parser.y.go
@@ -113,31 +113,34 @@ const BOOL = 57420
const BY = 57421
const GROUP_LEFT = 57422
const GROUP_RIGHT = 57423
-const IGNORING = 57424
-const OFFSET = 57425
-const SMOOTHED = 57426
-const ANCHORED = 57427
-const ON = 57428
-const WITHOUT = 57429
-const keywordsEnd = 57430
-const preprocessorStart = 57431
-const START = 57432
-const END = 57433
-const STEP = 57434
-const RANGE = 57435
-const preprocessorEnd = 57436
-const counterResetHintsStart = 57437
-const UNKNOWN_COUNTER_RESET = 57438
-const COUNTER_RESET = 57439
-const NOT_COUNTER_RESET = 57440
-const GAUGE_TYPE = 57441
-const counterResetHintsEnd = 57442
-const startSymbolsStart = 57443
-const START_METRIC = 57444
-const START_SERIES_DESCRIPTION = 57445
-const START_EXPRESSION = 57446
-const START_METRIC_SELECTOR = 57447
-const startSymbolsEnd = 57448
+const FILL = 57424
+const FILL_LEFT = 57425
+const FILL_RIGHT = 57426
+const IGNORING = 57427
+const OFFSET = 57428
+const SMOOTHED = 57429
+const ANCHORED = 57430
+const ON = 57431
+const WITHOUT = 57432
+const keywordsEnd = 57433
+const preprocessorStart = 57434
+const START = 57435
+const END = 57436
+const STEP = 57437
+const RANGE = 57438
+const preprocessorEnd = 57439
+const counterResetHintsStart = 57440
+const UNKNOWN_COUNTER_RESET = 57441
+const COUNTER_RESET = 57442
+const NOT_COUNTER_RESET = 57443
+const GAUGE_TYPE = 57444
+const counterResetHintsEnd = 57445
+const startSymbolsStart = 57446
+const START_METRIC = 57447
+const START_SERIES_DESCRIPTION = 57448
+const START_EXPRESSION = 57449
+const START_METRIC_SELECTOR = 57450
+const startSymbolsEnd = 57451
var yyToknames = [...]string{
"$end",
@@ -221,6 +224,9 @@ var yyToknames = [...]string{
"BY",
"GROUP_LEFT",
"GROUP_RIGHT",
+ "FILL",
+ "FILL_LEFT",
+ "FILL_RIGHT",
"IGNORING",
"OFFSET",
"SMOOTHED",
@@ -258,376 +264,403 @@ var yyExca = [...]int16{
-1, 1,
1, -1,
-2, 0,
- -1, 41,
- 1, 150,
- 10, 150,
- 24, 150,
+ -1, 44,
+ 1, 161,
+ 10, 161,
+ 24, 161,
-2, 0,
- -1, 72,
- 2, 193,
- 15, 193,
- 79, 193,
- 87, 193,
- -2, 107,
- -1, 73,
- 2, 194,
- 15, 194,
- 79, 194,
- 87, 194,
- -2, 108,
- -1, 74,
- 2, 195,
- 15, 195,
- 79, 195,
- 87, 195,
- -2, 110,
-1, 75,
- 2, 196,
- 15, 196,
- 79, 196,
- 87, 196,
- -2, 111,
- -1, 76,
- 2, 197,
- 15, 197,
- 79, 197,
- 87, 197,
- -2, 112,
- -1, 77,
- 2, 198,
- 15, 198,
- 79, 198,
- 87, 198,
- -2, 117,
- -1, 78,
- 2, 199,
- 15, 199,
- 79, 199,
- 87, 199,
- -2, 119,
- -1, 79,
- 2, 200,
- 15, 200,
- 79, 200,
- 87, 200,
- -2, 121,
- -1, 80,
- 2, 201,
- 15, 201,
- 79, 201,
- 87, 201,
- -2, 122,
- -1, 81,
- 2, 202,
- 15, 202,
- 79, 202,
- 87, 202,
- -2, 123,
- -1, 82,
- 2, 203,
- 15, 203,
- 79, 203,
- 87, 203,
- -2, 124,
- -1, 83,
2, 204,
15, 204,
79, 204,
- 87, 204,
- -2, 125,
- -1, 84,
+ 90, 204,
+ -2, 115,
+ -1, 76,
2, 205,
15, 205,
79, 205,
- 87, 205,
- -2, 129,
- -1, 85,
+ 90, 205,
+ -2, 116,
+ -1, 77,
2, 206,
15, 206,
79, 206,
- 87, 206,
+ 90, 206,
+ -2, 118,
+ -1, 78,
+ 2, 207,
+ 15, 207,
+ 79, 207,
+ 90, 207,
+ -2, 119,
+ -1, 79,
+ 2, 208,
+ 15, 208,
+ 79, 208,
+ 90, 208,
+ -2, 123,
+ -1, 80,
+ 2, 209,
+ 15, 209,
+ 79, 209,
+ 90, 209,
+ -2, 128,
+ -1, 81,
+ 2, 210,
+ 15, 210,
+ 79, 210,
+ 90, 210,
-2, 130,
- -1, 137,
- 41, 274,
- 42, 274,
- 52, 274,
- 53, 274,
- 57, 274,
+ -1, 82,
+ 2, 211,
+ 15, 211,
+ 79, 211,
+ 90, 211,
+ -2, 132,
+ -1, 83,
+ 2, 212,
+ 15, 212,
+ 79, 212,
+ 90, 212,
+ -2, 133,
+ -1, 84,
+ 2, 213,
+ 15, 213,
+ 79, 213,
+ 90, 213,
+ -2, 134,
+ -1, 85,
+ 2, 214,
+ 15, 214,
+ 79, 214,
+ 90, 214,
+ -2, 135,
+ -1, 86,
+ 2, 215,
+ 15, 215,
+ 79, 215,
+ 90, 215,
+ -2, 136,
+ -1, 87,
+ 2, 216,
+ 15, 216,
+ 79, 216,
+ 90, 216,
+ -2, 140,
+ -1, 88,
+ 2, 217,
+ 15, 217,
+ 79, 217,
+ 90, 217,
+ -2, 141,
+ -1, 140,
+ 41, 288,
+ 42, 288,
+ 52, 288,
+ 53, 288,
+ 57, 288,
-2, 22,
- -1, 251,
- 9, 259,
- 12, 259,
- 13, 259,
- 18, 259,
- 19, 259,
- 25, 259,
- 41, 259,
- 47, 259,
- 48, 259,
- 51, 259,
- 57, 259,
- 62, 259,
- 63, 259,
- 64, 259,
- 65, 259,
- 66, 259,
- 67, 259,
- 68, 259,
- 69, 259,
- 70, 259,
- 71, 259,
- 72, 259,
- 73, 259,
- 74, 259,
- 75, 259,
- 79, 259,
- 83, 259,
- 84, 259,
- 85, 259,
- 87, 259,
- 90, 259,
- 91, 259,
- 92, 259,
- 93, 259,
+ -1, 258,
+ 9, 273,
+ 12, 273,
+ 13, 273,
+ 18, 273,
+ 19, 273,
+ 25, 273,
+ 41, 273,
+ 47, 273,
+ 48, 273,
+ 51, 273,
+ 57, 273,
+ 62, 273,
+ 63, 273,
+ 64, 273,
+ 65, 273,
+ 66, 273,
+ 67, 273,
+ 68, 273,
+ 69, 273,
+ 70, 273,
+ 71, 273,
+ 72, 273,
+ 73, 273,
+ 74, 273,
+ 75, 273,
+ 79, 273,
+ 82, 273,
+ 83, 273,
+ 84, 273,
+ 86, 273,
+ 87, 273,
+ 88, 273,
+ 90, 273,
+ 93, 273,
+ 94, 273,
+ 95, 273,
+ 96, 273,
-2, 0,
- -1, 252,
- 9, 259,
- 12, 259,
- 13, 259,
- 18, 259,
- 19, 259,
- 25, 259,
- 41, 259,
- 47, 259,
- 48, 259,
- 51, 259,
- 57, 259,
- 62, 259,
- 63, 259,
- 64, 259,
- 65, 259,
- 66, 259,
- 67, 259,
- 68, 259,
- 69, 259,
- 70, 259,
- 71, 259,
- 72, 259,
- 73, 259,
- 74, 259,
- 75, 259,
- 79, 259,
- 83, 259,
- 84, 259,
- 85, 259,
- 87, 259,
- 90, 259,
- 91, 259,
- 92, 259,
- 93, 259,
+ -1, 259,
+ 9, 273,
+ 12, 273,
+ 13, 273,
+ 18, 273,
+ 19, 273,
+ 25, 273,
+ 41, 273,
+ 47, 273,
+ 48, 273,
+ 51, 273,
+ 57, 273,
+ 62, 273,
+ 63, 273,
+ 64, 273,
+ 65, 273,
+ 66, 273,
+ 67, 273,
+ 68, 273,
+ 69, 273,
+ 70, 273,
+ 71, 273,
+ 72, 273,
+ 73, 273,
+ 74, 273,
+ 75, 273,
+ 79, 273,
+ 82, 273,
+ 83, 273,
+ 84, 273,
+ 86, 273,
+ 87, 273,
+ 88, 273,
+ 90, 273,
+ 93, 273,
+ 94, 273,
+ 95, 273,
+ 96, 273,
-2, 0,
}
const yyPrivate = 57344
-const yyLast = 1050
+const yyLast = 1224
var yyAct = [...]int16{
- 58, 186, 413, 411, 341, 418, 286, 243, 197, 95,
- 189, 48, 355, 144, 70, 227, 93, 251, 252, 356,
- 159, 190, 65, 120, 17, 88, 127, 130, 128, 129,
- 22, 425, 426, 427, 428, 131, 249, 121, 124, 335,
- 250, 67, 132, 126, 408, 407, 377, 332, 125, 123,
- 331, 102, 126, 122, 336, 154, 324, 6, 397, 18,
- 19, 111, 112, 20, 135, 114, 137, 119, 101, 375,
- 337, 323, 375, 330, 11, 12, 14, 15, 16, 21,
- 23, 25, 26, 27, 28, 29, 33, 34, 43, 133,
- 329, 13, 116, 118, 117, 24, 38, 37, 146, 30,
- 402, 124, 31, 32, 35, 36, 130, 412, 138, 396,
- 194, 125, 123, 328, 131, 126, 365, 182, 239, 401,
- 193, 199, 204, 205, 206, 207, 208, 209, 177, 363,
- 362, 181, 200, 200, 200, 200, 200, 200, 200, 178,
- 120, 238, 223, 201, 201, 201, 201, 201, 201, 201,
- 212, 215, 134, 200, 136, 211, 210, 2, 3, 4,
- 5, 222, 233, 221, 201, 245, 235, 384, 333, 371,
- 228, 247, 229, 360, 370, 359, 246, 358, 188, 273,
- 140, 368, 114, 195, 119, 194, 277, 139, 62, 369,
- 268, 237, 229, 271, 185, 193, 441, 200, 61, 196,
- 367, 201, 273, 383, 155, 278, 279, 280, 201, 116,
- 118, 117, 231, 200, 236, 121, 124, 195, 382, 440,
- 86, 218, 230, 232, 201, 381, 125, 123, 276, 275,
- 126, 122, 231, 196, 274, 146, 87, 132, 439, 327,
- 429, 438, 230, 232, 248, 141, 184, 183, 419, 253,
- 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
- 264, 265, 266, 267, 334, 357, 191, 192, 214, 353,
- 354, 202, 203, 361, 121, 124, 88, 364, 283, 7,
- 39, 213, 282, 199, 200, 125, 123, 395, 200, 126,
- 122, 366, 10, 194, 200, 201, 394, 281, 393, 201,
- 392, 391, 90, 193, 390, 201, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
- 174, 389, 194, 388, 120, 195, 373, 387, 386, 385,
- 153, 99, 193, 62, 442, 374, 376, 200, 378, 185,
- 56, 196, 40, 61, 379, 380, 89, 152, 201, 151,
- 1, 100, 102, 103, 195, 104, 105, 175, 71, 108,
- 109, 398, 111, 112, 113, 86, 114, 115, 119, 101,
- 196, 66, 200, 55, 9, 9, 54, 404, 8, 53,
- 406, 87, 41, 201, 52, 158, 410, 51, 414, 415,
- 416, 184, 183, 116, 118, 117, 421, 420, 423, 422,
- 417, 430, 50, 49, 289, 47, 156, 216, 147, 46,
- 431, 432, 200, 372, 299, 433, 202, 203, 145, 96,
- 305, 435, 157, 201, 403, 437, 326, 288, 147, 94,
- 436, 97, 45, 44, 57, 242, 434, 234, 145, 338,
- 443, 200, 97, 98, 121, 124, 143, 240, 284, 301,
- 302, 97, 201, 303, 91, 125, 123, 424, 187, 126,
- 122, 316, 287, 59, 290, 292, 294, 295, 296, 304,
- 306, 309, 310, 311, 312, 313, 317, 318, 142, 0,
- 291, 293, 297, 298, 300, 307, 322, 321, 308, 289,
- 96, 0, 314, 315, 319, 320, 226, 150, 405, 299,
- 94, 225, 149, 0, 0, 305, 0, 0, 92, 285,
- 0, 0, 288, 97, 224, 148, 62, 121, 124, 0,
- 0, 0, 272, 0, 0, 0, 61, 0, 125, 123,
- 0, 0, 126, 122, 301, 302, 0, 0, 303, 0,
- 0, 0, 0, 0, 0, 0, 316, 0, 86, 290,
- 292, 294, 295, 296, 304, 306, 309, 310, 311, 312,
- 313, 317, 318, 0, 87, 291, 293, 297, 298, 300,
- 307, 322, 321, 308, 184, 183, 0, 314, 315, 319,
- 320, 62, 0, 120, 60, 88, 0, 63, 0, 0,
- 22, 61, 0, 0, 217, 0, 0, 64, 0, 269,
- 270, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 100, 102, 0, 86, 0, 0, 0, 0, 0, 18,
- 19, 111, 112, 20, 0, 114, 115, 119, 101, 87,
- 0, 0, 0, 0, 72, 73, 74, 75, 76, 77,
- 78, 79, 80, 81, 82, 83, 84, 85, 0, 0,
- 400, 13, 116, 118, 117, 24, 38, 37, 399, 30,
- 0, 0, 31, 32, 68, 69, 62, 42, 0, 60,
- 88, 0, 63, 0, 0, 22, 61, 121, 124, 0,
- 0, 0, 64, 0, 121, 124, 0, 0, 125, 123,
- 0, 0, 126, 122, 0, 125, 123, 0, 86, 126,
- 122, 0, 0, 0, 18, 19, 0, 0, 20, 0,
- 0, 0, 0, 0, 87, 0, 0, 0, 0, 72,
- 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
- 83, 84, 85, 0, 0, 0, 13, 0, 0, 220,
- 24, 38, 37, 0, 30, 0, 325, 31, 32, 68,
- 69, 62, 0, 0, 60, 88, 0, 63, 121, 124,
- 22, 61, 0, 0, 0, 0, 0, 64, 0, 125,
- 123, 0, 0, 126, 122, 0, 0, 0, 0, 0,
- 121, 124, 0, 86, 0, 0, 0, 0, 0, 18,
- 19, 125, 123, 20, 0, 126, 122, 0, 0, 87,
- 0, 0, 0, 0, 72, 73, 74, 75, 76, 77,
- 78, 79, 80, 81, 82, 83, 84, 85, 17, 39,
- 0, 13, 0, 0, 22, 24, 38, 37, 0, 30,
- 340, 0, 31, 32, 68, 69, 0, 339, 0, 0,
- 0, 343, 344, 342, 349, 351, 348, 350, 345, 346,
- 347, 352, 241, 18, 19, 0, 194, 20, 0, 244,
- 0, 0, 0, 247, 0, 0, 193, 0, 11, 12,
- 14, 15, 16, 21, 23, 25, 26, 27, 28, 29,
- 33, 34, 0, 0, 120, 13, 0, 0, 195, 24,
- 38, 37, 219, 30, 0, 0, 31, 32, 35, 36,
- 0, 0, 0, 120, 196, 0, 0, 0, 0, 0,
- 0, 100, 102, 103, 0, 104, 105, 106, 107, 108,
- 109, 110, 111, 112, 113, 0, 114, 115, 119, 101,
- 100, 102, 103, 0, 104, 105, 106, 107, 108, 109,
- 110, 111, 112, 113, 198, 114, 115, 119, 101, 120,
- 0, 62, 0, 116, 118, 117, 0, 185, 176, 0,
- 0, 61, 0, 0, 0, 62, 0, 0, 0, 0,
- 0, 185, 116, 118, 117, 61, 100, 102, 103, 0,
- 104, 105, 106, 86, 108, 109, 110, 111, 112, 113,
- 0, 114, 115, 119, 101, 0, 0, 86, 0, 87,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 184,
- 183, 0, 0, 87, 0, 0, 0, 0, 116, 118,
- 117, 0, 0, 184, 183, 409, 0, 0, 0, 0,
- 0, 0, 0, 0, 202, 203, 343, 344, 342, 349,
- 351, 348, 350, 345, 346, 347, 352, 0, 179, 180,
+ 61, 363, 190, 429, 351, 436, 431, 293, 247, 201,
+ 98, 51, 147, 193, 369, 96, 231, 412, 413, 370,
+ 132, 133, 68, 130, 73, 163, 194, 131, 443, 444,
+ 445, 446, 134, 135, 256, 253, 254, 255, 257, 258,
+ 259, 129, 70, 426, 123, 425, 124, 127, 391, 342,
+ 157, 458, 223, 198, 447, 389, 415, 128, 126, 345,
+ 451, 129, 125, 197, 414, 465, 398, 138, 379, 140,
+ 6, 103, 105, 106, 346, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 199, 117, 118, 122, 104,
+ 347, 136, 343, 46, 124, 127, 389, 133, 334, 251,
+ 397, 200, 149, 377, 192, 128, 126, 199, 134, 129,
+ 125, 198, 141, 333, 420, 396, 119, 121, 120, 123,
+ 186, 197, 395, 200, 203, 208, 209, 210, 211, 212,
+ 213, 181, 376, 419, 430, 204, 204, 204, 204, 204,
+ 204, 204, 182, 199, 185, 227, 205, 205, 205, 205,
+ 205, 205, 205, 216, 219, 215, 204, 341, 214, 200,
+ 137, 117, 139, 122, 339, 385, 237, 205, 239, 464,
+ 384, 249, 226, 2, 3, 4, 5, 91, 290, 225,
+ 340, 123, 289, 280, 250, 383, 364, 338, 124, 127,
+ 284, 119, 121, 120, 275, 195, 196, 288, 218, 128,
+ 126, 204, 460, 129, 125, 205, 280, 278, 158, 105,
+ 374, 217, 205, 286, 287, 423, 243, 204, 241, 114,
+ 115, 124, 127, 117, 373, 122, 104, 372, 205, 222,
+ 143, 437, 128, 126, 124, 127, 129, 125, 65, 242,
+ 149, 240, 337, 142, 42, 128, 126, 418, 64, 129,
+ 125, 285, 252, 119, 121, 120, 365, 366, 260, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272, 273, 274, 344, 371, 127, 367, 368, 198, 283,
+ 375, 124, 127, 282, 378, 128, 126, 281, 197, 129,
+ 203, 204, 128, 126, 135, 204, 129, 125, 198, 380,
+ 65, 204, 205, 144, 7, 409, 205, 408, 197, 407,
+ 64, 406, 205, 164, 165, 166, 167, 168, 169, 170,
+ 171, 172, 173, 174, 175, 176, 177, 178, 202, 232,
+ 199, 233, 89, 156, 417, 65, 387, 405, 463, 233,
+ 404, 189, 102, 224, 403, 64, 200, 204, 90, 388,
+ 390, 10, 392, 124, 127, 393, 394, 462, 205, 402,
+ 461, 93, 124, 127, 128, 126, 401, 89, 129, 125,
+ 400, 235, 399, 128, 126, 416, 410, 129, 125, 235,
+ 8, 234, 236, 90, 44, 59, 204, 411, 43, 234,
+ 236, 92, 422, 188, 187, 1, 179, 205, 424, 155,
+ 428, 154, 230, 432, 433, 434, 150, 229, 74, 335,
+ 439, 438, 441, 440, 449, 450, 148, 435, 58, 452,
+ 228, 206, 207, 448, 336, 57, 296, 56, 386, 100,
+ 204, 69, 453, 454, 9, 9, 309, 455, 99, 55,
+ 457, 205, 315, 124, 127, 162, 421, 150, 97, 295,
+ 99, 54, 459, 53, 128, 126, 238, 148, 129, 125,
+ 97, 100, 153, 204, 466, 146, 52, 152, 95, 50,
+ 100, 311, 312, 100, 205, 313, 160, 220, 49, 161,
+ 151, 48, 159, 326, 47, 60, 297, 299, 301, 302,
+ 303, 314, 316, 319, 320, 321, 322, 323, 327, 328,
+ 246, 456, 298, 300, 304, 305, 306, 307, 308, 310,
+ 317, 332, 331, 318, 296, 348, 101, 324, 325, 329,
+ 330, 245, 244, 291, 309, 198, 94, 442, 248, 191,
+ 315, 350, 251, 294, 292, 197, 62, 295, 349, 145,
+ 0, 0, 353, 354, 352, 359, 361, 358, 360, 355,
+ 356, 357, 362, 0, 0, 0, 0, 199, 0, 311,
+ 312, 0, 0, 313, 0, 0, 0, 0, 0, 0,
+ 0, 326, 0, 200, 297, 299, 301, 302, 303, 314,
+ 316, 319, 320, 321, 322, 323, 327, 328, 0, 0,
+ 298, 300, 304, 305, 306, 307, 308, 310, 317, 332,
+ 331, 318, 0, 0, 0, 324, 325, 329, 330, 65,
+ 0, 0, 63, 91, 0, 66, 427, 0, 25, 64,
+ 0, 0, 221, 0, 0, 67, 0, 353, 354, 352,
+ 359, 361, 358, 360, 355, 356, 357, 362, 0, 0,
+ 0, 89, 0, 0, 0, 0, 0, 21, 22, 0,
+ 0, 23, 0, 0, 0, 0, 0, 90, 0, 0,
+ 0, 0, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 86, 87, 88, 0, 0, 0, 13,
+ 0, 0, 16, 17, 18, 0, 27, 41, 40, 0,
+ 33, 0, 0, 34, 35, 71, 72, 65, 45, 0,
+ 63, 91, 0, 66, 0, 0, 25, 64, 0, 0,
+ 0, 0, 0, 67, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 89,
+ 0, 0, 0, 0, 0, 21, 22, 0, 0, 23,
+ 0, 0, 0, 0, 0, 90, 0, 0, 0, 0,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 0, 0, 0, 13, 0, 0,
+ 16, 17, 18, 0, 27, 41, 40, 0, 33, 0,
+ 0, 34, 35, 71, 72, 65, 0, 0, 63, 91,
+ 0, 66, 0, 0, 25, 64, 0, 0, 0, 0,
+ 0, 67, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 89, 0, 0,
+ 0, 0, 0, 21, 22, 0, 0, 23, 0, 0,
+ 0, 0, 0, 90, 0, 0, 0, 0, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 0, 0, 0, 13, 0, 0, 16, 17,
+ 18, 0, 27, 41, 40, 0, 33, 20, 91, 34,
+ 35, 71, 72, 25, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 21, 22, 0, 0, 23, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 11, 12, 14,
+ 15, 19, 24, 26, 28, 29, 30, 31, 32, 36,
+ 37, 0, 0, 0, 13, 0, 0, 16, 17, 18,
+ 0, 27, 41, 40, 0, 33, 20, 42, 34, 35,
+ 38, 39, 25, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 0, 0, 23, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 11, 12, 14, 15,
+ 19, 24, 26, 28, 29, 30, 31, 32, 36, 37,
+ 123, 0, 0, 13, 0, 0, 16, 17, 18, 0,
+ 27, 41, 40, 0, 33, 0, 0, 34, 35, 38,
+ 39, 123, 0, 0, 0, 0, 0, 103, 105, 106,
+ 0, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 0, 117, 118, 122, 104, 0, 0, 103, 105,
+ 106, 0, 107, 108, 109, 0, 111, 112, 113, 114,
+ 115, 116, 382, 117, 118, 122, 104, 0, 0, 65,
+ 0, 123, 119, 121, 120, 189, 65, 0, 0, 64,
+ 0, 381, 189, 0, 0, 0, 64, 0, 0, 0,
+ 0, 0, 0, 119, 121, 120, 0, 0, 103, 105,
+ 106, 89, 107, 108, 0, 0, 111, 112, 89, 114,
+ 115, 116, 180, 117, 118, 122, 104, 90, 0, 65,
+ 0, 0, 0, 0, 90, 189, 65, 188, 187, 64,
+ 0, 0, 279, 0, 188, 187, 64, 123, 0, 0,
+ 0, 0, 0, 119, 121, 120, 0, 0, 0, 0,
+ 0, 89, 0, 0, 0, 206, 207, 0, 89, 0,
+ 0, 0, 206, 207, 103, 105, 0, 90, 0, 0,
+ 0, 0, 0, 0, 90, 114, 115, 188, 187, 117,
+ 118, 122, 104, 0, 188, 187, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 183, 184, 0, 0, 119,
+ 121, 120, 276, 277,
}
var yyPact = [...]int16{
- 55, 269, 806, 806, 657, 12, -1000, -1000, -1000, 267,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 488,
- -1000, 329, -1000, 889, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -4, 27,
- 222, -1000, -1000, 742, -1000, 742, 263, -1000, 172, 165,
- 230, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 426, -1000,
- -1000, 495, -1000, -1000, 345, 326, -1000, -1000, 31, -1000,
- -58, -58, -58, -58, -58, -58, -58, -58, -58, -58,
- -58, -58, -58, -58, -58, -58, 956, -1000, -1000, 176,
- 942, 324, 324, 324, 324, 324, 324, 222, -52, -1000,
- 266, 266, 572, -1000, 870, 717, 126, -13, -1000, 141,
- 139, 324, 494, -1000, -1000, 168, 188, -1000, -1000, 417,
- -1000, 189, -1000, 116, 847, 742, -1000, -46, -63, -1000,
- 742, 742, 742, 742, 742, 742, 742, 742, 742, 742,
- 742, 742, 742, 742, 742, -1000, -1000, -1000, 507, 219,
- 214, 213, -4, -1000, -1000, 324, -1000, 190, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 101, 101, 276, -1000, -4,
- -1000, 324, 172, 165, 59, 59, -13, -13, -13, -13,
- -1000, -1000, -1000, 487, -1000, -1000, 49, -1000, 889, -1000,
- -1000, -1000, -1000, 739, -1000, 406, -1000, 88, -1000, -1000,
- -1000, -1000, -1000, 48, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, 21, 142, 13, -1000, -1000, -1000, 813, 9, 266,
- 266, 266, 266, 126, 126, 569, 569, 569, 310, 935,
- 569, 569, 310, 126, 126, 569, 126, 9, -1000, 162,
- 160, 158, 324, -13, 108, 107, 324, 717, 94, -1000,
- -1000, -1000, 179, -1000, 167, -1000, -1000, -1000, -1000, -1000,
+ 68, 294, 934, 934, 688, 855, -1000, -1000, -1000, 231,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, 742, 324, -1000, -1000, -1000, -1000,
- -1000, -1000, 53, 53, 20, 53, 155, 155, 201, 150,
- -1000, -1000, 323, 322, 321, 317, 315, 298, 295, 294,
- 292, 290, 281, -1000, -1000, -1000, -1000, -1000, 87, 36,
- 324, 636, -1000, -1000, 643, -1000, 98, -1000, -1000, -1000,
- 402, -1000, 889, 476, -1000, -1000, -1000, 53, -1000, 19,
- 18, 1008, -1000, -1000, -1000, 50, 284, 284, 284, 101,
- 234, 234, 50, 234, 50, -65, -1000, -1000, 233, -1000,
- 324, -1000, -1000, -1000, -1000, -1000, -1000, 53, 53, -1000,
- -1000, -1000, 53, -1000, -1000, -1000, -1000, -1000, -1000, 284,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 324,
- 403, -1000, -1000, -1000, 217, -1000, 174, -1000, 313, -1000,
- -1000, -1000, -1000, -1000,
+ -1000, -1000, 448, -1000, 340, -1000, 996, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 5, 18, 279, -1000, -1000, 776, -1000, 776, 164,
+ -1000, 228, 215, 288, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 445, -1000, -1000, 460, -1000, -1000, 397, 329, -1000,
+ -1000, 26, -1000, -53, -53, -53, -53, -53, -53, -53,
+ -53, -53, -53, -53, -53, -53, -53, -53, -53, 1120,
+ -1000, -1000, 102, 326, 1077, 1077, 1077, 1077, 1077, 1077,
+ 279, -58, -1000, 196, 196, 600, -1000, 30, 321, 105,
+ -15, -1000, 157, 150, 1077, 400, -1000, -1000, 327, 335,
+ -1000, -1000, 436, -1000, 216, -1000, 214, 516, 776, -1000,
+ -47, -51, -41, -1000, 776, 776, 776, 776, 776, 776,
+ 776, 776, 776, 776, 776, 776, 776, 776, 776, -1000,
+ -1000, -1000, 1127, 272, 268, 264, 5, -1000, -1000, 1077,
+ -1000, 236, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 269,
+ 269, 176, -1000, 5, -1000, 1077, 228, 215, 233, 233,
+ -15, -15, -15, -15, -1000, -1000, -1000, 512, -1000, -1000,
+ 91, -1000, 996, -1000, -1000, -1000, -1000, 402, -1000, 404,
+ -1000, 162, -1000, -1000, -1000, -1000, -1000, 155, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 23, 66, 33, -1000, -1000,
+ -1000, 514, 167, 171, 171, 171, 196, 196, 196, 196,
+ 105, 105, 1133, 1133, 1133, 1067, 1017, 1133, 1133, 1067,
+ 105, 105, 1133, 105, 167, -1000, 212, 209, 195, 1077,
+ -15, 110, 81, 1077, 321, 46, -1000, -1000, -1000, 1070,
+ -1000, 163, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 776, 1077, -1000, -1000, -1000, -1000,
+ -1000, -1000, 36, 36, 22, 36, 83, 83, 98, 49,
+ -1000, -1000, 366, 364, 360, 353, 338, 334, 331, 305,
+ 303, 301, 299, -1000, 291, -67, -65, -1000, -1000, -1000,
+ -1000, -1000, 42, 34, 1077, 312, -1000, -1000, 240, -1000,
+ 112, -1000, -1000, -1000, 424, -1000, 996, 193, -1000, -1000,
+ -1000, 36, -1000, 19, 17, 599, -1000, -1000, -1000, 77,
+ 289, 289, 289, 269, 217, 217, 77, 217, 77, -71,
+ 32, 229, 171, 171, -1000, -1000, 53, -1000, 1077, -1000,
+ -1000, -1000, -1000, -1000, -1000, 36, 36, -1000, -1000, -1000,
+ 36, -1000, -1000, -1000, -1000, -1000, -1000, 289, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 29, -1000,
+ -1000, 1077, 180, -1000, -1000, -1000, 336, -1000, -1000, 147,
+ -1000, 44, -1000, -1000, -1000, -1000, -1000,
}
var yyPgo = [...]int16{
- 0, 478, 13, 463, 6, 15, 462, 371, 22, 458,
- 9, 457, 14, 292, 378, 454, 16, 448, 19, 12,
- 447, 443, 7, 439, 4, 5, 436, 3, 2, 10,
- 435, 21, 1, 434, 433, 26, 204, 432, 422, 88,
- 409, 407, 28, 406, 41, 405, 11, 403, 402, 387,
- 385, 384, 379, 376, 373, 340, 0, 358, 8, 357,
- 350, 342,
+ 0, 539, 12, 536, 7, 16, 533, 431, 22, 529,
+ 10, 527, 24, 351, 380, 526, 15, 523, 19, 14,
+ 522, 516, 8, 515, 4, 5, 501, 3, 6, 13,
+ 500, 26, 2, 485, 484, 23, 208, 482, 481, 479,
+ 93, 478, 477, 27, 476, 1, 42, 469, 11, 466,
+ 453, 451, 445, 439, 427, 425, 418, 385, 0, 408,
+ 9, 396, 395, 388,
}
var yyR1 = [...]int8{
- 0, 60, 60, 60, 60, 60, 60, 60, 39, 39,
- 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
- 39, 39, 39, 34, 34, 34, 34, 35, 35, 37,
- 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
- 37, 37, 37, 37, 37, 36, 38, 38, 50, 50,
- 43, 43, 43, 43, 18, 18, 18, 18, 17, 17,
- 17, 4, 4, 4, 40, 42, 42, 41, 41, 41,
- 51, 58, 47, 47, 48, 49, 33, 33, 33, 9,
- 9, 45, 53, 53, 53, 53, 53, 53, 54, 55,
- 55, 55, 44, 44, 44, 1, 1, 1, 2, 2,
- 2, 2, 2, 2, 2, 14, 14, 7, 7, 7,
+ 0, 62, 62, 62, 62, 62, 62, 62, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 40, 40, 34, 34, 34, 34, 35, 35, 38,
+ 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
+ 38, 38, 38, 38, 38, 36, 39, 39, 52, 52,
+ 44, 44, 44, 44, 37, 37, 37, 37, 37, 37,
+ 18, 18, 18, 18, 17, 17, 17, 4, 4, 4,
+ 45, 45, 41, 43, 43, 42, 42, 42, 53, 60,
+ 49, 49, 50, 51, 33, 33, 33, 9, 9, 47,
+ 55, 55, 55, 55, 55, 55, 56, 57, 57, 57,
+ 46, 46, 46, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 14, 14, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 13, 13, 13, 13, 15,
- 15, 15, 16, 16, 16, 16, 16, 16, 16, 61,
- 21, 21, 21, 21, 20, 20, 20, 20, 20, 20,
- 20, 20, 20, 30, 30, 30, 22, 22, 22, 22,
- 23, 23, 23, 24, 24, 24, 24, 24, 24, 24,
- 24, 24, 24, 24, 25, 25, 26, 26, 26, 11,
- 11, 11, 11, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 13, 13, 13, 13,
+ 15, 15, 15, 16, 16, 16, 16, 16, 16, 16,
+ 63, 21, 21, 21, 21, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 30, 30, 30, 22, 22, 22,
+ 22, 23, 23, 23, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 25, 25, 26, 26, 26,
+ 11, 11, 11, 11, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 8, 8, 5, 5, 5, 5, 46, 46, 29, 29,
- 31, 31, 32, 32, 28, 27, 27, 52, 10, 19,
- 19, 59, 59, 59, 59, 59, 59, 59, 59, 59,
- 59, 12, 12, 56, 56, 56, 56, 56, 56, 56,
- 56, 56, 56, 56, 56, 57,
+ 6, 6, 6, 6, 8, 8, 5, 5, 5, 5,
+ 48, 48, 29, 29, 31, 31, 32, 32, 28, 27,
+ 27, 54, 10, 19, 19, 61, 61, 61, 61, 61,
+ 61, 61, 61, 61, 61, 12, 12, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 59,
}
var yyR2 = [...]int8{
@@ -636,126 +669,131 @@ var yyR2 = [...]int8{
1, 1, 1, 3, 3, 2, 2, 2, 2, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 1, 0, 1, 3, 3,
- 1, 1, 3, 3, 3, 4, 2, 1, 3, 1,
- 2, 1, 1, 1, 2, 3, 2, 3, 1, 2,
- 3, 1, 3, 3, 2, 2, 3, 5, 3, 1,
- 1, 4, 6, 5, 6, 5, 4, 3, 2, 2,
- 1, 1, 3, 4, 2, 3, 1, 2, 3, 3,
- 1, 3, 3, 2, 1, 2, 1, 1, 1, 1,
+ 1, 1, 3, 3, 1, 3, 3, 3, 5, 5,
+ 3, 4, 2, 1, 3, 1, 2, 1, 1, 1,
+ 3, 4, 2, 3, 2, 3, 1, 2, 3, 1,
+ 3, 3, 2, 2, 3, 5, 3, 1, 1, 4,
+ 6, 5, 6, 5, 4, 3, 2, 2, 1, 1,
+ 3, 4, 2, 3, 1, 2, 3, 3, 1, 3,
+ 3, 2, 1, 2, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 3, 4, 2, 0, 3,
- 1, 2, 3, 3, 1, 3, 3, 2, 1, 2,
- 0, 3, 2, 1, 1, 3, 1, 3, 4, 1,
- 3, 5, 5, 1, 1, 1, 4, 3, 3, 2,
- 3, 1, 2, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 4, 3, 3, 1, 2, 1,
+ 1, 1, 1, 1, 1, 1, 3, 4, 2, 0,
+ 3, 1, 2, 3, 3, 1, 3, 3, 2, 1,
+ 2, 0, 3, 2, 1, 1, 3, 1, 3, 4,
+ 1, 3, 5, 5, 1, 1, 1, 4, 3, 3,
+ 2, 3, 1, 2, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 4, 3, 3, 1, 2,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 1, 1, 1, 2, 1, 1, 1, 0,
- 1, 1, 2, 3, 3, 4, 4, 6, 7, 4,
- 1, 1, 1, 1, 2, 3, 3, 3, 3, 3,
- 3, 3, 3, 6, 1, 3,
+ 1, 1, 1, 1, 2, 2, 1, 1, 1, 2,
+ 1, 1, 1, 0, 1, 1, 2, 3, 3, 4,
+ 4, 6, 7, 4, 1, 1, 1, 1, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 6, 1, 3,
}
var yyChk = [...]int16{
- -1000, -60, 102, 103, 104, 105, 2, 10, -14, -7,
- -13, 62, 63, 79, 64, 65, 66, 12, 47, 48,
- 51, 67, 18, 68, 83, 69, 70, 71, 72, 73,
- 87, 90, 91, 74, 75, 92, 93, 85, 84, 13,
- -61, -14, 10, -39, -34, -37, -40, -45, -46, -47,
- -48, -49, -51, -52, -53, -54, -55, -33, -56, -3,
- 12, 19, 9, 15, 25, -8, -7, -44, 92, 93,
- -12, -57, 62, 63, 64, 65, 66, 67, 68, 69,
- 70, 71, 72, 73, 74, 75, 41, 57, 13, -55,
- -13, -15, 20, -16, 12, -10, 2, 25, -21, 2,
- 41, 59, 42, 43, 45, 46, 47, 48, 49, 50,
- 51, 52, 53, 54, 56, 57, 83, 85, 84, 58,
- 14, 41, 57, 53, 42, 52, 56, -35, -42, 2,
- 79, 87, 15, -42, -39, -56, -39, -56, -44, 15,
- 15, 15, -1, 20, -2, 12, -10, 2, 20, 7,
- 2, 4, 2, 4, 24, -36, -43, -38, -50, 78,
- -36, -36, -36, -36, -36, -36, -36, -36, -36, -36,
- -36, -36, -36, -36, -36, -59, 2, -46, -8, 92,
- 93, -12, -56, 68, 67, 15, -32, -9, 2, -29,
- -31, 90, 91, 19, 9, 41, 57, -58, 2, -56,
- -46, -8, 92, 93, -56, -56, -56, -56, -56, -56,
- -42, -35, -18, 15, 2, -18, -41, 22, -39, 22,
- 22, 22, 22, -56, 20, 7, 2, -5, 2, 4,
- 54, 44, 55, -5, 20, -16, 25, 2, 25, 2,
- -20, 5, -30, -22, 12, -29, -31, 16, -39, 82,
- 86, 80, 81, -39, -39, -39, -39, -39, -39, -39,
- -39, -39, -39, -39, -39, -39, -39, -39, -46, 92,
- 93, -12, 15, -56, 15, 15, 15, -56, 15, -29,
- -29, 21, 6, 2, -17, 22, -4, -6, 25, 2,
- 62, 78, 63, 79, 64, 65, 66, 80, 81, 12,
- 82, 47, 48, 51, 67, 18, 68, 83, 86, 69,
- 70, 71, 72, 73, 90, 91, 59, 74, 75, 92,
- 93, 85, 84, 22, 7, 7, 20, -2, 25, 2,
+ -1000, -62, 105, 106, 107, 108, 2, 10, -14, -7,
+ -13, 62, 63, 79, 64, 65, 82, 83, 84, 66,
+ 12, 47, 48, 51, 67, 18, 68, 86, 69, 70,
+ 71, 72, 73, 90, 93, 94, 74, 75, 95, 96,
+ 88, 87, 13, -63, -14, 10, -40, -34, -38, -41,
+ -47, -48, -49, -50, -51, -53, -54, -55, -56, -57,
+ -33, -58, -3, 12, 19, 9, 15, 25, -8, -7,
+ -46, 95, 96, -12, -59, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 41,
+ 57, 13, -57, -13, -15, 20, -16, 12, -10, 2,
+ 25, -21, 2, 41, 59, 42, 43, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 56, 57, 86,
+ 88, 87, 58, 14, 41, 57, 53, 42, 52, 56,
+ -35, -43, 2, 79, 90, 15, -43, -40, -58, -40,
+ -58, -46, 15, 15, 15, -1, 20, -2, 12, -10,
+ 2, 20, 7, 2, 4, 2, 4, 24, -36, -37,
+ -44, -39, -52, 78, -36, -36, -36, -36, -36, -36,
+ -36, -36, -36, -36, -36, -36, -36, -36, -36, -61,
+ 2, -48, -8, 95, 96, -12, -58, 68, 67, 15,
+ -32, -9, 2, -29, -31, 93, 94, 19, 9, 41,
+ 57, -60, 2, -58, -48, -8, 95, 96, -58, -58,
+ -58, -58, -58, -58, -43, -35, -18, 15, 2, -18,
+ -42, 22, -40, 22, 22, 22, 22, -58, 20, 7,
+ 2, -5, 2, 4, 54, 44, 55, -5, 20, -16,
+ 25, 2, 25, 2, -20, 5, -30, -22, 12, -29,
+ -31, 16, -40, 82, 83, 84, 85, 89, 80, 81,
+ -40, -40, -40, -40, -40, -40, -40, -40, -40, -40,
+ -40, -40, -40, -40, -40, -48, 95, 96, -12, 15,
+ -58, 15, 15, 15, -58, 15, -29, -29, 21, 6,
+ 2, -17, 22, -4, -6, 25, 2, 62, 78, 63,
+ 79, 64, 65, 66, 80, 81, 82, 83, 84, 12,
+ 85, 47, 48, 51, 67, 18, 68, 86, 89, 69,
+ 70, 71, 72, 73, 93, 94, 59, 74, 75, 95,
+ 96, 88, 87, 22, 7, 7, 20, -2, 25, 2,
25, 2, 26, 26, -31, 26, 41, 57, -23, 24,
17, -24, 30, 28, 29, 35, 36, 37, 33, 31,
- 34, 32, 38, -18, -18, -19, -18, -19, 15, 15,
- 15, -56, 22, 22, -56, 22, -58, 21, 2, 22,
- 7, 2, -39, -56, -28, 19, -28, 26, -28, -22,
- -22, 24, 17, 2, 17, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 22, 22, -56, 22,
- 7, 21, 2, 22, -4, 22, -28, 26, 26, 17,
- -24, -27, 57, -28, -32, -32, -32, -29, -25, 14,
- -25, -27, -25, -27, -11, 96, 97, 98, 99, 7,
- -56, -28, -28, -28, -26, -32, -56, 22, 24, 21,
- 2, 22, 21, -32,
+ 34, 32, 38, -45, 15, -45, -45, -18, -18, -19,
+ -18, -19, 15, 15, 15, -58, 22, 22, -58, 22,
+ -60, 21, 2, 22, 7, 2, -40, -58, -28, 19,
+ -28, 26, -28, -22, -22, 24, 17, 2, 17, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ -48, -8, 84, 83, 22, 22, -58, 22, 7, 21,
+ 2, 22, -4, 22, -28, 26, 26, 17, -24, -27,
+ 57, -28, -32, -32, -32, -29, -25, 14, -25, -27,
+ -25, -27, -11, 99, 100, 101, 102, 22, -48, -45,
+ -45, 7, -58, -28, -28, -28, -26, -32, 22, -58,
+ 22, 24, 21, 2, 22, 21, -32,
}
var yyDef = [...]int16{
- 0, -2, 138, 138, 0, 0, 7, 6, 1, 138,
- 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
- 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
- 126, 127, 128, 129, 130, 131, 132, 133, 134, 0,
- 2, -2, 3, 4, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 0,
- 113, 246, 247, 0, 257, 0, 90, 91, 131, 132,
- 0, 284, -2, -2, -2, -2, -2, -2, -2, -2,
- -2, -2, -2, -2, -2, -2, 240, 241, 0, 5,
- 105, 0, 137, 140, 0, 144, 148, 258, 149, 153,
- 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
- 46, 46, 46, 46, 46, 46, 0, 74, 75, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 25, 26,
- 0, 0, 0, 64, 0, 22, 88, -2, 89, 0,
- 0, 0, 0, 94, 96, 0, 100, 104, 135, 0,
- 141, 0, 147, 0, 152, 0, 45, 50, 51, 47,
+ 0, -2, 149, 149, 0, 0, 7, 6, 1, 149,
+ 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,
+ 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 0, 2, -2, 3, 4, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 0, 124, 260, 261, 0, 271, 0, 98,
+ 99, 142, 143, 0, 298, -2, -2, -2, -2, -2,
+ -2, -2, -2, -2, -2, -2, -2, -2, -2, 254,
+ 255, 0, 5, 113, 0, 148, 151, 0, 155, 159,
+ 272, 160, 164, 46, 46, 46, 46, 46, 46, 46,
+ 46, 46, 46, 46, 46, 46, 46, 46, 46, 0,
+ 82, 83, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 25, 26, 0, 0, 0, 72, 0, 22, 96,
+ -2, 97, 0, 0, 0, 0, 102, 104, 0, 108,
+ 112, 146, 0, 152, 0, 158, 0, 163, 0, 45,
+ 54, 50, 51, 47, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 80,
+ 81, 275, 0, 0, 0, 0, 284, 285, 286, 0,
+ 84, 0, 86, 266, 267, 87, 88, 262, 263, 0,
+ 0, 0, 95, 79, 287, 0, 0, 0, 289, 290,
+ 291, 292, 293, 294, 23, 24, 27, 0, 63, 28,
+ 0, 74, 76, 78, 299, 295, 296, 0, 100, 0,
+ 105, 0, 111, 256, 257, 258, 259, 0, 147, 150,
+ 153, 156, 154, 157, 162, 165, 167, 170, 174, 175,
+ 176, 0, 29, 0, 0, 0, 0, 0, -2, -2,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 276, 0, 0, 0, 0,
+ 288, 0, 0, 0, 0, 0, 264, 265, 89, 0,
+ 94, 0, 62, 65, 67, 68, 69, 218, 219, 220,
+ 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,
+ 251, 252, 253, 73, 77, 0, 101, 103, 106, 110,
+ 107, 109, 0, 0, 0, 0, 0, 0, 0, 0,
+ 180, 182, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 55, 0, 56, 57, 48, 49, 52,
+ 274, 53, 0, 0, 0, 0, 277, 278, 0, 85,
+ 0, 91, 93, 60, 0, 66, 75, 0, 166, 268,
+ 168, 0, 171, 0, 0, 0, 178, 183, 179, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 72, 73, 261, 0, 0,
- 0, 0, 270, 271, 272, 0, 76, 0, 78, 252,
- 253, 79, 80, 248, 249, 0, 0, 0, 87, 71,
- 273, 0, 0, 0, 275, 276, 277, 278, 279, 280,
- 23, 24, 27, 0, 57, 28, 0, 66, 68, 70,
- 285, 281, 282, 0, 92, 0, 97, 0, 103, 242,
- 243, 244, 245, 0, 136, 139, 142, 145, 143, 146,
- 151, 154, 156, 159, 163, 164, 165, 0, 29, 0,
- 0, -2, -2, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 43, 44, 262, 0,
- 0, 0, 0, 274, 0, 0, 0, 0, 0, 250,
- 251, 81, 0, 86, 0, 56, 59, 61, 62, 63,
- 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
- 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
- 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
- 237, 238, 239, 65, 69, 0, 93, 95, 98, 102,
- 99, 101, 0, 0, 0, 0, 0, 0, 0, 0,
- 169, 171, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 48, 49, 52, 260, 53, 0, 0,
- 0, 0, 263, 264, 0, 77, 0, 83, 85, 54,
- 0, 60, 67, 0, 155, 254, 157, 0, 160, 0,
- 0, 0, 167, 172, 168, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 265, 266, 0, 269,
- 0, 82, 84, 55, 58, 283, 158, 0, 0, 166,
- 170, 173, 0, 256, 174, 175, 176, 177, 178, 0,
- 179, 180, 181, 182, 183, 189, 190, 191, 192, 0,
- 0, 161, 162, 255, 0, 187, 0, 267, 0, 185,
- 188, 268, 184, 186,
+ 0, 0, 0, 0, 279, 280, 0, 283, 0, 90,
+ 92, 61, 64, 297, 169, 0, 0, 177, 181, 184,
+ 0, 270, 185, 186, 187, 188, 189, 0, 190, 191,
+ 192, 193, 194, 200, 201, 202, 203, 70, 0, 58,
+ 59, 0, 0, 172, 173, 269, 0, 198, 71, 0,
+ 281, 0, 196, 199, 282, 195, 197,
}
var yyTok1 = [...]int8{
@@ -773,7 +811,7 @@ var yyTok2 = [...]int8{
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
- 102, 103, 104, 105, 106,
+ 102, 103, 104, 105, 106, 107, 108, 109,
}
var yyTok3 = [...]int8{
@@ -1298,44 +1336,83 @@ yydefault:
yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany
yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings
}
- case 54:
+ case 55:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.node = yyDollar[1].node
+ fill := yyDollar[3].node.(*NumberLiteral).Val
+ yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill
+ yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill
+ }
+ case 56:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.node = yyDollar[1].node
+ fill := yyDollar[3].node.(*NumberLiteral).Val
+ yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill
+ }
+ case 57:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.node = yyDollar[1].node
+ fill := yyDollar[3].node.(*NumberLiteral).Val
+ yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill
+ }
+ case 58:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ {
+ yyVAL.node = yyDollar[1].node
+ fill_left := yyDollar[3].node.(*NumberLiteral).Val
+ fill_right := yyDollar[5].node.(*NumberLiteral).Val
+ yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill_left
+ yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill_right
+ }
+ case 59:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ {
+ fill_right := yyDollar[3].node.(*NumberLiteral).Val
+ fill_left := yyDollar[5].node.(*NumberLiteral).Val
+ yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill_left
+ yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill_right
+ }
+ case 60:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.strings = yyDollar[2].strings
}
- case 55:
+ case 61:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.strings = yyDollar[2].strings
}
- case 56:
+ case 62:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.strings = []string{}
}
- case 57:
+ case 63:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("grouping opts", "\"(\"")
yyVAL.strings = nil
}
- case 58:
+ case 64:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val)
}
- case 59:
+ case 65:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.strings = []string{yyDollar[1].item.Val}
}
- case 60:
+ case 66:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"")
yyVAL.strings = yyDollar[1].strings
}
- case 61:
+ case 67:
yyDollar = yyS[yypt-1 : yypt+1]
{
if !model.UTF8Validation.IsValidLabelName(yyDollar[1].item.Val) {
@@ -1343,7 +1420,7 @@ yydefault:
}
yyVAL.item = yyDollar[1].item
}
- case 62:
+ case 68:
yyDollar = yyS[yypt-1 : yypt+1]
{
unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val)
@@ -1354,20 +1431,35 @@ yydefault:
yyVAL.item.Pos++
yyVAL.item.Val = unquoted
}
- case 63:
+ case 69:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("grouping opts", "label")
yyVAL.item = Item{}
}
- case 64:
+ case 70:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.node = yyDollar[2].node.(*NumberLiteral)
+ }
+ case 71:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ {
+ nl := yyDollar[3].node.(*NumberLiteral)
+ if yyDollar[2].item.Typ == SUB {
+ nl.Val *= -1
+ }
+ nl.PosRange.Start = yyDollar[2].item.Pos
+ yyVAL.node = nl
+ }
+ case 72:
yyDollar = yyS[yypt-2 : yypt+1]
{
fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions)
if !exist {
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "unknown function with name %q", yyDollar[1].item.Val)
}
- if fn != nil && fn.Experimental && !EnableExperimentalFunctions {
+ if fn != nil && fn.Experimental && !yylex.(*parser).options.EnableExperimentalFunctions {
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "function %q is not enabled", yyDollar[1].item.Val)
}
yyVAL.node = &Call{
@@ -1379,38 +1471,38 @@ yydefault:
},
}
}
- case 65:
+ case 73:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = yyDollar[2].node
}
- case 66:
+ case 74:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.node = Expressions{}
}
- case 67:
+ case 75:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr))
}
- case 68:
+ case 76:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = Expressions{yyDollar[1].node.(Expr)}
}
- case 69:
+ case 77:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args")
yyVAL.node = yyDollar[1].node
}
- case 70:
+ case 78:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)}
}
- case 71:
+ case 79:
yyDollar = yyS[yypt-1 : yypt+1]
{
if numLit, ok := yyDollar[1].node.(*NumberLiteral); ok {
@@ -1424,7 +1516,7 @@ yydefault:
}
yyVAL.node = yyDollar[1].node
}
- case 72:
+ case 80:
yyDollar = yyS[yypt-3 : yypt+1]
{
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
@@ -1435,41 +1527,41 @@ yydefault:
yylex.(*parser).addOffsetExpr(yyDollar[1].node, yyDollar[3].node.(*DurationExpr))
yyVAL.node = yyDollar[1].node
}
- case 73:
+ case 81:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("offset", "number, duration, step(), or range()")
yyVAL.node = yyDollar[1].node
}
- case 74:
+ case 82:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).setAnchored(yyDollar[1].node)
}
- case 75:
+ case 83:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).setSmoothed(yyDollar[1].node)
}
- case 76:
+ case 84:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float)
yyVAL.node = yyDollar[1].node
}
- case 77:
+ case 85:
yyDollar = yyS[yypt-5 : yypt+1]
{
yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item)
yyVAL.node = yyDollar[1].node
}
- case 78:
+ case 86:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("@", "timestamp")
yyVAL.node = yyDollar[1].node
}
- case 81:
+ case 89:
yyDollar = yyS[yypt-4 : yypt+1]
{
var errMsg string
@@ -1499,7 +1591,7 @@ yydefault:
EndPos: yylex.(*parser).lastClosing,
}
}
- case 82:
+ case 90:
yyDollar = yyS[yypt-6 : yypt+1]
{
var rangeNl time.Duration
@@ -1521,7 +1613,7 @@ yydefault:
EndPos: yyDollar[6].item.Pos + 1,
}
}
- case 83:
+ case 91:
yyDollar = yyS[yypt-5 : yypt+1]
{
var rangeNl time.Duration
@@ -1536,31 +1628,31 @@ yydefault:
EndPos: yyDollar[5].item.Pos + 1,
}
}
- case 84:
+ case 92:
yyDollar = yyS[yypt-6 : yypt+1]
{
yylex.(*parser).unexpected("subquery selector", "\"]\"")
yyVAL.node = yyDollar[1].node
}
- case 85:
+ case 93:
yyDollar = yyS[yypt-5 : yypt+1]
{
yylex.(*parser).unexpected("subquery selector", "number, duration, step(), range(), or \"]\"")
yyVAL.node = yyDollar[1].node
}
- case 86:
+ case 94:
yyDollar = yyS[yypt-4 : yypt+1]
{
yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"")
yyVAL.node = yyDollar[1].node
}
- case 87:
+ case 95:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("subquery or range selector", "number, duration, step(), or range()")
yyVAL.node = yyDollar[1].node
}
- case 88:
+ case 96:
yyDollar = yyS[yypt-2 : yypt+1]
{
if nl, ok := yyDollar[2].node.(*NumberLiteral); ok {
@@ -1573,7 +1665,7 @@ yydefault:
yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos}
}
}
- case 89:
+ case 97:
yyDollar = yyS[yypt-2 : yypt+1]
{
vs := yyDollar[2].node.(*VectorSelector)
@@ -1582,7 +1674,7 @@ yydefault:
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 90:
+ case 98:
yyDollar = yyS[yypt-1 : yypt+1]
{
vs := &VectorSelector{
@@ -1593,14 +1685,14 @@ yydefault:
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 91:
+ case 99:
yyDollar = yyS[yypt-1 : yypt+1]
{
vs := yyDollar[1].node.(*VectorSelector)
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 92:
+ case 100:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1608,7 +1700,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item),
}
}
- case 93:
+ case 101:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1616,7 +1708,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item),
}
}
- case 94:
+ case 102:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1624,7 +1716,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item),
}
}
- case 95:
+ case 103:
yyDollar = yyS[yypt-3 : yypt+1]
{
if yyDollar[1].matchers != nil {
@@ -1633,144 +1725,144 @@ yydefault:
yyVAL.matchers = yyDollar[1].matchers
}
}
- case 96:
+ case 104:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher}
}
- case 97:
+ case 105:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "\",\" or \"}\"")
yyVAL.matchers = yyDollar[1].matchers
}
- case 98:
+ case 106:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
}
- case 99:
+ case 107:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
}
- case 100:
+ case 108:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item)
}
- case 101:
+ case 109:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "string")
yyVAL.matcher = nil
}
- case 102:
+ case 110:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "string")
yyVAL.matcher = nil
}
- case 103:
+ case 111:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "label matching operator")
yyVAL.matcher = nil
}
- case 104:
+ case 112:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "identifier or \"}\"")
yyVAL.matcher = nil
}
- case 105:
+ case 113:
yyDollar = yyS[yypt-2 : yypt+1]
{
b := labels.NewBuilder(yyDollar[2].labels)
b.Set(labels.MetricName, yyDollar[1].item.Val)
yyVAL.labels = b.Labels()
}
- case 106:
+ case 114:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.labels = yyDollar[1].labels
}
- case 135:
+ case 146:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
- case 136:
+ case 147:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
- case 137:
+ case 148:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.labels = labels.New()
}
- case 138:
+ case 149:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.labels = labels.New()
}
- case 139:
+ case 150:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label)
}
- case 140:
+ case 151:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.lblList = []labels.Label{yyDollar[1].label}
}
- case 141:
+ case 152:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\",\" or \"}\"")
yyVAL.lblList = yyDollar[1].lblList
}
- case 142:
+ case 153:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
}
- case 143:
+ case 154:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
}
- case 144:
+ case 155:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val}
}
- case 145:
+ case 156:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label set", "string")
yyVAL.label = labels.Label{}
}
- case 146:
+ case 157:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label set", "string")
yyVAL.label = labels.Label{}
}
- case 147:
+ case 158:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\"=\"")
yyVAL.label = labels.Label{}
}
- case 148:
+ case 159:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("label set", "identifier or \"}\"")
yyVAL.label = labels.Label{}
}
- case 149:
+ case 160:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).generatedParserResult = &seriesDescription{
@@ -1778,33 +1870,33 @@ yydefault:
values: yyDollar[2].series,
}
}
- case 150:
+ case 161:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.series = []SequenceValue{}
}
- case 151:
+ case 162:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...)
}
- case 152:
+ case 163:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.series = yyDollar[1].series
}
- case 153:
+ case 164:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("series values", "")
yyVAL.series = nil
}
- case 154:
+ case 165:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Omitted: true}}
}
- case 155:
+ case 166:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1812,12 +1904,12 @@ yydefault:
yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true})
}
}
- case 156:
+ case 167:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}}
}
- case 157:
+ case 168:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1826,7 +1918,7 @@ yydefault:
yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float})
}
}
- case 158:
+ case 169:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1836,22 +1928,23 @@ yydefault:
yyDollar[1].float += yyDollar[2].float
}
}
- case 159:
+ case 170:
yyDollar = yyS[yypt-1 : yypt+1]
{
- yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}}
+ yyVAL.series = []SequenceValue{yylex.(*parser).newHistogramSequenceValue(yyDollar[1].histogram)}
}
- case 160:
+ case 171:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
// Add an additional value for time 0, which we ignore in tests.
+ sv := yylex.(*parser).newHistogramSequenceValue(yyDollar[1].histogram)
for i := uint64(0); i <= yyDollar[3].uint; i++ {
- yyVAL.series = append(yyVAL.series, SequenceValue{Histogram: yyDollar[1].histogram})
+ yyVAL.series = append(yyVAL.series, sv)
//$1 += $2
}
}
- case 161:
+ case 172:
yyDollar = yyS[yypt-5 : yypt+1]
{
val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
@@ -1860,7 +1953,7 @@ yydefault:
}
yyVAL.series = val
}
- case 162:
+ case 173:
yyDollar = yyS[yypt-5 : yypt+1]
{
val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
@@ -1869,7 +1962,7 @@ yydefault:
}
yyVAL.series = val
}
- case 163:
+ case 174:
yyDollar = yyS[yypt-1 : yypt+1]
{
if yyDollar[1].item.Val != "stale" {
@@ -1877,130 +1970,130 @@ yydefault:
}
yyVAL.float = math.Float64frombits(value.StaleNaN)
}
- case 166:
+ case 177:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
}
- case 167:
+ case 178:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
}
- case 168:
+ case 179:
yyDollar = yyS[yypt-3 : yypt+1]
{
m := yylex.(*parser).newMap()
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
}
- case 169:
+ case 180:
yyDollar = yyS[yypt-2 : yypt+1]
{
m := yylex.(*parser).newMap()
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
}
- case 170:
+ case 181:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors))
}
- case 171:
+ case 182:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.descriptors = yyDollar[1].descriptors
}
- case 172:
+ case 183:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]")
}
- case 173:
+ case 184:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["schema"] = yyDollar[3].int
}
- case 174:
+ case 185:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["sum"] = yyDollar[3].float
}
- case 175:
+ case 186:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["count"] = yyDollar[3].float
}
- case 176:
+ case 187:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["z_bucket"] = yyDollar[3].float
}
- case 177:
+ case 188:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float
}
- case 178:
+ case 189:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
}
- case 179:
+ case 190:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
}
- case 180:
+ case 191:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["offset"] = yyDollar[3].int
}
- case 181:
+ case 192:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
}
- case 182:
+ case 193:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["n_offset"] = yyDollar[3].int
}
- case 183:
+ case 194:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item
}
- case 184:
+ case 195:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.bucket_set = yyDollar[2].bucket_set
}
- case 185:
+ case 196:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.bucket_set = yyDollar[2].bucket_set
}
- case 186:
+ case 197:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
}
- case 187:
+ case 198:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.bucket_set = []float64{yyDollar[1].float}
}
- case 246:
+ case 260:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &NumberLiteral{
@@ -2008,7 +2101,7 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 247:
+ case 261:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2023,12 +2116,12 @@ yydefault:
Duration: true,
}
}
- case 248:
+ case 262:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
}
- case 249:
+ case 263:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2039,17 +2132,17 @@ yydefault:
}
yyVAL.float = dur.Seconds()
}
- case 250:
+ case 264:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.float = yyDollar[2].float
}
- case 251:
+ case 265:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.float = -yyDollar[2].float
}
- case 254:
+ case 268:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2058,17 +2151,17 @@ yydefault:
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
}
}
- case 255:
+ case 269:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.int = -int64(yyDollar[2].uint)
}
- case 256:
+ case 270:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.int = int64(yyDollar[1].uint)
}
- case 257:
+ case 271:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &StringLiteral{
@@ -2076,7 +2169,7 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 258:
+ case 272:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.item = Item{
@@ -2085,12 +2178,12 @@ yydefault:
Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
}
}
- case 259:
+ case 273:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.strings = nil
}
- case 261:
+ case 275:
yyDollar = yyS[yypt-1 : yypt+1]
{
nl := yyDollar[1].node.(*NumberLiteral)
@@ -2101,7 +2194,7 @@ yydefault:
}
yyVAL.node = nl
}
- case 262:
+ case 276:
yyDollar = yyS[yypt-2 : yypt+1]
{
nl := yyDollar[2].node.(*NumberLiteral)
@@ -2116,7 +2209,7 @@ yydefault:
nl.PosRange.Start = yyDollar[1].item.Pos
yyVAL.node = nl
}
- case 263:
+ case 277:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2125,7 +2218,7 @@ yydefault:
EndPos: yyDollar[3].item.PositionRange().End,
}
}
- case 264:
+ case 278:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2134,7 +2227,7 @@ yydefault:
EndPos: yyDollar[3].item.PositionRange().End,
}
}
- case 265:
+ case 279:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2147,7 +2240,7 @@ yydefault:
StartPos: yyDollar[1].item.Pos,
}
}
- case 266:
+ case 280:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2160,7 +2253,7 @@ yydefault:
StartPos: yyDollar[1].item.Pos,
}
}
- case 267:
+ case 281:
yyDollar = yyS[yypt-6 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2171,7 +2264,7 @@ yydefault:
RHS: yyDollar[5].node.(Expr),
}
}
- case 268:
+ case 282:
yyDollar = yyS[yypt-7 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2187,7 +2280,7 @@ yydefault:
},
}
}
- case 269:
+ case 283:
yyDollar = yyS[yypt-4 : yypt+1]
{
de := yyDollar[3].node.(*DurationExpr)
@@ -2202,7 +2295,7 @@ yydefault:
}
yyVAL.node = yyDollar[3].node
}
- case 273:
+ case 287:
yyDollar = yyS[yypt-1 : yypt+1]
{
nl := yyDollar[1].node.(*NumberLiteral)
@@ -2213,7 +2306,7 @@ yydefault:
}
yyVAL.node = nl
}
- case 274:
+ case 288:
yyDollar = yyS[yypt-2 : yypt+1]
{
switch expr := yyDollar[2].node.(type) {
@@ -2246,25 +2339,25 @@ yydefault:
break
}
}
- case 275:
+ case 289:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: ADD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 276:
+ case 290:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: SUB, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 277:
+ case 291:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: MUL, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 278:
+ case 292:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
@@ -2275,7 +2368,7 @@ yydefault:
}
yyVAL.node = &DurationExpr{Op: DIV, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 279:
+ case 293:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
@@ -2286,13 +2379,13 @@ yydefault:
}
yyVAL.node = &DurationExpr{Op: MOD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 280:
+ case 294:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: POW, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 281:
+ case 295:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2301,7 +2394,7 @@ yydefault:
EndPos: yyDollar[3].item.PositionRange().End,
}
}
- case 282:
+ case 296:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2310,7 +2403,7 @@ yydefault:
EndPos: yyDollar[3].item.PositionRange().End,
}
}
- case 283:
+ case 297:
yyDollar = yyS[yypt-6 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2321,7 +2414,7 @@ yydefault:
RHS: yyDollar[5].node.(Expr),
}
}
- case 285:
+ case 299:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[2].node.(Expr))
diff --git a/promql/parser/lex.go b/promql/parser/lex.go
index b3a82dc0c6..7149985767 100644
--- a/promql/parser/lex.go
+++ b/promql/parser/lex.go
@@ -137,6 +137,9 @@ var key = map[string]ItemType{
"ignoring": IGNORING,
"group_left": GROUP_LEFT,
"group_right": GROUP_RIGHT,
+ "fill": FILL,
+ "fill_left": FILL_LEFT,
+ "fill_right": FILL_RIGHT,
"bool": BOOL,
// Preprocessors.
@@ -1083,6 +1086,17 @@ Loop:
word := l.input[l.start:l.pos]
switch kw, ok := key[strings.ToLower(word)]; {
case ok:
+ // For fill/fill_left/fill_right, only treat as keyword if followed by '('
+ // This allows using these as metric names (e.g., "fill + fill").
+ // This could be done for other keywords as well, but for the new fill
+ // modifiers this is especially important so we don't break any existing
+ // queries.
+ if kw == FILL || kw == FILL_LEFT || kw == FILL_RIGHT {
+ if !l.peekFollowedByLeftParen() {
+ l.emit(IDENTIFIER)
+ break Loop
+ }
+ }
l.emit(kw)
case !strings.Contains(word, ":"):
l.emit(IDENTIFIER)
@@ -1098,6 +1112,23 @@ Loop:
return lexStatements
}
+// peekFollowedByLeftParen checks if the next non-whitespace character is '('.
+// This is used for context-sensitive keywords like fill/fill_left/fill_right
+// that should only be treated as keywords when followed by '('.
+func (l *Lexer) peekFollowedByLeftParen() bool {
+ pos := l.pos
+ for {
+ if int(pos) >= len(l.input) {
+ return false
+ }
+ r, w := utf8.DecodeRuneInString(l.input[pos:])
+ if !isSpace(r) {
+ return r == '('
+ }
+ pos += posrange.Pos(w)
+ }
+}
+
func isSpace(r rune) bool {
return r == ' ' || r == '\t' || r == '\n' || r == '\r'
}
diff --git a/promql/parser/parse.go b/promql/parser/parse.go
index 817e0d02d9..ec3e1001d9 100644
--- a/promql/parser/parse.go
+++ b/promql/parser/parse.go
@@ -30,6 +30,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql/parser/posrange"
+ "github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/strutil"
)
@@ -39,15 +40,104 @@ var parserPool = sync.Pool{
},
}
-// ExperimentalDurationExpr is a flag to enable experimental duration expression parsing.
-var ExperimentalDurationExpr bool
-
-// EnableExtendedRangeSelectors is a flag to enable experimental extended range selectors.
-var EnableExtendedRangeSelectors bool
+// Options holds the configuration for the PromQL parser.
+type Options struct {
+ EnableExperimentalFunctions bool
+ ExperimentalDurationExpr bool
+ EnableExtendedRangeSelectors bool
+ EnableBinopFillModifiers bool
+}
+// Parser provides PromQL parsing methods. Create one with NewParser.
type Parser interface {
- ParseExpr() (Expr, error)
- Close()
+ ParseExpr(input string) (Expr, error)
+ ParseMetric(input string) (labels.Labels, error)
+ ParseMetricSelector(input string) ([]*labels.Matcher, error)
+ ParseMetricSelectors(matchers []string) ([][]*labels.Matcher, error)
+ ParseSeriesDesc(input string) (labels.Labels, []SequenceValue, error)
+ RegisterFeatures(r features.Collector)
+}
+
+type promQLParser struct {
+ options Options
+}
+
+// NewParser returns a new PromQL Parser configured with the given options.
+func NewParser(opts Options) Parser {
+ return &promQLParser{options: opts}
+}
+
+func (pql *promQLParser) ParseExpr(input string) (Expr, error) {
+ p := newParser(input, pql.options)
+ defer p.Close()
+ return p.parseExpr()
+}
+
+func (pql *promQLParser) ParseMetric(input string) (m labels.Labels, err error) {
+ p := newParser(input, pql.options)
+ defer p.Close()
+ defer p.recover(&err)
+
+ parseResult := p.parseGenerated(START_METRIC)
+ if parseResult != nil {
+ m = parseResult.(labels.Labels)
+ }
+
+ if len(p.parseErrors) != 0 {
+ err = p.parseErrors
+ }
+
+ return m, err
+}
+
+func (pql *promQLParser) ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
+ p := newParser(input, pql.options)
+ defer p.Close()
+ defer p.recover(&err)
+
+ parseResult := p.parseGenerated(START_METRIC_SELECTOR)
+ if parseResult != nil {
+ m = parseResult.(*VectorSelector).LabelMatchers
+ }
+
+ if len(p.parseErrors) != 0 {
+ err = p.parseErrors
+ }
+
+ return m, err
+}
+
+func (pql *promQLParser) ParseMetricSelectors(matchers []string) ([][]*labels.Matcher, error) {
+ var matcherSets [][]*labels.Matcher
+ for _, s := range matchers {
+ ms, err := pql.ParseMetricSelector(s)
+ if err != nil {
+ return nil, err
+ }
+ matcherSets = append(matcherSets, ms)
+ }
+ return matcherSets, nil
+}
+
+func (pql *promQLParser) ParseSeriesDesc(input string) (lbls labels.Labels, values []SequenceValue, err error) {
+ p := newParser(input, pql.options)
+ p.lex.seriesDesc = true
+
+ defer p.Close()
+ defer p.recover(&err)
+
+ parseResult := p.parseGenerated(START_SERIES_DESCRIPTION)
+ if parseResult != nil {
+ result := parseResult.(*seriesDescription)
+ lbls = result.labels
+ values = result.values
+ }
+
+ if len(p.parseErrors) != 0 {
+ err = p.parseErrors
+ }
+
+ return lbls, values, err
}
type parser struct {
@@ -67,18 +157,17 @@ type parser struct {
generatedParserResult any
parseErrors ParseErrors
+
+ // lastHistogramCounterResetHintSet is set to true when the most recently
+ // built histogram had a counter_reset_hint explicitly specified.
+ // This is used to populate CounterResetHintSet in SequenceValue.
+ lastHistogramCounterResetHintSet bool
+
+ options Options
}
-type Opt func(p *parser)
-
-func WithFunctions(functions map[string]*Function) Opt {
- return func(p *parser) {
- p.functions = functions
- }
-}
-
-// NewParser returns a new parser.
-func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexported-return
+// newParser returns a new low-level parser instance from the pool.
+func newParser(input string, opts Options) *parser {
p := parserPool.Get().(*parser)
p.functions = Functions
@@ -86,6 +175,7 @@ func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexporte
p.parseErrors = nil
p.generatedParserResult = nil
p.lastClosing = posrange.Pos(0)
+ p.options = opts
// Clear lexer struct before reusing.
p.lex = Lexer{
@@ -93,15 +183,17 @@ func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexporte
state: lexStatements,
}
- // Apply user define options.
- for _, opt := range opts {
- opt(p)
- }
-
return p
}
-func (p *parser) ParseExpr() (expr Expr, err error) {
+// newParserWithFunctions returns a new low-level parser instance with custom functions.
+func newParserWithFunctions(input string, opts Options, functions map[string]*Function) *parser {
+ p := newParser(input, opts)
+ p.functions = functions
+ return p
+}
+
+func (p *parser) parseExpr() (expr Expr, err error) {
defer p.recover(&err)
parseResult := p.parseGenerated(START_EXPRESSION)
@@ -171,69 +263,16 @@ func EnrichParseError(err error, enrich func(parseErr *ParseErr)) {
}
}
-// ParseExpr returns the expression parsed from the input.
-func ParseExpr(input string) (expr Expr, err error) {
- p := NewParser(input)
- defer p.Close()
- return p.ParseExpr()
-}
-
-// ParseMetric parses the input into a metric.
-func ParseMetric(input string) (m labels.Labels, err error) {
- p := NewParser(input)
- defer p.Close()
- defer p.recover(&err)
-
- parseResult := p.parseGenerated(START_METRIC)
- if parseResult != nil {
- m = parseResult.(labels.Labels)
- }
-
- if len(p.parseErrors) != 0 {
- err = p.parseErrors
- }
-
- return m, err
-}
-
-// ParseMetricSelector parses the provided textual metric selector into a list of
-// label matchers.
-func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
- p := NewParser(input)
- defer p.Close()
- defer p.recover(&err)
-
- parseResult := p.parseGenerated(START_METRIC_SELECTOR)
- if parseResult != nil {
- m = parseResult.(*VectorSelector).LabelMatchers
- }
-
- if len(p.parseErrors) != 0 {
- err = p.parseErrors
- }
-
- return m, err
-}
-
-// ParseMetricSelectors parses a list of provided textual metric selectors into lists of
-// label matchers.
-func ParseMetricSelectors(matchers []string) (m [][]*labels.Matcher, err error) {
- var matcherSets [][]*labels.Matcher
- for _, s := range matchers {
- matchers, err := ParseMetricSelector(s)
- if err != nil {
- return nil, err
- }
- matcherSets = append(matcherSets, matchers)
- }
- return matcherSets, nil
-}
-
// SequenceValue is an omittable value in a sequence of time series values.
type SequenceValue struct {
Value float64
Omitted bool
Histogram *histogram.FloatHistogram
+ // CounterResetHintSet is true if the counter reset hint was explicitly
+ // specified in the test file using counter_reset_hint:... syntax.
+ // This allows distinguishing between "no hint specified" (don't care)
+ // vs "counter_reset_hint:unknown" (verify it's unknown).
+ CounterResetHintSet bool
}
func (v SequenceValue) String() string {
@@ -251,30 +290,6 @@ type seriesDescription struct {
values []SequenceValue
}
-// ParseSeriesDesc parses the description of a time series. It is only used in
-// the PromQL testing framework code.
-func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue, err error) {
- p := NewParser(input)
- p.lex.seriesDesc = true
-
- defer p.Close()
- defer p.recover(&err)
-
- parseResult := p.parseGenerated(START_SERIES_DESCRIPTION)
- if parseResult != nil {
- result := parseResult.(*seriesDescription)
-
- labels = result.labels
- values = result.values
- }
-
- if len(p.parseErrors) != 0 {
- err = p.parseErrors
- }
-
- return labels, values, err
-}
-
// addParseErrf formats the error and appends it to the list of parsing errors.
func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...any) {
p.addParseErr(positionRange, fmt.Errorf(format, args...))
@@ -413,13 +428,18 @@ func (p *parser) InjectItem(typ ItemType) {
p.injecting = true
}
-func (*parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr {
+func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr {
ret := modifiers.(*BinaryExpr)
ret.LHS = lhs.(Expr)
ret.RHS = rhs.(Expr)
ret.Op = op.Typ
+ if !p.options.EnableBinopFillModifiers && (ret.VectorMatching.FillValues.LHS != nil || ret.VectorMatching.FillValues.RHS != nil) {
+ p.addParseErrf(ret.PositionRange(), "binop fill modifiers are experimental and not enabled")
+ return ret
+ }
+
return ret
}
@@ -458,7 +478,7 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node, overread bool) (
desiredArgs := 1
if ret.Op.IsAggregatorWithParam() {
- if !EnableExperimentalFunctions && ret.Op.IsExperimentalAggregator() {
+ if !p.options.EnableExperimentalFunctions && ret.Op.IsExperimentalAggregator() {
p.addParseErrf(ret.PositionRange(), "%s() is experimental and must be enabled with --enable-feature=promql-experimental-functions", ret.Op)
return ret
}
@@ -496,25 +516,30 @@ func (p *parser) mergeMaps(left, right *map[string]any) (ret *map[string]any) {
}
func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
- return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
+ // Capture the hint set flag immediately after inc histogram is built.
+ // The base histogram's hint set flag was already captured.
+ hintSet := p.lastHistogramCounterResetHintSet
+ return p.histogramsSeries(base, inc, times, hintSet, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
res, _, _, err := a.Add(b)
return res, err
})
}
func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
- return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
+ // Capture the hint set flag immediately after inc histogram is built.
+ hintSet := p.lastHistogramCounterResetHintSet
+ return p.histogramsSeries(base, inc, times, hintSet, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
res, _, _, err := a.Sub(b)
return res, err
})
}
-func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
+func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64, counterResetHintSet bool,
combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error),
) ([]SequenceValue, error) {
ret := make([]SequenceValue, times+1)
// Add an additional value (the base) for time 0, which we ignore in tests.
- ret[0] = SequenceValue{Histogram: base}
+ ret[0] = SequenceValue{Histogram: base, CounterResetHintSet: counterResetHintSet}
cur := base
for i := uint64(1); i <= times; i++ {
if cur.Schema > inc.Schema {
@@ -526,7 +551,7 @@ func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint6
if err != nil {
return ret, err
}
- ret[i] = SequenceValue{Histogram: cur}
+ ret[i] = SequenceValue{Histogram: cur, CounterResetHintSet: counterResetHintSet}
}
return ret, nil
@@ -535,6 +560,8 @@ func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint6
// buildHistogramFromMap is used in the grammar to take then individual parts of the histogram and complete it.
func (p *parser) buildHistogramFromMap(desc *map[string]any) *histogram.FloatHistogram {
output := &histogram.FloatHistogram{}
+ // Reset the flag for each new histogram being built.
+ p.lastHistogramCounterResetHintSet = false
val, ok := (*desc)["schema"]
if ok {
@@ -595,6 +622,8 @@ func (p *parser) buildHistogramFromMap(desc *map[string]any) *histogram.FloatHis
val, ok = (*desc)["counter_reset_hint"]
if ok {
+ // Mark that the counter reset hint was explicitly specified.
+ p.lastHistogramCounterResetHintSet = true
resetHint, ok := val.(Item)
if ok {
@@ -626,6 +655,16 @@ func (p *parser) buildHistogramFromMap(desc *map[string]any) *histogram.FloatHis
return output
}
+// newHistogramSequenceValue creates a SequenceValue for a histogram,
+// setting CounterResetHintSet based on whether counter_reset_hint was
+// explicitly specified in the histogram description.
+func (p *parser) newHistogramSequenceValue(h *histogram.FloatHistogram) SequenceValue {
+ return SequenceValue{
+ Histogram: h,
+ CounterResetHintSet: p.lastHistogramCounterResetHintSet,
+ }
+}
+
func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]any, bucketsKey, offsetKey string,
) (buckets []float64, spans []histogram.Span) {
bucketCount := 0
@@ -768,6 +807,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
if len(n.VectorMatching.MatchingLabels) > 0 {
p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors")
}
+ if n.VectorMatching.FillValues.LHS != nil || n.VectorMatching.FillValues.RHS != nil {
+ p.addParseErrf(n.PositionRange(), "filling in missing series only allowed between instant vectors")
+ }
n.VectorMatching = nil
case n.Op.IsSetOperator(): // Both operands are Vectors.
if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne {
@@ -776,6 +818,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
if n.VectorMatching.Card != CardManyToMany {
p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many")
}
+ if n.VectorMatching.FillValues.LHS != nil || n.VectorMatching.FillValues.RHS != nil {
+ p.addParseErrf(n.PositionRange(), "filling in missing series not allowed for set operators")
+ }
}
if (lt == ValueTypeScalar || rt == ValueTypeScalar) && n.Op.IsSetOperator() {
@@ -1030,7 +1075,7 @@ func (p *parser) addOffsetExpr(e Node, expr *DurationExpr) {
}
func (p *parser) setAnchored(e Node) {
- if !EnableExtendedRangeSelectors {
+ if !p.options.EnableExtendedRangeSelectors {
p.addParseErrf(e.PositionRange(), "anchored modifier is experimental and not enabled")
return
}
@@ -1053,7 +1098,7 @@ func (p *parser) setAnchored(e Node) {
}
func (p *parser) setSmoothed(e Node) {
- if !EnableExtendedRangeSelectors {
+ if !p.options.EnableExtendedRangeSelectors {
p.addParseErrf(e.PositionRange(), "smoothed modifier is experimental and not enabled")
return
}
@@ -1149,7 +1194,7 @@ func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *posrange.Pos, b
}
func (p *parser) experimentalDurationExpr(e Expr) {
- if !ExperimentalDurationExpr {
+ if !p.options.ExperimentalDurationExpr {
p.addParseErrf(e.PositionRange(), "experimental duration expression is not enabled")
}
}
diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go
index ab5564f0ff..f5b2e2dff0 100644
--- a/promql/parser/parse_test.go
+++ b/promql/parser/parse_test.go
@@ -31,6 +31,8 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
+var testParser = NewParser(Options{})
+
func repeatError(query string, err error, start, startStep, end, endStep, count int) (errs ParseErrors) {
for i := range count {
errs = append(errs, ParseErr{
@@ -5297,18 +5299,14 @@ func readable(s string) string {
}
func TestParseExpressions(t *testing.T) {
- // Enable experimental functions testing.
- EnableExperimentalFunctions = true
- // Enable experimental duration expression parsing.
- ExperimentalDurationExpr = true
- t.Cleanup(func() {
- EnableExperimentalFunctions = false
- ExperimentalDurationExpr = false
+ optsParser := NewParser(Options{
+ EnableExperimentalFunctions: true,
+ ExperimentalDurationExpr: true,
})
for _, test := range testExpr {
t.Run(readable(test.input), func(t *testing.T) {
- expr, err := ParseExpr(test.input)
+ expr, err := optsParser.ParseExpr(test.input)
// Unexpected errors are always caused by a bug.
require.NotEqual(t, err, errUnexpected, "unexpected error occurred")
@@ -5436,7 +5434,7 @@ func TestParseSeriesDesc(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- l, v, err := ParseSeriesDesc(tc.input)
+ l, v, err := testParser.ParseSeriesDesc(tc.input)
if tc.expectError != "" {
require.Contains(t, err.Error(), tc.expectError)
} else {
@@ -5450,7 +5448,7 @@ func TestParseSeriesDesc(t *testing.T) {
// NaN has no equality. Thus, we need a separate test for it.
func TestNaNExpression(t *testing.T) {
- expr, err := ParseExpr("NaN")
+ expr, err := testParser.ParseExpr("NaN")
require.NoError(t, err)
nl, ok := expr.(*NumberLiteral)
@@ -5878,7 +5876,7 @@ func TestParseHistogramSeries(t *testing.T) {
},
} {
t.Run(test.name, func(t *testing.T) {
- _, vals, err := ParseSeriesDesc(test.input)
+ _, vals, err := testParser.ParseSeriesDesc(test.input)
if test.expectedError != "" {
require.EqualError(t, err, test.expectedError)
return
@@ -5950,7 +5948,7 @@ func TestHistogramTestExpression(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
expression := test.input.TestExpression()
require.Equal(t, test.expected, expression)
- _, vals, err := ParseSeriesDesc("{} " + expression)
+ _, vals, err := testParser.ParseSeriesDesc("{} " + expression)
require.NoError(t, err)
require.Len(t, vals, 1)
canonical := vals[0].Histogram
@@ -5962,7 +5960,7 @@ func TestHistogramTestExpression(t *testing.T) {
func TestParseSeries(t *testing.T) {
for _, test := range testSeries {
- metric, vals, err := ParseSeriesDesc(test.input)
+ metric, vals, err := testParser.ParseSeriesDesc(test.input)
// Unexpected errors are always caused by a bug.
require.NotEqual(t, err, errUnexpected, "unexpected error occurred")
@@ -5978,7 +5976,7 @@ func TestParseSeries(t *testing.T) {
}
func TestRecoverParserRuntime(t *testing.T) {
- p := NewParser("foo bar")
+ p := newParser("foo bar", Options{})
var err error
defer func() {
@@ -5991,7 +5989,7 @@ func TestRecoverParserRuntime(t *testing.T) {
}
func TestRecoverParserError(t *testing.T) {
- p := NewParser("foo bar")
+ p := newParser("foo bar", Options{})
var err error
e := errors.New("custom error")
@@ -6026,12 +6024,12 @@ func TestExtractSelectors(t *testing.T) {
[]string{},
},
} {
- expr, err := ParseExpr(tc.input)
+ expr, err := testParser.ParseExpr(tc.input)
require.NoError(t, err)
var expected [][]*labels.Matcher
for _, s := range tc.expected {
- selector, err := ParseMetricSelector(s)
+ selector, err := testParser.ParseMetricSelector(s)
require.NoError(t, err)
expected = append(expected, selector)
}
@@ -6048,11 +6046,37 @@ func TestParseCustomFunctions(t *testing.T) {
ReturnType: ValueTypeVector,
}
input := "custom_func(metric[1m])"
- p := NewParser(input, WithFunctions(funcs))
- expr, err := p.ParseExpr()
+ p := newParserWithFunctions(input, Options{}, funcs)
+ expr, err := p.parseExpr()
require.NoError(t, err)
call, ok := expr.(*Call)
require.True(t, ok)
require.Equal(t, "custom_func", call.Func.Name)
}
+
+func TestNewParser(t *testing.T) {
+ p := NewParser(Options{
+ EnableExperimentalFunctions: true,
+ ExperimentalDurationExpr: true,
+ })
+
+ // ParseExpr should work.
+ expr, err := p.ParseExpr("up")
+ require.NoError(t, err)
+ require.NotNil(t, expr)
+
+ // ParseMetricSelector should work.
+ matchers, err := p.ParseMetricSelector(`{job="prometheus"}`)
+ require.NoError(t, err)
+ require.Len(t, matchers, 1)
+
+ // ParseMetricSelectors should work.
+ matcherSets, err := p.ParseMetricSelectors([]string{`{job="prometheus"}`, `{job="grafana"}`})
+ require.NoError(t, err)
+ require.Len(t, matcherSets, 2)
+
+ // Invalid input should return errors.
+ _, err = p.ParseExpr("===")
+ require.Error(t, err)
+}
diff --git a/promql/parser/prettier_test.go b/promql/parser/prettier_test.go
index 8ba5134d4a..d00bc283ec 100644
--- a/promql/parser/prettier_test.go
+++ b/promql/parser/prettier_test.go
@@ -114,7 +114,7 @@ task:errors:rate10s{job="s"}))`,
},
}
for _, test := range inputs {
- expr, err := ParseExpr(test.in)
+ expr, err := testParser.ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
@@ -185,7 +185,7 @@ func TestBinaryExprPretty(t *testing.T) {
}
for _, test := range inputs {
t.Run(test.in, func(t *testing.T) {
- expr, err := ParseExpr(test.in)
+ expr, err := testParser.ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
@@ -261,7 +261,7 @@ func TestCallExprPretty(t *testing.T) {
},
}
for _, test := range inputs {
- expr, err := ParseExpr(test.in)
+ expr, err := testParser.ParseExpr(test.in)
require.NoError(t, err)
fmt.Println("=>", expr.String())
@@ -308,7 +308,7 @@ func TestParenExprPretty(t *testing.T) {
},
}
for _, test := range inputs {
- expr, err := ParseExpr(test.in)
+ expr, err := testParser.ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
@@ -334,7 +334,7 @@ func TestStepInvariantExpr(t *testing.T) {
},
}
for _, test := range inputs {
- expr, err := ParseExpr(test.in)
+ expr, err := testParser.ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
@@ -594,7 +594,7 @@ or
},
}
for _, test := range inputs {
- expr, err := ParseExpr(test.in)
+ expr, err := testParser.ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
}
@@ -662,7 +662,7 @@ func TestUnaryPretty(t *testing.T) {
}
for _, test := range inputs {
t.Run(test.in, func(t *testing.T) {
- expr, err := ParseExpr(test.in)
+ expr, err := testParser.ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
})
@@ -670,11 +670,7 @@ func TestUnaryPretty(t *testing.T) {
}
func TestDurationExprPretty(t *testing.T) {
- // Enable experimental duration expression parsing.
- ExperimentalDurationExpr = true
- t.Cleanup(func() {
- ExperimentalDurationExpr = false
- })
+ optsParser := NewParser(Options{ExperimentalDurationExpr: true})
maxCharactersPerLine = 10
inputs := []struct {
in, out string
@@ -700,7 +696,7 @@ func TestDurationExprPretty(t *testing.T) {
}
for _, test := range inputs {
t.Run(test.in, func(t *testing.T) {
- expr, err := ParseExpr(test.in)
+ expr, err := optsParser.ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
})
diff --git a/promql/parser/printer.go b/promql/parser/printer.go
index 01e2c46c1b..44ca15e532 100644
--- a/promql/parser/printer.go
+++ b/promql/parser/printer.go
@@ -172,6 +172,19 @@ func (node *BinaryExpr) getMatchingStr() string {
b.WriteString(")")
matching += b.String()
}
+
+ if vm.FillValues.LHS != nil || vm.FillValues.RHS != nil {
+ if vm.FillValues.LHS == vm.FillValues.RHS {
+ matching += fmt.Sprintf(" fill (%v)", *vm.FillValues.LHS)
+ } else {
+ if vm.FillValues.LHS != nil {
+ matching += fmt.Sprintf(" fill_left (%v)", *vm.FillValues.LHS)
+ }
+ if vm.FillValues.RHS != nil {
+ matching += fmt.Sprintf(" fill_right (%v)", *vm.FillValues.RHS)
+ }
+ }
+ }
}
return matching
}
diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go
index 4499fa7860..eae91d4f88 100644
--- a/promql/parser/printer_test.go
+++ b/promql/parser/printer_test.go
@@ -22,9 +22,10 @@ import (
)
func TestExprString(t *testing.T) {
- ExperimentalDurationExpr = true
- t.Cleanup(func() {
- ExperimentalDurationExpr = false
+ optsParser := NewParser(Options{
+ ExperimentalDurationExpr: true,
+ EnableExtendedRangeSelectors: true,
+ EnableBinopFillModifiers: true,
})
// A list of valid expressions that are expected to be
// returned as out when calling String(). If out is empty the output
@@ -113,6 +114,26 @@ func TestExprString(t *testing.T) {
in: `a - ignoring() group_left c`,
out: `a - ignoring () group_left () c`,
},
+ {
+ in: `a + fill(-23) b`,
+ out: `a + fill (-23) b`,
+ },
+ {
+ in: `a + fill_left(-23) b`,
+ out: `a + fill_left (-23) b`,
+ },
+ {
+ in: `a + fill_right(42) b`,
+ out: `a + fill_right (42) b`,
+ },
+ {
+ in: `a + fill_left(-23) fill_right(42) b`,
+ out: `a + fill_left (-23) fill_right (42) b`,
+ },
+ {
+ in: `a + on(b) group_left fill(-23) c`,
+ out: `a + on (b) group_left () fill (-23) c`,
+ },
{
in: `up > bool 0`,
},
@@ -298,14 +319,9 @@ func TestExprString(t *testing.T) {
},
}
- EnableExtendedRangeSelectors = true
- t.Cleanup(func() {
- EnableExtendedRangeSelectors = false
- })
-
for _, test := range inputs {
t.Run(test.in, func(t *testing.T) {
- expr, err := ParseExpr(test.in)
+ expr, err := optsParser.ParseExpr(test.in)
require.NoError(t, err)
exp := test.in
@@ -330,7 +346,7 @@ func BenchmarkExprString(b *testing.B) {
for _, test := range inputs {
b.Run(readable(test), func(b *testing.B) {
- expr, err := ParseExpr(test)
+ expr, err := testParser.ParseExpr(test)
require.NoError(b, err)
for b.Loop() {
_ = expr.String()
@@ -462,7 +478,7 @@ func TestBinaryExprUTF8Labels(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- expr, err := ParseExpr(tc.input)
+ expr, err := testParser.ParseExpr(tc.input)
if err != nil {
t.Fatalf("Failed to parse: %v", err)
}
diff --git a/promql/promql_test.go b/promql/promql_test.go
index fc13f7e64f..01189f6e57 100644
--- a/promql/promql_test.go
+++ b/promql/promql_test.go
@@ -39,20 +39,17 @@ func TestEvaluations(t *testing.T) {
// Run a lot of queries at the same time, to check for race conditions.
func TestConcurrentRangeQueries(t *testing.T) {
stor := teststorage.New(t)
- defer stor.Close()
+
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
+ Parser: parser.NewParser(parser.Options{
+ EnableExperimentalFunctions: true,
+ EnableExtendedRangeSelectors: true,
+ }),
}
- // Enable experimental functions testing
- parser.EnableExperimentalFunctions = true
- parser.EnableExtendedRangeSelectors = true
- t.Cleanup(func() {
- parser.EnableExperimentalFunctions = false
- parser.EnableExtendedRangeSelectors = false
- })
engine := promqltest.NewTestEngineWithOpts(t, opts)
const interval = 10000 // 10s interval.
diff --git a/promql/promqltest/README.md b/promql/promqltest/README.md
index d26c01c6f1..b4efd9c128 100644
--- a/promql/promqltest/README.md
+++ b/promql/promqltest/README.md
@@ -110,6 +110,15 @@ eval range from to step
* ` ""` (optional) for matching a string literal
* `` and `` specify the expected values, and follow the same syntax as for `load` above
+### Special handling of counter reset hints in native histograms
+
+Native histograms as part of `` may or may not contain an explicit
+`counter_reset_hint` property. If a `counter_reset_hint` is provided
+explicitly, the counter reset hint of the histogram is tested to have the
+provided value (`unknown`, `reset`, `not_reset`, or `gauge`). However, if no
+`counter_reset_hint` is specified, the `counter_reset_hint` is not tested at
+all (rather than testing for the usual default value `unknown`).
+
### `expect string`
This can be used to specify that a string literal is the expected result.
diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go
index fc3872d197..a634a194fb 100644
--- a/promql/promqltest/test.go
+++ b/promql/promqltest/test.go
@@ -43,7 +43,6 @@ import (
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/convertnhcb"
"github.com/prometheus/prometheus/util/teststorage"
- "github.com/prometheus/prometheus/util/testutil"
)
var (
@@ -72,7 +71,7 @@ var testStartTime = time.Unix(0, 0).UTC()
// LoadedStorage returns storage with generated data using the provided load statements.
// Non-load statements will cause test errors.
-func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
+func LoadedStorage(t testing.TB, input string) *teststorage.TestStorage {
test, err := newTest(t, input, false, newTestStorage)
require.NoError(t, err)
@@ -87,6 +86,14 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
return test.storage.(*teststorage.TestStorage)
}
+// TestParserOpts are the parser options used for all built-in test engines.
+var TestParserOpts = parser.Options{
+ EnableExperimentalFunctions: true,
+ ExperimentalDurationExpr: true,
+ EnableExtendedRangeSelectors: true,
+ EnableBinopFillModifiers: true,
+}
+
// NewTestEngine creates a promql.Engine with enablePerStepStats, lookbackDelta and maxSamples, and returns it.
func NewTestEngine(tb testing.TB, enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine {
return NewTestEngineWithOpts(tb, promql.EngineOpts{
@@ -100,6 +107,7 @@ func NewTestEngine(tb testing.TB, enablePerStepStats bool, lookbackDelta time.Du
EnablePerStepStats: enablePerStepStats,
LookbackDelta: lookbackDelta,
EnableDelayedNameRemoval: true,
+ Parser: parser.NewParser(TestParserOpts),
})
}
@@ -152,16 +160,8 @@ func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
}
// RunBuiltinTestsWithStorage runs an acceptance test suite against the provided engine and storage.
-func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage) {
- t.Cleanup(func() {
- parser.EnableExperimentalFunctions = false
- parser.ExperimentalDurationExpr = false
- parser.EnableExtendedRangeSelectors = false
- })
- parser.EnableExperimentalFunctions = true
- parser.ExperimentalDurationExpr = true
- parser.EnableExtendedRangeSelectors = true
-
+// The engine must be created with ParserOptions that enable all experimental features used in the test files.
+func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage func(testing.TB) storage.Storage) {
files, err := fs.Glob(testsFs, "*/*.test")
require.NoError(t, err)
@@ -175,22 +175,22 @@ func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage f
}
// RunTest parses and runs the test against the provided engine.
-func RunTest(t testutil.T, input string, engine promql.QueryEngine) {
+func RunTest(t testing.TB, input string, engine promql.QueryEngine) {
RunTestWithStorage(t, input, engine, newTestStorage)
}
// RunTestWithStorage parses and runs the test against the provided engine and storage.
-func RunTestWithStorage(t testutil.T, input string, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage) {
+func RunTestWithStorage(t testing.TB, input string, engine promql.QueryEngine, newStorage func(testing.TB) storage.Storage) {
require.NoError(t, runTest(t, input, engine, newStorage, false))
}
// testTest allows tests to be run in "test-the-test" mode (true for
// testingMode). This is a special mode for testing test code execution itself.
-func testTest(t testutil.T, input string, engine promql.QueryEngine) error {
+func testTest(t testing.TB, input string, engine promql.QueryEngine) error {
return runTest(t, input, engine, newTestStorage, true)
}
-func runTest(t testutil.T, input string, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage, testingMode bool) error {
+func runTest(t testing.TB, input string, engine promql.QueryEngine, newStorage func(testing.TB) storage.Storage, testingMode bool) error {
test, err := newTest(t, input, testingMode, newStorage)
// Why do this before checking err? newTest() can create the test storage and then return an error,
@@ -225,13 +225,14 @@ func runTest(t testutil.T, input string, engine promql.QueryEngine, newStorage f
// test is a sequence of read and write commands that are run
// against a test storage.
type test struct {
- testutil.T
+ testing.TB
+
// testingMode distinguishes between normal execution and test-execution mode.
testingMode bool
cmds []testCommand
- open func(testutil.T) storage.Storage
+ open func(testing.TB) storage.Storage
storage storage.Storage
context context.Context
@@ -239,9 +240,9 @@ type test struct {
}
// newTest returns an initialized empty Test.
-func newTest(t testutil.T, input string, testingMode bool, newStorage func(testutil.T) storage.Storage) (*test, error) {
+func newTest(t testing.TB, input string, testingMode bool, newStorage func(testing.TB) storage.Storage) (*test, error) {
test := &test{
- T: t,
+ TB: t,
cmds: []testCommand{},
testingMode: testingMode,
open: newStorage,
@@ -252,7 +253,7 @@ func newTest(t testutil.T, input string, testingMode bool, newStorage func(testu
return test, err
}
-func newTestStorage(t testutil.T) storage.Storage { return teststorage.New(t) }
+func newTestStorage(t testing.TB) storage.Storage { return teststorage.New(t) }
//go:embed testdata
var testsFs embed.FS
@@ -296,7 +297,8 @@ func parseLoad(lines []string, i int, startTime time.Time) (int, *loadCmd, error
}
func parseSeries(defLine string, line int) (labels.Labels, []parser.SequenceValue, error) {
- metric, vals, err := parser.ParseSeriesDesc(defLine)
+ testParser := parser.NewParser(TestParserOpts)
+ metric, vals, err := testParser.ParseSeriesDesc(defLine)
if err != nil {
parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
parseErr.LineOffset = line
@@ -425,7 +427,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
expr = rangeParts[5]
}
- _, err := parser.ParseExpr(expr)
+ _, err := parserForBuiltinTests.ParseExpr(expr)
if err != nil {
parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
parseErr.LineOffset = i
@@ -1045,7 +1047,12 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
exp := ev.expected[hash]
var expectedFloats []promql.FPoint
- var expectedHistograms []promql.HPoint
+ // expectedHPoint wraps HPoint with CounterResetHintSet flag from SequenceValue.
+ type expectedHPoint struct {
+ promql.HPoint
+ CounterResetHintSet bool
+ }
+ var expectedHistograms []expectedHPoint
for i, e := range exp.vals {
ts := ev.start.Add(time.Duration(i) * ev.step)
@@ -1057,7 +1064,10 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
if e.Histogram != nil {
- expectedHistograms = append(expectedHistograms, promql.HPoint{T: t, H: e.Histogram})
+ expectedHistograms = append(expectedHistograms, expectedHPoint{
+ HPoint: promql.HPoint{T: t, H: e.Histogram},
+ CounterResetHintSet: e.CounterResetHintSet,
+ })
} else if !e.Omitted {
expectedFloats = append(expectedFloats, promql.FPoint{T: t, F: e.Value})
}
@@ -1086,7 +1096,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
- if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0)) {
+ if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0), expected.CounterResetHintSet) {
return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H.TestExpression(), actual.H.TestExpression(), formatSeriesResult(s))
}
}
@@ -1125,7 +1135,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
if expH != nil && v.H == nil {
return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F)
}
- if expH != nil && !compareNativeHistogram(expH.Compact(0), v.H.Compact(0)) {
+ if expH != nil && !compareNativeHistogram(expH.Compact(0), v.H.Compact(0), exp0.CounterResetHintSet) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
}
if !almost.Equal(exp0.Value, v.F, defaultEpsilon) {
@@ -1163,7 +1173,9 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
// compareNativeHistogram is helper function to compare two native histograms
// which can tolerate some differ in the field of float type, such as Count, Sum.
-func compareNativeHistogram(exp, cur *histogram.FloatHistogram) bool {
+// The counterResetHintSet parameter indicates whether the counter reset hint was
+// explicitly specified in the expected histogram (from the test file).
+func compareNativeHistogram(exp, cur *histogram.FloatHistogram, counterResetHintSet bool) bool {
if exp == nil || cur == nil {
return false
}
@@ -1199,6 +1211,15 @@ func compareNativeHistogram(exp, cur *histogram.FloatHistogram) bool {
return false
}
+ // Compare CounterResetHint only if explicitly specified in expected histogram.
+ // When counterResetHintSet is false, no hint was specified, meaning "don't care".
+ // When counterResetHintSet is true, the hint was explicitly specified and must match.
+ if counterResetHintSet {
+ if exp.CounterResetHint != cur.CounterResetHint {
+ return false
+ }
+ }
+
return true
}
@@ -1342,8 +1363,13 @@ type atModifierTestCase struct {
evalTime time.Time
}
+// parserForBuiltinTests is the parser used when parsing expressions in the
+// built-in test framework (e.g. atModifierTestCases). It must match the Parser
+// used by NewTestEngine so that expressions parse consistently.
+var parserForBuiltinTests = parser.NewParser(TestParserOpts)
+
func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCase, error) {
- expr, err := parser.ParseExpr(exprStr)
+ expr, err := parserForBuiltinTests.ParseExpr(exprStr)
if err != nil {
return nil, err
}
@@ -1454,7 +1480,7 @@ func (t *test) execEval(cmd *evalCmd, engine promql.QueryEngine) error {
return do()
}
- if tt, ok := t.T.(*testing.T); ok {
+ if tt, ok := t.TB.(*testing.T); ok {
tt.Run(fmt.Sprintf("line %d/%s", cmd.line, cmd.expr), func(t *testing.T) {
require.NoError(t, do())
})
@@ -1622,12 +1648,12 @@ func assertMatrixSorted(m promql.Matrix) error {
func (t *test) clear() {
if t.storage != nil {
err := t.storage.Close()
- require.NoError(t.T, err, "Unexpected error while closing test storage.")
+ require.NoError(t.TB, err, "Unexpected error while closing test storage.")
}
if t.cancelCtx != nil {
t.cancelCtx()
}
- t.storage = t.open(t.T)
+ t.storage = t.open(t.TB)
t.context, t.cancelCtx = context.WithCancel(context.Background())
}
diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test
index 576b36868f..a3dc61dcff 100644
--- a/promql/promqltest/testdata/aggregators.test
+++ b/promql/promqltest/testdata/aggregators.test
@@ -687,6 +687,11 @@ load 10s
eval instant at 1m sum(data{test="ten"})
{} 10
+# Plain addition doesn't use Kahan summation, so operations involving very large magnitudes
+# (±1e+100) lose precision. The smaller values are absorbed, leading to an incorrect result.
+# eval instant at 1m sum(data{test="ten",point="a"}) + sum(data{test="ten",point="b"}) + sum(data{test="ten",point="c"}) + sum(data{test="ten",point="d"})
+# {} 10
+
eval instant at 1m avg(data{test="ten"})
{} 2.5
diff --git a/promql/promqltest/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test
index 4091f7eabf..194c877803 100644
--- a/promql/promqltest/testdata/at_modifier.test
+++ b/promql/promqltest/testdata/at_modifier.test
@@ -215,3 +215,43 @@ eval instant at 0s sum_over_time(timestamp(timestamp(metric{job="1"} @ 999))[10s
clear
+
+# Tests for @ modifier with empty data.
+# Data only at 0s, 10s, 20s. Eval at timestamp with no data.
+load 10s
+ up 1 2 3
+
+# Functions that should return empty results when @ modifier points to timestamp with no data.
+# These were panicking before the fix.
+
+eval instant at 1111111s quantile_over_time(scalar(up) + 1, {__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s predict_linear({__name__="up"}[1h:1m] @ 1111111, 0.1)
+
+eval instant at 1111111s deriv({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s changes({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s resets({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s first_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s last_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s sum_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s avg_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s min_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s max_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s count_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s stddev_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s stdvar_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+eval instant at 1111111s mad_over_time({__name__="up"}[1h:1m] @ 1111111)
+
+clear
diff --git a/promql/promqltest/testdata/extended_vectors.test b/promql/promqltest/testdata/extended_vectors.test
index 8f431dcfd3..0bc1140522 100644
--- a/promql/promqltest/testdata/extended_vectors.test
+++ b/promql/promqltest/testdata/extended_vectors.test
@@ -358,6 +358,14 @@ load 1m
eval instant at 2m15s increase(metric[2m] smoothed)
{} 12
+# Smoothed rate interpolation across a counter reset.
+clear
+load 15s
+ metric 100 10
+
+eval instant at 12s rate(metric[10s] smoothed)
+ {} 0.666666666666667
+
clear
eval instant at 1m deriv(foo[3m] smoothed)
expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with deriv
diff --git a/promql/promqltest/testdata/fill-modifier.test b/promql/promqltest/testdata/fill-modifier.test
new file mode 100644
index 0000000000..079a48cc99
--- /dev/null
+++ b/promql/promqltest/testdata/fill-modifier.test
@@ -0,0 +1,383 @@
+# ==================== fill / fill_left / fill_right modifier tests ====================
+
+# Test data for fill modifier tests: vectors with partial overlap.
+load 5m
+ left_vector{label="a"} 10
+ left_vector{label="b"} 20
+ left_vector{label="c"} 30
+ right_vector{label="a"} 100
+ right_vector{label="b"} 200
+ right_vector{label="d"} 400
+
+# ---------- Arithmetic operators with fill modifiers ----------
+
+# fill(0): Fill both sides with 0 for addition.
+eval instant at 0m left_vector + fill(0) right_vector
+ {label="a"} 110
+ {label="b"} 220
+ {label="c"} 30
+ {label="d"} 400
+
+# fill_left(0): Only fill left side with 0.
+eval instant at 0m left_vector + fill_left(0) right_vector
+ {label="a"} 110
+ {label="b"} 220
+ {label="d"} 400
+
+# fill_right(0): Only fill right side with 0.
+eval instant at 0m left_vector + fill_right(0) right_vector
+ {label="a"} 110
+ {label="b"} 220
+ {label="c"} 30
+
+# fill_left and fill_right with different values.
+eval instant at 0m left_vector + fill_left(5) fill_right(7) right_vector
+ {label="a"} 110
+ {label="b"} 220
+ {label="c"} 37
+ {label="d"} 405
+
+# fill with NaN.
+eval instant at 0m left_vector + fill(NaN) right_vector
+ {label="a"} 110
+ {label="b"} 220
+ {label="c"} NaN
+ {label="d"} NaN
+
+# fill with Inf.
+eval instant at 0m left_vector + fill(Inf) right_vector
+ {label="a"} 110
+ {label="b"} 220
+ {label="c"} +Inf
+ {label="d"} +Inf
+
+# fill with -Inf.
+eval instant at 0m left_vector + fill(-Inf) right_vector
+ {label="a"} 110
+ {label="b"} 220
+ {label="c"} -Inf
+ {label="d"} -Inf
+
+# ---------- Comparison operators with fill modifiers ----------
+
+# fill with equality comparison.
+eval instant at 0m left_vector == fill(30) right_vector
+ left_vector{label="c"} 30
+
+# fill with inequality comparison.
+eval instant at 0m left_vector != fill(30) right_vector
+ left_vector{label="a"} 10
+ left_vector{label="b"} 20
+ {label="d"} 30
+
+# fill with greater than.
+eval instant at 0m left_vector > fill(25) right_vector
+ left_vector{label="c"} 30
+
+# ---------- Comparison operators with bool modifier and fill ----------
+
+# fill with equality comparison and bool.
+eval instant at 0m left_vector == bool fill(30) right_vector
+ {label="a"} 0
+ {label="b"} 0
+ {label="c"} 1
+ {label="d"} 0
+
+# fill with inequality comparison and bool.
+eval instant at 0m left_vector != bool fill(30) right_vector
+ {label="a"} 1
+ {label="b"} 1
+ {label="c"} 0
+ {label="d"} 1
+
+# fill with greater than and bool.
+eval instant at 0m left_vector > bool fill(25) right_vector
+ {label="a"} 0
+ {label="b"} 0
+ {label="c"} 1
+ {label="d"} 0
+
+# ---------- fill with on() and ignoring() modifiers ----------
+
+clear
+
+load 5m
+ left_vector{job="foo", instance="a"} 10
+ left_vector{job="foo", instance="b"} 20
+ left_vector{job="bar", instance="a"} 30
+ right_vector{job="foo", instance="a"} 100
+ right_vector{job="foo", instance="c"} 300
+
+# fill with on().
+eval instant at 0m left_vector + on(job, instance) fill(0) right_vector
+ {job="foo", instance="a"} 110
+ {job="foo", instance="b"} 20
+ {job="bar", instance="a"} 30
+ {job="foo", instance="c"} 300
+
+# fill_right with on().
+eval instant at 0m left_vector + on(job, instance) fill_right(0) right_vector
+ {job="foo", instance="a"} 110
+ {job="foo", instance="b"} 20
+ {job="bar", instance="a"} 30
+
+# fill_left with on().
+eval instant at 0m left_vector + on(job, instance) fill_left(0) right_vector
+ {job="foo", instance="a"} 110
+ {job="foo", instance="c"} 300
+
+# fill with ignoring() - requires group_left since ignoring(job) creates many-to-one matching
+# when two left_vector series have same instance but different jobs.
+eval instant at 0m left_vector + ignoring(job) group_left fill(0) right_vector
+ {instance="a", job="foo"} 110
+ {instance="a", job="bar"} 130
+ {instance="b", job="foo"} 20
+ {instance="c"} 300
+
+# ---------- fill with group_left / group_right (many-to-one / one-to-many) ----------
+
+clear
+
+load 5m
+ requests{method="GET", status="200"} 100
+ requests{method="POST", status="200"} 200
+ requests{method="GET", status="500"} 10
+ requests{method="POST", status="500"} 20
+ limits{status="200"} 1000
+ limits{status="404"} 500
+ limits{status="500"} 50
+
+# group_left with fill_right: fill missing "one" side series.
+eval instant at 0m requests / on(status) group_left fill_right(1) limits
+ {method="GET", status="200"} 0.1
+ {method="POST", status="200"} 0.2
+ {method="GET", status="500"} 0.2
+ {method="POST", status="500"} 0.4
+
+# group_left with fill_left: fill missing "many" side series.
+# For status="404", there's no matching requests, so a single series with the match group's labels is filled
+eval instant at 0m requests + on(status) group_left fill_left(0) limits
+ {method="GET", status="200"} 1100
+ {method="POST", status="200"} 1200
+ {method="GET", status="500"} 60
+ {method="POST", status="500"} 70
+ {status="404"} 500
+
+# group_left with fill on both sides.
+eval instant at 0m requests + on(status) group_left fill(0) limits
+ {method="GET", status="200"} 1100
+ {method="POST", status="200"} 1200
+ {method="GET", status="500"} 60
+ {method="POST", status="500"} 70
+ {status="404"} 500
+
+# group_right with fill_left: fill missing "one" side series.
+clear
+
+load 5m
+ cpu_info{instance="a", cpu="0"} 1
+ cpu_info{instance="a", cpu="1"} 1
+ cpu_info{instance="b", cpu="0"} 1
+ node_meta{instance="a"} 100
+ node_meta{instance="c"} 300
+
+# fill_left fills the "one" side (node_meta) when missing for a "many" side series.
+eval instant at 0m node_meta * on(instance) group_right fill_left(1) cpu_info
+ {instance="a", cpu="0"} 100
+ {instance="a", cpu="1"} 100
+ {instance="c"} 300
+
+# group_right with fill_right: fill missing "many" side series.
+eval instant at 0m node_meta * on(instance) group_right fill_right(0) cpu_info
+ {instance="a", cpu="0"} 100
+ {instance="a", cpu="1"} 100
+ {instance="b", cpu="0"} 0
+
+# group_right with fill on both sides.
+eval instant at 0m node_meta * on(instance) group_right fill(1) cpu_info
+ {instance="a", cpu="0"} 100
+ {instance="a", cpu="1"} 100
+ {instance="b", cpu="0"} 1
+ {instance="c"} 300
+
+# ---------- fill with group_left/group_right and extra labels ----------
+
+clear
+
+load 5m
+ requests{method="GET", status="200"} 100
+ requests{method="POST", status="200"} 200
+ limits{status="200", owner="team-a"} 1000
+ limits{status="500", owner="team-b"} 50
+
+# group_left with extra label and fill_right.
+# Note: when filling the "one" side, the joined label cannot be filled.
+eval instant at 0m requests + on(status) group_left(owner) fill_right(0) limits
+ {method="GET", status="200", owner="team-a"} 1100
+ {method="POST", status="200", owner="team-a"} 1200
+
+# ---------- Edge cases ----------
+
+clear
+
+load 5m
+ only_left{label="a"} 10
+ only_left{label="b"} 20
+ only_right{label="c"} 30
+ only_right{label="d"} 40
+
+# No overlap at all - fill creates all results.
+eval instant at 0m only_left + fill(0) only_right
+ {label="a"} 10
+ {label="b"} 20
+ {label="c"} 30
+ {label="d"} 40
+
+# No overlap - fill_left only creates right side results.
+eval instant at 0m only_left + fill_left(0) only_right
+ {label="c"} 30
+ {label="d"} 40
+
+# No overlap - fill_right only creates left side results.
+eval instant at 0m only_left + fill_right(0) only_right
+ {label="a"} 10
+ {label="b"} 20
+
+# Complete overlap - fill has no effect.
+clear
+
+load 5m
+ complete_left{label="a"} 10
+ complete_left{label="b"} 20
+ complete_right{label="a"} 100
+ complete_right{label="b"} 200
+
+eval instant at 0m complete_left + fill(99) complete_right
+ {label="a"} 110
+ {label="b"} 220
+
+# ---------- fill with range queries ----------
+
+clear
+
+load 5m
+ range_left{label="a"} 1 2 3 4 5
+ range_left{label="b"} 10 20 30 40 50
+ range_right{label="a"} 100 200 300 400 500
+ range_right{label="c"} 1000 2000 3000 4000 5000
+
+eval range from 0 to 20m step 5m range_left + fill(0) range_right
+ {label="a"} 101 202 303 404 505
+ {label="b"} 10 20 30 40 50
+ {label="c"} 1000 2000 3000 4000 5000
+
+eval range from 0 to 20m step 5m range_left + fill_right(0) range_right
+ {label="a"} 101 202 303 404 505
+ {label="b"} 10 20 30 40 50
+
+eval range from 0 to 20m step 5m range_left + fill_left(0) range_right
+ {label="a"} 101 202 303 404 505
+ {label="c"} 1000 2000 3000 4000 5000
+
+# Range queries with intermittently present series.
+clear
+
+load 5m
+ intermittent_left{label="a"} 1 _ 3 _ 5
+ intermittent_left{label="b"} _ 20 _ 40 _
+ intermittent_right{label="a"} _ 200 _ 400 _
+ intermittent_right{label="b"} 100 _ 300 _ 500
+ intermittent_right{label="c"} 1000 _ _ 4000 5000
+
+# When both sides have the same label but are present at different times,
+# fill creates results at all timestamps where at least one side is present.
+eval range from 0 to 20m step 5m intermittent_left + fill(0) intermittent_right
+ {label="a"} 1 200 3 400 5
+ {label="b"} 100 20 300 40 500
+ {label="c"} 1000 _ _ 4000 5000
+
+# fill_right only fills the right side when it's missing.
+# Output only exists when left side is present (right side filled with 0 if missing).
+eval range from 0 to 20m step 5m intermittent_left + fill_right(0) intermittent_right
+ {label="a"} 1 _ 3 _ 5
+ {label="b"} _ 20 _ 40 _
+
+# fill_left only fills the left side when it's missing.
+# Output only exists when right side is present (left side filled with 0 if missing).
+eval range from 0 to 20m step 5m intermittent_left + fill_left(0) intermittent_right
+ {label="a"} _ 200 _ 400 _
+ {label="b"} 100 _ 300 _ 500
+ {label="c"} 1000 _ _ 4000 5000
+
+# ---------- fill with vectors where one side is empty ----------
+
+clear
+
+load 5m
+ non_empty{label="a"} 10
+ non_empty{label="b"} 20
+
+# Empty right side - fill_right has no effect (nothing to add).
+eval instant at 0m non_empty + fill_right(0) nonexistent
+ {label="a"} 10
+ {label="b"} 20
+
+# Empty right side - fill_left creates nothing (no right side labels to use).
+eval instant at 0m non_empty + fill_left(0) nonexistent
+
+# Empty left side - fill_left has no effect.
+eval instant at 0m nonexistent + fill_left(0) non_empty
+ {label="a"} 10
+ {label="b"} 20
+
+# Empty left side - fill_right creates nothing.
+eval instant at 0m nonexistent + fill_right(0) non_empty
+
+# fill both sides with one side empty.
+eval instant at 0m non_empty + fill(0) nonexistent
+ {label="a"} 10
+ {label="b"} 20
+
+eval instant at 0m nonexistent + fill(0) non_empty
+ {label="a"} 10
+ {label="b"} 20
+
+# ---------- Metric names that match fill modifier keywords ----------
+
+clear
+
+load 5m
+ fill{label="a"} 1
+ fill{label="b"} 2
+ fill_left{label="a"} 10
+ fill_left{label="c"} 30
+ fill_right{label="b"} 200
+ fill_right{label="d"} 400
+ other{label="a"} 1000
+ other{label="e"} 5000
+
+# Metric named "fill" on the left side.
+eval instant at 0m fill + fill(0) other
+ {label="a"} 1001
+ {label="b"} 2
+ {label="e"} 5000
+
+# Metric named "fill" on the right side without modifier.
+eval instant at 0m other + fill
+ {label="a"} 1001
+
+# Metric named "fill" on the right side with fill() modifier.
+eval instant at 0m other + fill(0) fill
+ {label="a"} 1001
+ {label="b"} 2
+ {label="e"} 5000
+
+# Metric named "fill_left" on the right side with fill_left() modifier.
+eval instant at 0m other + fill_left(0) fill_left
+ {label="a"} 1010
+ {label="c"} 30
+
+# Metric named "fill_right" on the right side with fill_right() modifier.
+eval instant at 0m other + fill_right(0) fill_right
+ {label="a"} 1000
+ {label="e"} 5000
diff --git a/promql/promqltest/testdata/info.test b/promql/promqltest/testdata/info.test
index e15a429675..a3988abc64 100644
--- a/promql/promqltest/testdata/info.test
+++ b/promql/promqltest/testdata/info.test
@@ -70,9 +70,29 @@ eval range from 0m to 10m step 5m info(metric, {__name__=~".+_info"})
metric{instance="a", job="1", label="value", build_data="build", data="info", another_data="another info"} 0 1 2
# Info metrics themselves are ignored when it comes to enriching with info metric data labels.
-eval range from 0m to 10m step 5m info(build_info, {__name__=~".+_info", build_data=~".+"})
+eval range from 0m to 10m step 5m info(build_info, {__name__=~".+_info", another_data=~".+"})
build_info{instance="a", job="1", build_data="build"} 1 1 1
+# Info metrics themselves are ignored when it comes to enriching with info metric data labels.
+eval range from 0m to 10m step 5m info(build_info, {__name__=~".+_info"})
+ build_info{instance="a", job="1", build_data="build"} 1 1 1
+
+clear
+
+load 5m
+ metric{instance="a", job="1", label="value"} 0 1 2
+ target_info{instance="a", job="1", data="info", another_data="another info"} 1 1 1
+ build_info{instance="a", job="1", build_data="build"} 1 1 1
+ target_build{instance="a", job="1", build_data="build"} 1 1 1
+
+# Multiple positive __name__ matchers.
+eval range from 0m to 10m step 5m info(metric, {__name__=~"target_.+", __name__=~".+_info"})
+ metric{instance="a", job="1", label="value", data="info", another_data="another info"} 0 1 2
+
+# A positive and a negative __name__ matcher.
+eval range from 0m to 10m step 5m info(metric, {__name__=~".+_info", __name__!~".*build.*"})
+ metric{instance="a", job="1", label="value", data="info", another_data="another info"} 0 1 2
+
clear
# Overlapping target_info series.
@@ -166,3 +186,35 @@ eval range from 0 to 2m step 1m info({job="work"}, {__name__="info_metric"})
data_metric{instance="a", job="work", state="running", label="new"} _ _ 30
info_metric{instance="b", job="work", state="stopped"} 1 1 1
info_metric{instance="a", job="work", state="running"} 1 1 1
+
+clear
+
+load 1m
+ data_metric{} 1 2 3
+
+eval range from 0 to 2m step 1m info(data_metric, {__name__="info_metric"})
+ data_metric{} 1 2 3
+
+clear
+
+load 1m
+ data_metric{} 1 2 3
+ data_metric{instance="a"} 4 5 6
+
+eval range from 0 to 2m step 1m info(data_metric, {__name__="info_metric"})
+ data_metric{} 1 2 3
+ data_metric{instance="a"} 4 5 6
+
+clear
+
+load 1m
+ data_metric{} 1 2 3
+ data_metric{instance="a"} 4 5 6
+ data_metric{job="1"} 7 8 9
+ data_metric{instance="a", job="1"} 10 20 30
+
+eval range from 0 to 2m step 1m info(data_metric, {__name__="info_metric"})
+ data_metric{} 1 2 3
+ data_metric{instance="a"} 4 5 6
+ data_metric{job="1"} 7 8 9
+ data_metric{instance="a", job="1"} 10 20 30
diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test
index fd4b1f4178..3b497e5ff4 100644
--- a/promql/promqltest/testdata/native_histograms.test
+++ b/promql/promqltest/testdata/native_histograms.test
@@ -1283,7 +1283,7 @@ eval instant at 12m sum_over_time(nhcb_metric[13m])
eval instant at 12m avg_over_time(nhcb_metric[13m])
expect no_warn
expect info msg: PromQL info: mismatched custom buckets were reconciled during aggregation
- {} {{schema:-53 count:1 sum:1 custom_values:[5] counter_reset_hint:gauge buckets:[1]}}
+ {} {{schema:-53 count:1 sum:1 custom_values:[5] buckets:[1]}}
eval instant at 12m last_over_time(nhcb_metric[13m])
expect no_warn
@@ -1388,22 +1388,28 @@ clear
# Test native histograms with sum, count, avg.
load 10m
- histogram_sum{idx="0"} {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}}x1
- histogram_sum{idx="1"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
- histogram_sum{idx="2"} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
- histogram_sum{idx="3"} {{schema:1 count:0}}x1
+ histogram_sum{idx="0"} {{schema:0 count:25 sum:3.1 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}}x1
+ histogram_sum{idx="1"} {{schema:0 count:41 sum:1e100 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
+ histogram_sum{idx="2"} {{schema:0 count:41 sum:-1e100 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
+ histogram_sum{idx="3"} {{schema:1 count:0 sum:1.3 z_bucket:3 z_bucket_w:0.001 buckets:[2 4 2 3 2 2] n_buckets:[1 2 5 3 8 1 1 1 1 6 3]}}x1
histogram_sum_float{idx="0"} 42.0x1
eval instant at 10m sum(histogram_sum)
expect no_warn
- {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
+ {} {{schema:0 count:107 sum:4.4 z_bucket:17 z_bucket_w:0.001 buckets:[5 14 7 7 3 2 2] n_buckets:[3 13 19 6 17 18 0 0 0 10 10 4]}}
eval instant at 10m sum({idx="0"})
expect warn
-eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"})
+eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="3"})
expect no_warn
- {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
+ {} {{schema:0 count:25 sum:4.4 z_bucket:7 z_bucket_w:0.001 buckets:[3 8 5 3 1] n_buckets:[3 11 11 2 3 18]}}
+
+# Plain addition doesn't use Kahan summation, so operations involving very large magnitudes
+# (±1e+100) lose precision. The smaller values are absorbed, leading to an incorrect result.
+# eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"})
+# expect no_warn
+# {} {{schema:0 count:107 sum:4.4 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
eval instant at 10m count(histogram_sum)
expect no_warn
@@ -1411,13 +1417,63 @@ eval instant at 10m count(histogram_sum)
eval instant at 10m avg(histogram_sum)
expect no_warn
- {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}
+ {} {{schema:0 count:26.75 sum:1.1 z_bucket:4.25 z_bucket_w:0.001 buckets:[1.25 3.5 1.75 1.75 0.75 0.5 0.5] n_buckets:[0.75 3.25 4.75 1.5 4.25 4.5 0 0 0 2.5 2.5 1]}}
+
+clear
+
+# Test native histograms with incremental avg calulation.
+# Very large floats involved trigger incremental avg calculation, as direct avg calculation would overflow float64.
+load 10m
+ histogram_avg_incremental{idx="0"} {{schema:0 count:1.7976931348623157e+308 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}}x1
+ histogram_avg_incremental{idx="1"} {{schema:0 count:1e308 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}}x1
+ histogram_avg_incremental{idx="2"} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}}x1
+ histogram_avg_incremental{idx="3"} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}}x1
+ histogram_avg_incremental{idx="4"} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}}x1
+ histogram_avg_incremental{idx="5"} {{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}}x1
+ histogram_avg_incremental{idx="6"} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}}x1
+ histogram_avg_incremental{idx="7"} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}x1
+# This test fails due to float64 rounding in the incremental average calculation.
+# For large intermediate means (e.g. ~1e99), multiplying by a fractional weight like (n-1)/n
+# produces values such as 2.0000000000000002e99 instead of the mathematically exact 2e99.
+# While the relative error is tiny, subtracting nearly equal high-magnitude values later
+# result in a large absolute error. The outcome also depends on the (effectively random) order
+# in which input series are processed which makes the test flaky.
+# histogram_avg_incremental_2{idx="0"} {{schema:0 count:1.7976931348623157e+308 sum:5.3 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}}x1
+# histogram_avg_incremental_2{idx="1"} {{schema:0 count:1e308 sum:1e100 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}}x1
+# histogram_avg_incremental_2{idx="2"} {{schema:0 count:1e-6 sum:1 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}}x1
+# histogram_avg_incremental_2{idx="3"} {{schema:0 count:1e-6 sum:-1e100 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}}x1
+# histogram_avg_incremental_2{idx="4"} {{schema:0 count:1e-6 sum:1 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}}x1
+# histogram_avg_incremental_2{idx="5"} {{schema:0 count:1e-6 sum:1 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}}x1
+# histogram_avg_incremental_2{idx="6"} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}}x1
+# histogram_avg_incremental_2{idx="7"} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}x1
+
+eval instant at 10m avg(histogram_avg_incremental)
+ {} {{schema:0 count:3.497116418577895e+307 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}}
+
+# This test doesn't work, see the load section above for reasoning.
+# eval instant at 10m avg(histogram_avg_incremental_2)
+# {} {{schema:0 count:3.497116418577895e+307 sum:1.0375 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}}
clear
# Test native histograms with sum_over_time, avg_over_time.
load 1m
histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}}
+ histogram_sum_over_time_2 {{schema:0 count:1e10 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}} {{schema:0 count:1e-6 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}} {{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}
+ histogram_sum_over_time_3 {{schema:0 count:1 sum:1}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:-1e100}}
+ histogram_sum_over_time_4 {{schema:0 count:1 sum:5.3}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:-1e100}} {{schema:0 count:5 sum:2}} {{schema:0 count:6 sum:1e50}} {{schema:0 count:7 sum:-1e50}}
+ histogram_sum_over_time_incremental {{schema:0 count:1.7976931348623157e+308 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}} {{schema:0 count:1e308 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}} {{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}
+ histogram_sum_over_time_incremental_2 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:2}} {{schema:0 count:1e-6 sum:0}} {{schema:0 count:1e-6 sum:0}}
+ histogram_sum_over_time_incremental_3 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:0}}
+ histogram_sum_over_time_incremental_4 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:1e50}} {{schema:0 count:1e-6 sum:-1e50}} {{schema:0 count:1e-6 sum:0}}
+ histogram_sum_over_time_incremental_6 {{schema:0 count:1.7976931348623157e+308 sum:1}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}}
+# Kahan summation only compensates reliably across two magnitude scales. In following inputs, the
+# series contains three distinct magnitude groups (≈1, ≈1e50, and ≈1e100). When these magnitudes
+# are interleaved, rounding error can't be fully compensated, causing smaller values to be lost.
+# However, when values are ordered so that cancellation within one magnitude group
+# occurs first, followed by cancellation of the next group, the outcome remains accurate.
+# histogram_sum_over_time_5 {{schema:0 count:1 sum:5.3}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:1e50}} {{schema:0 count:5 sum:2}} {{schema:0 count:6 sum:-1e100}} {{schema:0 count:7 sum:-1e50}}
+# histogram_sum_over_time_incremental_5 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1e50}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:-1e50}} {{schema:0 count:1e-6 sum:0}}
eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
@@ -1425,6 +1481,83 @@ eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m])
eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}
+eval instant at 7m sum_over_time(histogram_sum_over_time_2[8m:1m])
+ {} {{schema:0 count:10000000000.000008 sum:10.03120467492675 z_bucket:3.56528e+50 z_bucket_w:0.001 buckets:[2.258e+220 2.2580178264e+220 2.6169037689e+220 261827.54331269444] n_buckets:[4004.342521030831 6080.675675179582 451745.57986202446 2035.3483135107433 1444.171911278132]}}
+
+eval instant at 7m avg_over_time(histogram_sum_over_time_2[8m:1m])
+ {} {{schema:0 count:1250000000.000001 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}}
+
+eval instant at 3m sum_over_time(histogram_sum_over_time_3[4m:1m])
+ {} {{schema:0 count:10 sum:2}}
+
+eval instant at 3m avg_over_time(histogram_sum_over_time_3[4m:1m])
+ {} {{schema:0 count:2.5 sum:0.5}}
+
+eval instant at 6m sum_over_time(histogram_sum_over_time_4[7m:1m])
+ {} {{schema:0 count:28 sum:8.3}}
+
+eval instant at 6m avg_over_time(histogram_sum_over_time_4[7m:1m])
+ {} {{schema:0 count:4 sum:1.1857142857142857}}
+
+# These tests don't work, see the load section above for reasoning.
+# eval instant at 6m sum_over_time(histogram_sum_over_time_5[7m:1m])
+# {} {{schema:0 count:28 sum:8.3}}
+#
+# eval instant at 6m avg_over_time(histogram_sum_over_time_5[7m:1m])
+# {} {{schema:0 count:4 sum:1.1857142857142857}}
+
+eval instant at 7m sum_over_time(histogram_sum_over_time_incremental[8m:1m])
+ {} {{schema:0 count:Inf sum:10.03120467492675 z_bucket:3.56528e+50 z_bucket_w:0.001 buckets:[2.258e+220 2.2580178264e+220 2.6169037689e+220 261827.54331269444] n_buckets:[4004.342521030831 6080.675675179582 451745.57986202446 2035.3483135107433 1444.171911278132]}}
+
+eval instant at 7m avg_over_time(histogram_sum_over_time_incremental[8m:1m])
+ {} {{schema:0 count:3.497116418577895e+307 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}}
+
+eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_2[7m:1m])
+ {} {{schema:0 count:Inf sum:8.3}}
+
+eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_2[7m:1m])
+ {} {{schema:0 count:3.9967044783747367e+307 sum:1.1857142857142857}}
+
+eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_3[7m:1m])
+ {} {{schema:0 count:Inf sum:6.3}}
+
+eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_3[7m:1m])
+ {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}}
+
+eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_4[7m:1m])
+ {} {{schema:0 count:Inf sum:6.3}}
+
+eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_4[7m:1m])
+ {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}}
+
+# These tests don't work, see the load section above for reasoning.
+# eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_5[7m:1m])
+# {} {{schema:0 count:Inf sum:6.3}}
+#
+# eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_5[7m:1m])
+# {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}}
+
+eval instant at 3m sum_over_time(histogram_sum_over_time_incremental_6[4m:1m])
+ {} {{schema:0 count:Inf sum:2}}
+
+eval instant at 3m avg_over_time(histogram_sum_over_time_incremental_6[4m:1m])
+ {} {{schema:0 count:6.99423283715579e+307 sum:0.5}}
+
+clear
+
+# Test avg_over_time with a single histogram sample (regression test for division by zero bug).
+load 1m
+ single_histogram_sample {{schema:3 sum:5 count:4 buckets:[1 2 1]}}
+ single_nhcb_sample {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}}
+
+# avg_over_time should return the histogram unchanged when there's only one sample, not Inf/NaN.
+eval instant at 0m avg_over_time(single_histogram_sample[1m])
+ {} {{schema:3 sum:5 count:4 buckets:[1 2 1]}}
+
+# Test with native histogram with custom buckets (NHCB).
+eval instant at 0m avg_over_time(single_nhcb_sample[1m])
+ {} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}}
+
clear
# Test native histograms with sub operator.
diff --git a/promql/quantile.go b/promql/quantile.go
index c44eb89e68..f3657e1621 100644
--- a/promql/quantile.go
+++ b/promql/quantile.go
@@ -94,10 +94,7 @@ type metricWithBuckets struct {
//
// If q>1, +Inf is returned.
//
-// We also return a bool to indicate if monotonicity needed to be forced,
-// and another bool to indicate if small differences between buckets (that
-// are likely artifacts of floating point precision issues) have been
-// ignored.
+// We also return extra info, see doc for ensureMonotonicAndIgnoreSmallDeltas.
//
// Generically speaking, BucketQuantile is for calculating the
// histogram_quantile() of classic histograms. See also: HistogramQuantile
@@ -105,15 +102,21 @@ type metricWithBuckets struct {
//
// BucketQuantile is exported as a useful quantile function over a set of
// given buckets. It may be used by other PromQL engine implementations.
-func BucketQuantile(q float64, buckets Buckets) (float64, bool, bool) {
- if math.IsNaN(q) {
- return math.NaN(), false, false
- }
- if q < 0 {
- return math.Inf(-1), false, false
- }
- if q > 1 {
- return math.Inf(+1), false, false
+func BucketQuantile(q float64, buckets Buckets) (
+ quantile float64,
+ forcedMonotonic, fixedPrecision bool,
+ minBucket, maxBucket, maxDiff float64,
+) {
+ switch {
+ case math.IsNaN(q):
+ quantile = math.NaN()
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
+ case q < 0:
+ quantile = math.Inf(-1)
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
+ case q > 1:
+ quantile = math.Inf(+1)
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
slices.SortFunc(buckets, func(a, b Bucket) int {
// We don't expect the bucket boundary to be a NaN.
@@ -126,39 +129,44 @@ func BucketQuantile(q float64, buckets Buckets) (float64, bool, bool) {
return 0
})
if !math.IsInf(buckets[len(buckets)-1].UpperBound, +1) {
- return math.NaN(), false, false
+ quantile = math.NaN()
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
buckets = coalesceBuckets(buckets)
- forcedMonotonic, fixedPrecision := ensureMonotonicAndIgnoreSmallDeltas(buckets, smallDeltaTolerance)
+ forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff = ensureMonotonicAndIgnoreSmallDeltas(buckets, smallDeltaTolerance)
if len(buckets) < 2 {
- return math.NaN(), false, false
+ quantile = math.NaN()
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
observations := buckets[len(buckets)-1].Count
if observations == 0 {
- return math.NaN(), false, false
+ quantile = math.NaN()
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
rank := q * observations
b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].Count >= rank })
- if b == len(buckets)-1 {
- return buckets[len(buckets)-2].UpperBound, forcedMonotonic, fixedPrecision
+ switch {
+ case b == len(buckets)-1:
+ quantile = buckets[len(buckets)-2].UpperBound
+ case b == 0 && buckets[0].UpperBound <= 0:
+ quantile = buckets[0].UpperBound
+ default:
+ var (
+ bucketStart float64
+ bucketEnd = buckets[b].UpperBound
+ count = buckets[b].Count
+ )
+ if b > 0 {
+ bucketStart = buckets[b-1].UpperBound
+ count -= buckets[b-1].Count
+ rank -= buckets[b-1].Count
+ }
+ quantile = bucketStart + (bucketEnd-bucketStart)*(rank/count)
}
- if b == 0 && buckets[0].UpperBound <= 0 {
- return buckets[0].UpperBound, forcedMonotonic, fixedPrecision
- }
- var (
- bucketStart float64
- bucketEnd = buckets[b].UpperBound
- count = buckets[b].Count
- )
- if b > 0 {
- bucketStart = buckets[b-1].UpperBound
- count -= buckets[b-1].Count
- rank -= buckets[b-1].Count
- }
- return bucketStart + (bucketEnd-bucketStart)*(rank/count), forcedMonotonic, fixedPrecision
+ return quantile, forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
// HistogramQuantile calculates the quantile 'q' based on the given histogram.
@@ -655,10 +663,20 @@ func coalesceBuckets(buckets Buckets) Buckets {
// the histogram buckets, essentially removing any decreases in the count
// between successive buckets.
//
-// We return a bool to indicate if this monotonicity was forced or not, and
-// another bool to indicate if small deltas were ignored or not.
-func ensureMonotonicAndIgnoreSmallDeltas(buckets Buckets, tolerance float64) (bool, bool) {
- var forcedMonotonic, fixedPrecision bool
+// We return:
+// - a bool to indicate if monotonicity needed to be forced
+// - a bool to indicate if small differences between buckets (that are likely
+// artifacts of floating point precision issues) have been ignored.
+// - a float to indicate the minimum bucket upper bound where monotonicity was forced, if applicable
+// - a float to indicate the maximum bucket upper bound where monotonicity was forced, if applicable
+// - a float to indicate the maximum difference between the count of two consecutive buckets
+// where monotonicity was forced, if applicable
+func ensureMonotonicAndIgnoreSmallDeltas(buckets Buckets, tolerance float64) (
+ forcedMonotonic, fixedPrecision bool,
+ minBucket, maxBucket, maxDiff float64,
+) {
+ minBucket = math.Inf(+1)
+ maxBucket = math.Inf(-1)
prev := buckets[0].Count
for i := 1; i < len(buckets); i++ {
curr := buckets[i].Count // Assumed always positive.
@@ -679,11 +697,20 @@ func ensureMonotonicAndIgnoreSmallDeltas(buckets Buckets, tolerance float64) (bo
// Do not update the 'prev' value as we are ignoring the decrease.
buckets[i].Count = prev
forcedMonotonic = true
+ if buckets[i].UpperBound < minBucket {
+ minBucket = buckets[i].UpperBound
+ }
+ if buckets[i].UpperBound > maxBucket {
+ maxBucket = buckets[i].UpperBound
+ }
+ if diff := prev - curr; diff > maxDiff {
+ maxDiff = diff
+ }
continue
}
prev = curr
}
- return forcedMonotonic, fixedPrecision
+ return forcedMonotonic, fixedPrecision, minBucket, maxBucket, maxDiff
}
// quantile calculates the given quantile of a vector of samples.
diff --git a/promql/quantile_test.go b/promql/quantile_test.go
index c97ff7c3c4..e2042dc3c4 100644
--- a/promql/quantile_test.go
+++ b/promql/quantile_test.go
@@ -308,10 +308,10 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) {
} {
t.Run(name, func(t *testing.T) {
for q, v := range tc.expectedValues {
- res, forced, fixed := BucketQuantile(q, tc.getInput())
+ quantile, forced, fixed, _, _, _ := BucketQuantile(q, tc.getInput())
require.Equal(t, tc.expectedForced, forced)
require.Equal(t, tc.expectedFixed, fixed)
- require.InEpsilon(t, v, res, eps)
+ require.InEpsilon(t, v, quantile, eps)
}
})
}
diff --git a/promql/query_logger.go b/promql/query_logger.go
index 954f8b1a5b..0c4b218828 100644
--- a/promql/query_logger.go
+++ b/promql/query_logger.go
@@ -164,7 +164,7 @@ func trimStringByBytes(str string, size int) string {
trimIndex := len(bytesStr)
if size < len(bytesStr) {
- for !utf8.RuneStart(bytesStr[size]) {
+ for size > 0 && !utf8.RuneStart(bytesStr[size]) {
size--
}
trimIndex = size
diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go
index 8c88757bd7..edd3baad12 100644
--- a/promql/query_logger_test.go
+++ b/promql/query_logger_test.go
@@ -127,6 +127,47 @@ func TestMMapFile(t *testing.T) {
require.Equal(t, []byte(data), bytes[:2], "Mmap failed")
}
+func TestTrimStringByBytes(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ input string
+ size int
+ expected string
+ }{
+ {
+ name: "normal ASCII string",
+ input: "hello",
+ size: 3,
+ expected: "hel",
+ },
+ {
+ name: "no trimming needed",
+ input: "hi",
+ size: 10,
+ expected: "hi",
+ },
+ {
+ name: "UTF-8 multibyte character boundary",
+ input: "日本", // 6 bytes (3 bytes per character)
+ size: 4,
+ expected: "日", // trims back to complete character boundary
+ },
+ {
+ name: "invalid UTF-8 continuation-only bytes",
+ input: string([]byte{0x80, 0x81, 0x82, 0x83, 0x84}), // only continuation bytes
+ size: 4,
+ expected: "",
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ require.NotPanics(t, func() {
+ result := trimStringByBytes(tc.input, tc.size)
+ require.Equal(t, tc.expected, result)
+ })
+ })
+ }
+}
+
func TestParseBrokenJSON(t *testing.T) {
for _, tc := range []struct {
b []byte
diff --git a/promql/value.go b/promql/value.go
index 02cb021024..17afdfc410 100644
--- a/promql/value.go
+++ b/promql/value.go
@@ -487,6 +487,11 @@ func (ssi *storageSeriesIterator) AtT() int64 {
return ssi.currT
}
+// TODO(krajorama): implement AtST.
+func (*storageSeriesIterator) AtST() int64 {
+ return 0
+}
+
func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
if ssi.currH != nil {
ssi.iHistograms++
diff --git a/renovate.json b/renovate.json
index 350cfe2a0d..814193329a 100644
--- a/renovate.json
+++ b/renovate.json
@@ -9,11 +9,13 @@
"gomodTidy",
"gomodUpdateImportPaths"
],
- "schedule": ["* 11 21 * *"],
+ "schedule": ["* * 21 * *"],
"timezone": "UTC",
"github-actions": {
"managerFilePatterns": ["scripts/**"]
},
+ "prConcurrentLimit": 20,
+ "prHourlyLimit": 5,
"packageRules": [
{
"description": "Don't update replace directives",
diff --git a/rules/alerting_test.go b/rules/alerting_test.go
index a2c7abcd56..91ea09e5fc 100644
--- a/rules/alerting_test.go
+++ b/rules/alerting_test.go
@@ -115,7 +115,7 @@ func TestAlertingRuleTemplateWithHistogram(t *testing.T) {
return []promql.Sample{{H: &h}}, nil
}
- expr, err := parser.ParseExpr("foo")
+ expr, err := testParser.ParseExpr("foo")
require.NoError(t, err)
rule := NewAlertingRule(
@@ -158,9 +158,8 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70 stale
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests < 100`)
+ expr, err := testParser.ParseExpr(`http_requests < 100`)
require.NoError(t, err)
rule := NewAlertingRule(
@@ -264,9 +263,8 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests < 100`)
+ expr, err := testParser.ParseExpr(`http_requests < 100`)
require.NoError(t, err)
ruleWithoutExternalLabels := NewAlertingRule(
@@ -359,9 +357,8 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests < 100`)
+ expr, err := testParser.ParseExpr(`http_requests < 100`)
require.NoError(t, err)
ruleWithoutExternalURL := NewAlertingRule(
@@ -454,9 +451,8 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests < 100`)
+ expr, err := testParser.ParseExpr(`http_requests < 100`)
require.NoError(t, err)
rule := NewAlertingRule(
@@ -510,9 +506,8 @@ func TestAlertingRuleQueryInTemplate(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 70 85 70 70
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`sum(http_requests) < 100`)
+ expr, err := testParser.ParseExpr(`sum(http_requests) < 100`)
require.NoError(t, err)
ruleWithQueryInTemplate := NewAlertingRule(
@@ -584,7 +579,6 @@ func BenchmarkAlertingRuleAtomicField(b *testing.B) {
func TestAlertingRuleDuplicate(t *testing.T) {
storage := teststorage.New(t)
- defer storage.Close()
opts := promql.EngineOpts{
Logger: nil,
@@ -598,7 +592,7 @@ func TestAlertingRuleDuplicate(t *testing.T) {
now := time.Now()
- expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
+ expr, _ := testParser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
rule := NewAlertingRule(
"foo",
expr,
@@ -621,7 +615,6 @@ func TestAlertingRuleLimit(t *testing.T) {
metric{label="1"} 1
metric{label="2"} 1
`)
- t.Cleanup(func() { storage.Close() })
tests := []struct {
limit int
@@ -642,7 +635,7 @@ func TestAlertingRuleLimit(t *testing.T) {
},
}
- expr, _ := parser.ParseExpr(`metric > 0`)
+ expr, _ := testParser.ParseExpr(`metric > 0`)
rule := NewAlertingRule(
"foo",
expr,
@@ -697,12 +690,14 @@ func TestQueryForStateSeries(t *testing.T) {
{
selectMockFunction: func(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
return storage.TestSeriesSet(storage.MockSeries(
+ nil,
[]int64{1, 2, 3},
[]float64{1, 2, 3},
[]string{"__name__", "ALERTS_FOR_STATE", "alertname", "TestRule", "severity", "critical"},
))
},
expectedSeries: storage.MockSeries(
+ nil,
[]int64{1, 2, 3},
[]float64{1, 2, 3},
[]string{"__name__", "ALERTS_FOR_STATE", "alertname", "TestRule", "severity", "critical"},
@@ -763,7 +758,7 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) {
al := &Alert{State: StateFiring, Labels: lbls, ActiveAt: time.Now()}
rule.active[h] = al
- expr, err := parser.ParseExpr("foo")
+ expr, err := testParser.ParseExpr("foo")
require.NoError(t, err)
rule.vector = expr
@@ -803,9 +798,8 @@ func TestKeepFiringFor(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70 10x5
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests > 50`)
+ expr, err := testParser.ParseExpr(`http_requests > 50`)
require.NoError(t, err)
rule := NewAlertingRule(
@@ -914,9 +908,8 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 10x10
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests > 50`)
+ expr, err := testParser.ParseExpr(`http_requests > 50`)
require.NoError(t, err)
rule := NewAlertingRule(
@@ -976,7 +969,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) {
lbs = labels.FromStrings("test", "test")
)
- expr, err := parser.ParseExpr(query)
+ expr, err := testParser.ParseExpr(query)
require.NoError(t, err)
rule := NewAlertingRule(
diff --git a/rules/manager.go b/rules/manager.go
index c835a7c6e8..5548359ce6 100644
--- a/rules/manager.go
+++ b/rules/manager.go
@@ -138,6 +138,9 @@ type ManagerOptions struct {
// FeatureRegistry is used to register rule manager features.
FeatureRegistry features.Collector
+
+ // Parser is the PromQL parser used for parsing rule expressions.
+ Parser parser.Parser
}
// NewManager returns an implementation of Manager, ready to be started
@@ -158,8 +161,12 @@ func NewManager(o *ManagerOptions) *Manager {
o.Metrics = NewGroupMetrics(o.Registerer)
}
+ if o.Parser == nil {
+ o.Parser = parser.NewParser(parser.Options{})
+ }
+
if o.GroupLoader == nil {
- o.GroupLoader = FileLoader{}
+ o.GroupLoader = FileLoader{parser: o.Parser}
}
if o.RuleConcurrencyController == nil {
@@ -320,14 +327,18 @@ type GroupLoader interface {
}
// FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile
-// and parser.ParseExpr.
-type FileLoader struct{}
-
-func (FileLoader) Load(identifier string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*rulefmt.RuleGroups, []error) {
- return rulefmt.ParseFile(identifier, ignoreUnknownFields, nameValidationScheme)
+// for loading and uses the configured Parser for expression parsing.
+type FileLoader struct {
+ parser parser.Parser
}
-func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) }
+func (fl FileLoader) Load(identifier string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*rulefmt.RuleGroups, []error) {
+ return rulefmt.ParseFile(identifier, ignoreUnknownFields, nameValidationScheme, fl.parser)
+}
+
+func (fl FileLoader) Parse(query string) (parser.Expr, error) {
+ return fl.parser.ParseExpr(query)
+}
// LoadGroups reads groups from a list of files.
func (m *Manager) LoadGroups(
@@ -606,7 +617,7 @@ func FromMaps(maps ...map[string]string) labels.Labels {
}
// ParseFiles parses the rule files corresponding to glob patterns.
-func ParseFiles(patterns []string, nameValidationScheme model.ValidationScheme) error {
+func ParseFiles(patterns []string, nameValidationScheme model.ValidationScheme, p parser.Parser) error {
files := map[string]string{}
for _, pat := range patterns {
fns, err := filepath.Glob(pat)
@@ -626,7 +637,7 @@ func ParseFiles(patterns []string, nameValidationScheme model.ValidationScheme)
}
}
for fn, pat := range files {
- _, errs := rulefmt.ParseFile(fn, false, nameValidationScheme)
+ _, errs := rulefmt.ParseFile(fn, false, nameValidationScheme, p)
if len(errs) > 0 {
return fmt.Errorf("parse rules from file %q (pattern: %q): %w", fn, pat, errors.Join(errs...))
}
diff --git a/rules/manager_test.go b/rules/manager_test.go
index 0991e8198a..27930fc4c7 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -42,13 +42,14 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/promql"
- "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/teststorage"
prom_testutil "github.com/prometheus/prometheus/util/testutil"
+ "github.com/prometheus/prometheus/util/testutil/synctest"
)
func TestMain(m *testing.M) {
@@ -61,9 +62,8 @@ func TestAlertingRule(t *testing.T) {
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
+ expr, err := testParser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err)
rule := NewAlertingRule(
@@ -204,9 +204,8 @@ func TestForStateAddSamples(t *testing.T) {
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
+ expr, err := testParser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err)
rule := NewAlertingRule(
@@ -366,9 +365,8 @@ func TestForStateRestore(t *testing.T) {
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 125 90 60 0 0 25 0 0 40 0 130
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
+ expr, err := testParser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err)
ng := testEngine(t)
@@ -537,7 +535,7 @@ func TestForStateRestore(t *testing.T) {
func TestStaleness(t *testing.T) {
for _, queryOffset := range []time.Duration{0, time.Minute} {
st := teststorage.New(t)
- defer st.Close()
+
engineOpts := promql.EngineOpts{
Logger: nil,
Reg: nil,
@@ -553,7 +551,7 @@ func TestStaleness(t *testing.T) {
Logger: promslog.NewNopLogger(),
}
- expr, err := parser.ParseExpr("a + 1")
+ expr, err := testParser.ParseExpr("a + 1")
require.NoError(t, err)
rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
group := NewGroup(GroupOptions{
@@ -725,7 +723,7 @@ func TestCopyState(t *testing.T) {
func TestDeletedRuleMarkedStale(t *testing.T) {
st := teststorage.New(t)
- defer st.Close()
+
oldGroup := &Group{
rules: []Rule{
NewRecordingRule("rule1", nil, labels.FromStrings("l1", "v1")),
@@ -771,7 +769,7 @@ func TestUpdate(t *testing.T) {
"test": labels.FromStrings("name", "value"),
}
st := teststorage.New(t)
- defer st.Close()
+
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
@@ -811,7 +809,7 @@ func TestUpdate(t *testing.T) {
}
// Groups will be recreated if updated.
- rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml", false, model.UTF8Validation)
+ rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml", false, model.UTF8Validation, testParser)
require.Empty(t, errs, "file parsing failures")
tmpFile, err := os.CreateTemp("", "rules.test.*.yaml")
@@ -909,7 +907,7 @@ func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File,
func TestNotify(t *testing.T) {
storage := teststorage.New(t)
- defer storage.Close()
+
engineOpts := promql.EngineOpts{
Logger: nil,
Reg: nil,
@@ -931,7 +929,7 @@ func TestNotify(t *testing.T) {
ResendDelay: 2 * time.Second,
}
- expr, err := parser.ParseExpr("a > 1")
+ expr, err := testParser.ParseExpr("a > 1")
require.NoError(t, err)
rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
group := NewGroup(GroupOptions{
@@ -983,7 +981,7 @@ func TestMetricsUpdate(t *testing.T) {
}
storage := teststorage.New(t)
- defer storage.Close()
+
registry := prometheus.NewRegistry()
opts := promql.EngineOpts{
Logger: nil,
@@ -1056,7 +1054,7 @@ func TestGroupStalenessOnRemoval(t *testing.T) {
sameFiles := []string{"fixtures/rules2_copy.yaml"}
storage := teststorage.New(t)
- defer storage.Close()
+
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
@@ -1134,7 +1132,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
files := []string{"fixtures/rules2.yaml"}
storage := teststorage.New(t)
- defer storage.Close()
+
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
@@ -1201,8 +1199,10 @@ func TestRuleMovedBetweenGroups(t *testing.T) {
t.Skip("skipping test in short mode.")
}
- storage := teststorage.New(t, 600000)
- defer storage.Close()
+ storage := teststorage.New(t, func(opt *tsdb.Options) {
+ opt.OutOfOrderTimeWindow = 600000
+ })
+
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
@@ -1284,7 +1284,7 @@ func TestGroupHasAlertingRules(t *testing.T) {
func TestRuleHealthUpdates(t *testing.T) {
st := teststorage.New(t)
- defer st.Close()
+
engineOpts := promql.EngineOpts{
Logger: nil,
Reg: nil,
@@ -1300,7 +1300,7 @@ func TestRuleHealthUpdates(t *testing.T) {
Logger: promslog.NewNopLogger(),
}
- expr, err := parser.ParseExpr("a + 1")
+ expr, err := testParser.ParseExpr("a + 1")
require.NoError(t, err)
rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
group := NewGroup(GroupOptions{
@@ -1345,9 +1345,8 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
load 5m
http_requests{instance="0"} 75 85 50 0 0 25 0 0 40 0 120
`)
- t.Cleanup(func() { storage.Close() })
- expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
+ expr, err := testParser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err)
testValue := 1
@@ -1460,7 +1459,6 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
func TestNativeHistogramsInRecordingRules(t *testing.T) {
storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
// Add some histograms.
db := storage.DB
@@ -1483,7 +1481,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
Logger: promslog.NewNopLogger(),
}
- expr, err := parser.ParseExpr("sum(histogram_metric)")
+ expr, err := testParser.ParseExpr("sum(histogram_metric)")
require.NoError(t, err)
rule := NewRecordingRule("sum:histogram_metric", expr, labels.Labels{})
@@ -1522,9 +1520,6 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependencies(t *testing.T) {
storage := teststorage.New(t)
- t.Cleanup(func() {
- require.NoError(t, storage.Close())
- })
ruleManager := NewManager(&ManagerOptions{
Context: context.Background(),
@@ -1587,23 +1582,23 @@ func TestDependencyMap(t *testing.T) {
Logger: promslog.NewNopLogger(),
}
- expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
+ expr, err := testParser.ParseExpr("sum by (user) (rate(requests[1m]))")
require.NoError(t, err)
rule := NewRecordingRule("user:requests:rate1m", expr, labels.Labels{})
- expr, err = parser.ParseExpr("user:requests:rate1m <= 0")
+ expr, err = testParser.ParseExpr("user:requests:rate1m <= 0")
require.NoError(t, err)
rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
- expr, err = parser.ParseExpr("sum by (user) (rate(requests[5m]))")
+ expr, err = testParser.ParseExpr("sum by (user) (rate(requests[5m]))")
require.NoError(t, err)
rule3 := NewRecordingRule("user:requests:rate5m", expr, labels.Labels{})
- expr, err = parser.ParseExpr("increase(user:requests:rate1m[1h])")
+ expr, err = testParser.ParseExpr("increase(user:requests:rate1m[1h])")
require.NoError(t, err)
rule4 := NewRecordingRule("user:requests:increase1h", expr, labels.Labels{})
- expr, err = parser.ParseExpr(`sum by (user) ({__name__=~"user:requests.+5m"})`)
+ expr, err = testParser.ParseExpr(`sum by (user) ({__name__=~"user:requests.+5m"})`)
require.NoError(t, err)
rule5 := NewRecordingRule("user:requests:sum5m", expr, labels.Labels{})
@@ -1645,7 +1640,7 @@ func TestNoDependency(t *testing.T) {
Logger: promslog.NewNopLogger(),
}
- expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
+ expr, err := testParser.ParseExpr("sum by (user) (rate(requests[1m]))")
require.NoError(t, err)
rule := NewRecordingRule("user:requests:rate1m", expr, labels.Labels{})
@@ -1676,7 +1671,7 @@ func TestDependenciesEdgeCases(t *testing.T) {
Opts: opts,
})
- expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
+ expr, err := testParser.ParseExpr("sum by (user) (rate(requests[1m]))")
require.NoError(t, err)
rule := NewRecordingRule("user:requests:rate1m", expr, labels.Labels{})
@@ -1687,11 +1682,11 @@ func TestDependenciesEdgeCases(t *testing.T) {
})
t.Run("rules which reference no series", func(t *testing.T) {
- expr, err := parser.ParseExpr("one")
+ expr, err := testParser.ParseExpr("one")
require.NoError(t, err)
rule1 := NewRecordingRule("1", expr, labels.Labels{})
- expr, err = parser.ParseExpr("two")
+ expr, err = testParser.ParseExpr("two")
require.NoError(t, err)
rule2 := NewRecordingRule("2", expr, labels.Labels{})
@@ -1709,11 +1704,11 @@ func TestDependenciesEdgeCases(t *testing.T) {
})
t.Run("rule with regexp matcher on metric name", func(t *testing.T) {
- expr, err := parser.ParseExpr("sum(requests)")
+ expr, err := testParser.ParseExpr("sum(requests)")
require.NoError(t, err)
rule1 := NewRecordingRule("first", expr, labels.Labels{})
- expr, err = parser.ParseExpr(`sum({__name__=~".+"})`)
+ expr, err = testParser.ParseExpr(`sum({__name__=~".+"})`)
require.NoError(t, err)
rule2 := NewRecordingRule("second", expr, labels.Labels{})
@@ -1731,11 +1726,11 @@ func TestDependenciesEdgeCases(t *testing.T) {
})
t.Run("rule with not equal matcher on metric name", func(t *testing.T) {
- expr, err := parser.ParseExpr("sum(requests)")
+ expr, err := testParser.ParseExpr("sum(requests)")
require.NoError(t, err)
rule1 := NewRecordingRule("first", expr, labels.Labels{})
- expr, err = parser.ParseExpr(`sum({__name__!="requests", service="app"})`)
+ expr, err = testParser.ParseExpr(`sum({__name__!="requests", service="app"})`)
require.NoError(t, err)
rule2 := NewRecordingRule("second", expr, labels.Labels{})
@@ -1753,11 +1748,11 @@ func TestDependenciesEdgeCases(t *testing.T) {
})
t.Run("rule with not regexp matcher on metric name", func(t *testing.T) {
- expr, err := parser.ParseExpr("sum(requests)")
+ expr, err := testParser.ParseExpr("sum(requests)")
require.NoError(t, err)
rule1 := NewRecordingRule("first", expr, labels.Labels{})
- expr, err = parser.ParseExpr(`sum({__name__!~"requests.+", service="app"})`)
+ expr, err = testParser.ParseExpr(`sum({__name__!~"requests.+", service="app"})`)
require.NoError(t, err)
rule2 := NewRecordingRule("second", expr, labels.Labels{})
@@ -1777,27 +1772,27 @@ func TestDependenciesEdgeCases(t *testing.T) {
for _, metaMetric := range []string{alertMetricName, alertForStateMetricName} {
t.Run(metaMetric, func(t *testing.T) {
t.Run("rule querying alerts meta-metric with alertname", func(t *testing.T) {
- expr, err := parser.ParseExpr("sum(requests) > 0")
+ expr, err := testParser.ParseExpr("sum(requests) > 0")
require.NoError(t, err)
rule1 := NewAlertingRule("first", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
- expr, err = parser.ParseExpr(fmt.Sprintf(`sum(%s{alertname="test"}) > 0`, metaMetric))
+ expr, err = testParser.ParseExpr(fmt.Sprintf(`sum(%s{alertname="test"}) > 0`, metaMetric))
require.NoError(t, err)
rule2 := NewAlertingRule("second", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
- expr, err = parser.ParseExpr(fmt.Sprintf(`sum(%s{alertname=~"first.*"}) > 0`, metaMetric))
+ expr, err = testParser.ParseExpr(fmt.Sprintf(`sum(%s{alertname=~"first.*"}) > 0`, metaMetric))
require.NoError(t, err)
rule3 := NewAlertingRule("third", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
- expr, err = parser.ParseExpr(fmt.Sprintf(`sum(%s{alertname!="first"}) > 0`, metaMetric))
+ expr, err = testParser.ParseExpr(fmt.Sprintf(`sum(%s{alertname!="first"}) > 0`, metaMetric))
require.NoError(t, err)
rule4 := NewAlertingRule("fourth", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
- expr, err = parser.ParseExpr("sum(failures)")
+ expr, err = testParser.ParseExpr("sum(failures)")
require.NoError(t, err)
rule5 := NewRecordingRule("fifth", expr, labels.Labels{})
- expr, err = parser.ParseExpr(fmt.Sprintf(`fifth > 0 and sum(%s{alertname="fourth"}) > 0`, metaMetric))
+ expr, err = testParser.ParseExpr(fmt.Sprintf(`fifth > 0 and sum(%s{alertname="fourth"}) > 0`, metaMetric))
require.NoError(t, err)
rule6 := NewAlertingRule("sixth", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
@@ -1836,23 +1831,23 @@ func TestDependenciesEdgeCases(t *testing.T) {
})
t.Run("rule querying alerts meta-metric without alertname", func(t *testing.T) {
- expr, err := parser.ParseExpr("sum(requests)")
+ expr, err := testParser.ParseExpr("sum(requests)")
require.NoError(t, err)
rule1 := NewRecordingRule("first", expr, labels.Labels{})
- expr, err = parser.ParseExpr(`sum(requests) > 0`)
+ expr, err = testParser.ParseExpr(`sum(requests) > 0`)
require.NoError(t, err)
rule2 := NewAlertingRule("second", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
- expr, err = parser.ParseExpr(fmt.Sprintf(`sum(%s) > 0`, metaMetric))
+ expr, err = testParser.ParseExpr(fmt.Sprintf(`sum(%s) > 0`, metaMetric))
require.NoError(t, err)
rule3 := NewAlertingRule("third", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
- expr, err = parser.ParseExpr("sum(failures)")
+ expr, err = testParser.ParseExpr("sum(failures)")
require.NoError(t, err)
rule4 := NewRecordingRule("fourth", expr, labels.Labels{})
- expr, err = parser.ParseExpr(fmt.Sprintf(`fourth > 0 and sum(%s) > 0`, metaMetric))
+ expr, err = testParser.ParseExpr(fmt.Sprintf(`fourth > 0 and sum(%s) > 0`, metaMetric))
require.NoError(t, err)
rule5 := NewAlertingRule("fifth", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
@@ -1896,11 +1891,11 @@ func TestNoMetricSelector(t *testing.T) {
Logger: promslog.NewNopLogger(),
}
- expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
+ expr, err := testParser.ParseExpr("sum by (user) (rate(requests[1m]))")
require.NoError(t, err)
rule := NewRecordingRule("user:requests:rate1m", expr, labels.Labels{})
- expr, err = parser.ParseExpr(`count({user="bob"})`)
+ expr, err = testParser.ParseExpr(`count({user="bob"})`)
require.NoError(t, err)
rule2 := NewRecordingRule("user:requests:rate1m", expr, labels.Labels{})
@@ -1925,15 +1920,15 @@ func TestDependentRulesWithNonMetricExpression(t *testing.T) {
Logger: promslog.NewNopLogger(),
}
- expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
+ expr, err := testParser.ParseExpr("sum by (user) (rate(requests[1m]))")
require.NoError(t, err)
rule := NewRecordingRule("user:requests:rate1m", expr, labels.Labels{})
- expr, err = parser.ParseExpr("user:requests:rate1m <= 0")
+ expr, err = testParser.ParseExpr("user:requests:rate1m <= 0")
require.NoError(t, err)
rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
- expr, err = parser.ParseExpr("3")
+ expr, err = testParser.ParseExpr("3")
require.NoError(t, err)
rule3 := NewRecordingRule("three", expr, labels.Labels{})
@@ -2016,313 +2011,313 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) {
func TestAsyncRuleEvaluation(t *testing.T) {
t.Run("synchronous evaluation with independent rules", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, 0))
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
+ ctx := t.Context()
- expectedRuleCount := 6
- expectedSampleCount := 4
+ ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, 0))
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
- for _, group := range groups {
- require.Len(t, group.rules, expectedRuleCount)
+ expectedRuleCount := 6
+ expectedSampleCount := 4
- start := time.Now()
- DefaultEvalIterationFunc(ctx, group, start)
+ for _, group := range groups {
+ require.Len(t, group.rules, expectedRuleCount)
- // Expected evaluation order
- order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
- require.Nil(t, order)
+ start := time.Now()
+ DefaultEvalIterationFunc(ctx, group, start)
- // Never expect more than 1 inflight query at a time.
- require.EqualValues(t, 1, maxInflight.Load())
- // Each rule should take at least 1 second to execute sequentially.
- require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
- // Each recording rule produces one vector.
- require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- // Group duration is higher than the sum of rule durations (group overhead).
- require.GreaterOrEqual(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
- }
+ // Expected evaluation order
+ order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
+ require.Nil(t, order)
+
+ // Never expect more than 1 inflight query at a time.
+ require.EqualValues(t, 1, maxInflight.Load())
+ // Each rule should take at least 1 second to execute sequentially.
+ require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
+ // Each recording rule produces one vector.
+ require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ // Group duration is higher than the sum of rule durations (group overhead).
+ require.GreaterOrEqual(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
+ }
+ })
})
t.Run("asynchronous evaluation with independent and dependent rules", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- expectedRuleCount := 6
- expectedSampleCount := 4
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+ ctx := t.Context()
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
+ expectedRuleCount := 6
+ expectedSampleCount := 4
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
- for _, group := range groups {
- require.Len(t, group.rules, expectedRuleCount)
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
- start := time.Now()
- DefaultEvalIterationFunc(ctx, group, start)
+ for _, group := range groups {
+ require.Len(t, group.rules, expectedRuleCount)
- // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
- require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
- // Each recording rule produces one vector.
- require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- }
+ start := time.Now()
+ DefaultEvalIterationFunc(ctx, group, start)
+
+ // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
+ require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
+ // Each recording rule produces one vector.
+ require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ }
+ })
})
t.Run("asynchronous evaluation of all independent rules, insufficient concurrency", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- expectedRuleCount := 8
- expectedSampleCount := expectedRuleCount
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+ ctx := t.Context()
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
+ expectedRuleCount := 8
+ expectedSampleCount := expectedRuleCount
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
- for _, group := range groups {
- require.Len(t, group.rules, expectedRuleCount)
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
- start := time.Now()
- DefaultEvalIterationFunc(ctx, group, start)
+ for _, group := range groups {
+ require.Len(t, group.rules, expectedRuleCount)
- // Expected evaluation order (isn't affected by concurrency settings)
- order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
- require.Equal(t, []ConcurrentRules{
- {0, 1, 2, 3, 4, 5, 6, 7},
- }, order)
+ start := time.Now()
+ DefaultEvalIterationFunc(ctx, group, start)
- // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
- require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
- // Each recording rule produces one vector.
- require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- }
+ // Expected evaluation order (isn't affected by concurrency settings)
+ order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
+ require.Equal(t, []ConcurrentRules{
+ {0, 1, 2, 3, 4, 5, 6, 7},
+ }, order)
+
+ // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
+ require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
+ // Each recording rule produces one vector.
+ require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ }
+ })
})
t.Run("asynchronous evaluation of all independent rules, sufficient concurrency", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- expectedRuleCount := 8
- expectedSampleCount := expectedRuleCount
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+ ctx := t.Context()
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = int64(expectedRuleCount) * 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
+ expectedRuleCount := 8
+ expectedSampleCount := expectedRuleCount
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = int64(expectedRuleCount) * 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
- for _, group := range groups {
- require.Len(t, group.rules, expectedRuleCount)
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
+
+ for _, group := range groups {
+ require.Len(t, group.rules, expectedRuleCount)
+
+ start := time.Now()
+
+ DefaultEvalIterationFunc(ctx, group, start)
+
+ // Expected evaluation order
+ order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
+ require.Equal(t, []ConcurrentRules{
+ {0, 1, 2, 3, 4, 5, 6, 7},
+ }, order)
+
+ // Max inflight can be up to MaxConcurrentEvals concurrent evals, since there is sufficient concurrency to run all rules at once.
+ require.LessOrEqual(t, int64(maxInflight.Load()), opts.MaxConcurrentEvals)
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
+ // Each recording rule produces one vector.
+ require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ // Group duration is less than the sum of rule durations
+ require.Less(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
+ }
+ })
+ })
+
+ t.Run("asynchronous evaluation of independent rules, with indeterminate. Should be synchronous", func(t *testing.T) {
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
+
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
+
+ ctx := t.Context()
+
+ ruleCount := 7
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = int64(ruleCount) * 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
+
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_indeterminates.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
+
+ for _, group := range groups {
+ require.Len(t, group.rules, ruleCount)
+
+ start := time.Now()
+
+ group.Eval(ctx, start)
+
+ // Never expect more than 1 inflight query at a time.
+ require.EqualValues(t, 1, maxInflight.Load())
+ // Each rule should take at least 1 second to execute sequentially.
+ require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
+ // Each rule produces one vector.
+ require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ }
+ })
+ })
+
+ t.Run("asynchronous evaluation of rules that benefit from reordering", func(t *testing.T) {
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
+
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
+
+ ctx := t.Context()
+
+ ruleCount := 8
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = int64(ruleCount) * 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
+
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_dependents_on_base.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
+ var group *Group
+ for _, g := range groups {
+ group = g
+ }
start := time.Now()
- DefaultEvalIterationFunc(ctx, group, start)
-
// Expected evaluation order
order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
require.Equal(t, []ConcurrentRules{
- {0, 1, 2, 3, 4, 5, 6, 7},
+ {0, 4},
+ {1, 2, 3, 5, 6, 7},
}, order)
- // Max inflight can be up to MaxConcurrentEvals concurrent evals, since there is sufficient concurrency to run all rules at once.
- require.LessOrEqual(t, int64(maxInflight.Load()), opts.MaxConcurrentEvals)
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
- // Each recording rule produces one vector.
- require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- // Group duration is less than the sum of rule durations
- require.Less(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
- }
- })
-
- t.Run("asynchronous evaluation of independent rules, with indeterminate. Should be synchronous", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
-
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
-
- ruleCount := 7
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
-
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = int64(ruleCount) * 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
-
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_indeterminates.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
-
- for _, group := range groups {
- require.Len(t, group.rules, ruleCount)
-
- start := time.Now()
-
group.Eval(ctx, start)
- // Never expect more than 1 inflight query at a time.
- require.EqualValues(t, 1, maxInflight.Load())
- // Each rule should take at least 1 second to execute sequentially.
- require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
+ // Inflight queries should be equal to 6. This is the size of the second batch of rules that can be executed concurrently.
+ require.EqualValues(t, 6, maxInflight.Load())
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
// Each rule produces one vector.
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- }
- })
-
- t.Run("asynchronous evaluation of rules that benefit from reordering", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
-
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
-
- ruleCount := 8
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
-
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = int64(ruleCount) * 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
-
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_dependents_on_base.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
- var group *Group
- for _, g := range groups {
- group = g
- }
-
- start := time.Now()
-
- // Expected evaluation order
- order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
- require.Equal(t, []ConcurrentRules{
- {0, 4},
- {1, 2, 3, 5, 6, 7},
- }, order)
-
- group.Eval(ctx, start)
-
- // Inflight queries should be equal to 6. This is the size of the second batch of rules that can be executed concurrently.
- require.EqualValues(t, 6, maxInflight.Load())
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
- // Each rule produces one vector.
- require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ })
})
t.Run("attempted asynchronous evaluation of chained rules", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- ruleCount := 7
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+ ctx := t.Context()
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = int64(ruleCount) * 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
+ ruleCount := 7
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_chain.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
- var group *Group
- for _, g := range groups {
- group = g
- }
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = int64(ruleCount) * 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
- start := time.Now()
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_chain.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
+ var group *Group
+ for _, g := range groups {
+ group = g
+ }
- // Expected evaluation order
- order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
- require.Equal(t, []ConcurrentRules{
- {0, 1},
- {2},
- {3},
- {4, 5, 6},
- }, order)
+ start := time.Now()
- group.Eval(ctx, start)
+ // Expected evaluation order
+ order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
+ require.Equal(t, []ConcurrentRules{
+ {0, 1},
+ {2},
+ {3},
+ {4, 5, 6},
+ }, order)
- require.EqualValues(t, 3, maxInflight.Load())
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
- // Each rule produces one vector.
- require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ group.Eval(ctx, start)
+
+ require.EqualValues(t, 3, maxInflight.Load())
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
+ // Each rule produces one vector.
+ require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ })
})
}
func TestNewRuleGroupRestoration(t *testing.T) {
t.Parallel()
store := teststorage.New(t)
- t.Cleanup(func() { store.Close() })
+
var (
inflightQueries atomic.Int32
maxInflight atomic.Int32
@@ -2386,7 +2381,7 @@ func TestNewRuleGroupRestoration(t *testing.T) {
func TestNewRuleGroupRestorationWithRestoreNewGroupOption(t *testing.T) {
t.Parallel()
store := teststorage.New(t)
- t.Cleanup(func() { store.Close() })
+
var (
inflightQueries atomic.Int32
maxInflight atomic.Int32
@@ -2456,7 +2451,6 @@ func TestNewRuleGroupRestorationWithRestoreNewGroupOption(t *testing.T) {
func TestBoundedRuleEvalConcurrency(t *testing.T) {
storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
var (
inflightQueries atomic.Int32
@@ -2511,7 +2505,6 @@ func TestUpdateWhenStopped(t *testing.T) {
func TestGroup_Eval_RaceConditionOnStoppingGroupEvaluationWhileRulesAreEvaluatedConcurrently(t *testing.T) {
storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
var (
inflightQueries atomic.Int32
@@ -2603,11 +2596,11 @@ func TestLabels_FromMaps(t *testing.T) {
func TestParseFiles(t *testing.T) {
t.Run("good files", func(t *testing.T) {
- err := ParseFiles([]string{filepath.Join("fixtures", "rules.y*ml")}, model.UTF8Validation)
+ err := ParseFiles([]string{filepath.Join("fixtures", "rules.y*ml")}, model.UTF8Validation, testParser)
require.NoError(t, err)
})
t.Run("bad files", func(t *testing.T) {
- err := ParseFiles([]string{filepath.Join("fixtures", "invalid_rules.y*ml")}, model.UTF8Validation)
+ err := ParseFiles([]string{filepath.Join("fixtures", "invalid_rules.y*ml")}, model.UTF8Validation, testParser)
require.ErrorContains(t, err, "field unexpected_field not found in type rulefmt.Rule")
})
}
@@ -2730,7 +2723,6 @@ func TestRuleDependencyController_AnalyseRules(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
ruleManager := NewManager(&ManagerOptions{
Context: context.Background(),
@@ -2759,7 +2751,6 @@ func TestRuleDependencyController_AnalyseRules(t *testing.T) {
func BenchmarkRuleDependencyController_AnalyseRules(b *testing.B) {
storage := teststorage.New(b)
- b.Cleanup(func() { storage.Close() })
ruleManager := NewManager(&ManagerOptions{
Context: context.Background(),
diff --git a/rules/recording_test.go b/rules/recording_test.go
index 1fee5ede72..e59c079d91 100644
--- a/rules/recording_test.go
+++ b/rules/recording_test.go
@@ -29,10 +29,12 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
+var testParser = parser.NewParser(parser.Options{})
+
var (
ruleEvaluationTime = time.Unix(0, 0).UTC()
- exprWithMetricName, _ = parser.ParseExpr(`sort(metric)`)
- exprWithoutMetricName, _ = parser.ParseExpr(`sort(metric + metric)`)
+ exprWithMetricName, _ = testParser.ParseExpr(`sort(metric)`)
+ exprWithoutMetricName, _ = testParser.ParseExpr(`sort(metric + metric)`)
)
var ruleEvalTestScenarios = []struct {
@@ -111,7 +113,7 @@ var ruleEvalTestScenarios = []struct {
},
}
-func setUpRuleEvalTest(t require.TestingT) *teststorage.TestStorage {
+func setUpRuleEvalTest(t testing.TB) *teststorage.TestStorage {
return promqltest.LoadedStorage(t, `
load 1m
metric{label_a="1",label_b="3"} 1
@@ -121,7 +123,6 @@ func setUpRuleEvalTest(t require.TestingT) *teststorage.TestStorage {
func TestRuleEval(t *testing.T) {
storage := setUpRuleEvalTest(t)
- t.Cleanup(func() { storage.Close() })
ng := testEngine(t)
for _, scenario := range ruleEvalTestScenarios {
@@ -158,7 +159,6 @@ func BenchmarkRuleEval(b *testing.B) {
// TestRuleEvalDuplicate tests for duplicate labels in recorded metrics, see #5529.
func TestRuleEvalDuplicate(t *testing.T) {
storage := teststorage.New(t)
- defer storage.Close()
opts := promql.EngineOpts{
Logger: nil,
@@ -172,7 +172,7 @@ func TestRuleEvalDuplicate(t *testing.T) {
now := time.Now()
- expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
+ expr, _ := testParser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
rule := NewRecordingRule("foo", expr, labels.FromStrings("test", "test"))
_, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
require.Error(t, err)
@@ -185,7 +185,6 @@ func TestRecordingRuleLimit(t *testing.T) {
metric{label="1"} 1
metric{label="2"} 1
`)
- t.Cleanup(func() { storage.Close() })
tests := []struct {
limit int
@@ -206,7 +205,7 @@ func TestRecordingRuleLimit(t *testing.T) {
},
}
- expr, _ := parser.ParseExpr(`metric > 0`)
+ expr, _ := testParser.ParseExpr(`metric > 0`)
rule := NewRecordingRule(
"foo",
expr,
@@ -241,7 +240,7 @@ func TestRecordingEvalWithOrigin(t *testing.T) {
lbs = labels.FromStrings("foo", "bar")
)
- expr, err := parser.ParseExpr(query)
+ expr, err := testParser.ParseExpr(query)
require.NoError(t, err)
rule := NewRecordingRule(name, expr, lbs)
diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go
index dd5179b360..1db229561d 100644
--- a/scrape/helpers_test.go
+++ b/scrape/helpers_test.go
@@ -17,6 +17,7 @@ import (
"bytes"
"context"
"encoding/binary"
+ "fmt"
"net/http"
"testing"
"time"
@@ -38,15 +39,22 @@ import (
// For readability.
type sample = teststorage.Sample
+type compatAppendable interface {
+ storage.Appendable
+ storage.AppendableV2
+}
+
func withCtx(ctx context.Context) func(sl *scrapeLoop) {
return func(sl *scrapeLoop) {
sl.ctx = ctx
}
}
-func withAppendable(appendable storage.Appendable) func(sl *scrapeLoop) {
+func withAppendable(app compatAppendable, appV2 bool) func(sl *scrapeLoop) {
return func(sl *scrapeLoop) {
- sl.appendable = appendable
+ sa := selectAppendable(app, appV2)
+ sl.appendable = sa.V1()
+ sl.appendableV2 = sa.V2()
}
}
@@ -55,8 +63,7 @@ func withAppendable(appendable storage.Appendable) func(sl *scrapeLoop) {
//
// It's recommended to use withXYZ functions for simple option customizations, e.g:
//
-// appTest := teststorage.NewAppendable()
-// sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+// sl, _ := newTestScrapeLoop(t, withCtx(customCtx))
//
// However, when changing more than one scrapeLoop options it's more readable to have one explicit opt function:
//
@@ -64,7 +71,7 @@ func withAppendable(appendable storage.Appendable) func(sl *scrapeLoop) {
// appTest := teststorage.NewAppendable()
// sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
// sl.ctx = ctx
-// sl.appendable = appTest
+// sl.appendableV2 = appTest
// // Since we're writing samples directly below we need to provide a protocol fallback.
// sl.fallbackScrapeProtocol = "text/plain"
// })
@@ -84,8 +91,6 @@ func newTestScrapeLoop(t testing.TB, opts ...func(sl *scrapeLoop)) (_ *scrapeLoo
timeout: 1 * time.Hour,
sampleMutator: nopMutator,
reportSampleMutator: nopMutator,
-
- appendable: teststorage.NewAppendable(),
buffers: pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) }),
metrics: metrics,
maxSchema: histogram.ExponentialSchemaMax,
@@ -98,6 +103,11 @@ func newTestScrapeLoop(t testing.TB, opts ...func(sl *scrapeLoop)) (_ *scrapeLoo
for _, o := range opts {
o(sl)
}
+
+ if sl.appendable != nil && sl.appendableV2 != nil {
+ t.Fatal("select the appendable to use, both were passed, likely a bug")
+ }
+
// Validate user opts for convenience.
require.Nil(t, sl.parentCtx, "newTestScrapeLoop does not support injecting non-nil parent context")
require.Nil(t, sl.appenderCtx, "newTestScrapeLoop does not support injecting non-nil appender context")
@@ -121,7 +131,8 @@ func newTestScrapeLoop(t testing.TB, opts ...func(sl *scrapeLoop)) (_ *scrapeLoo
return sl, scraper
}
-func newTestScrapePool(t *testing.T, injectNewLoop func(options scrapeLoopOptions) loop) *scrapePool {
+func newTestScrapePool(t *testing.T, app compatAppendable, appV2 bool, injectNewLoop func(options scrapeLoopOptions) loop) *scrapePool {
+ sa := selectAppendable(app, appV2)
return &scrapePool{
ctx: t.Context(),
cancel: func() {},
@@ -134,7 +145,8 @@ func newTestScrapePool(t *testing.T, injectNewLoop func(options scrapeLoopOption
loops: map[uint64]loop{},
injectTestNewLoop: injectNewLoop,
- appendable: teststorage.NewAppendable(),
+ appendable: sa.V1(), appendableV2: sa.V2(),
+
symbolTable: labels.NewSymbolTable(),
metrics: newTestScrapeMetrics(t),
}
@@ -158,3 +170,66 @@ func protoMarshalDelimited(t *testing.T, mf *dto.MetricFamily) []byte {
buf.Write(protoBuf)
return buf.Bytes()
}
+
+type selectedAppendable struct {
+ useV2 bool
+ app compatAppendable
+}
+
+// V1 returns Appendable if V1 is selected, otherwise nil.
+func (s selectedAppendable) V1() storage.Appendable {
+ if s.useV2 {
+ return nil
+ }
+ return s.app
+}
+
+// V2 returns AppendableV2 if V2 is selected, otherwise nil.
+func (s selectedAppendable) V2() storage.AppendableV2 {
+ if !s.useV2 {
+ return nil
+ }
+ return s.app
+}
+
+// selectAppendable allows to specify which appendable callers should use when the struct
+// implements both. This is how all callers are making the decision - if one appendable is nil, they
+// take another. selectAppendable allows to inject nil to e.g. storage.AppendableV2 when appV2 is false.
+func selectAppendable(app compatAppendable, appV2 bool) selectedAppendable {
+ s := selectedAppendable{
+ app: app,
+ useV2: appV2,
+ }
+ return s
+}
+
+func foreachAppendable(t *testing.T, f func(t *testing.T, appV2 bool)) {
+ for _, appV2 := range []bool{false, true} {
+ t.Run(fmt.Sprintf("appV2=%v", appV2), func(t *testing.T) {
+ f(t, appV2)
+ })
+ }
+}
+
+func TestSelectAppendable(t *testing.T) {
+ var i int
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ defer func() { i++ }()
+ switch i {
+ case 0:
+ require.False(t, appV2)
+
+ s := selectAppendable(teststorage.NewAppendable(), appV2)
+ require.NotNil(t, s.V1())
+ require.Nil(t, s.V2())
+ case 1:
+ require.True(t, appV2)
+
+ s := selectAppendable(teststorage.NewAppendable(), appV2)
+ require.Nil(t, s.V1())
+ require.NotNil(t, s.V2())
+ default:
+ t.Fatal("too many iterations")
+ }
+ })
+}
diff --git a/scrape/manager.go b/scrape/manager.go
index a2297aa824..24a63b056b 100644
--- a/scrape/manager.go
+++ b/scrape/manager.go
@@ -39,14 +39,35 @@ import (
"github.com/prometheus/prometheus/util/pool"
)
-// NewManager is the Manager constructor using Appendable.
-func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), appendable storage.Appendable, registerer prometheus.Registerer) (*Manager, error) {
+// NewManager is the Manager constructor using storage.Appendable or storage.AppendableV2.
+//
+// If unsure which one to use/implement, implement AppendableV2 as it significantly simplifies implementation and allows more
+// (passing ST, always-on metadata, exemplars per sample).
+//
+// NewManager returns error if both appendable and appendableV2 are specified.
+//
+// Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+// storage.Appendable will be removed soon (ETA: Q2 2026).
+func NewManager(
+ o *Options,
+ logger *slog.Logger,
+ newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error),
+ appendable storage.Appendable,
+ appendableV2 storage.AppendableV2,
+ registerer prometheus.Registerer,
+) (*Manager, error) {
if o == nil {
o = &Options{}
}
if logger == nil {
logger = promslog.NewNopLogger()
}
+ if appendable != nil && appendableV2 != nil {
+ return nil, errors.New("scrape.NewManager: appendable and appendableV2 cannot be provided at the same time")
+ }
+ if appendable == nil && appendableV2 == nil {
+ return nil, errors.New("scrape.NewManager: provide either appendable or appendableV2")
+ }
sm, err := newScrapeMetrics(registerer)
if err != nil {
@@ -55,6 +76,7 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str
m := &Manager{
appendable: appendable,
+ appendableV2: appendableV2,
opts: o,
logger: logger,
newScrapeFailureLogger: newScrapeFailureLogger,
@@ -114,7 +136,8 @@ type Manager struct {
opts *Options
logger *slog.Logger
- appendable storage.Appendable
+ appendable storage.Appendable
+ appendableV2 storage.AppendableV2
graceShut chan struct{}
@@ -196,7 +219,7 @@ func (m *Manager) reload() {
continue
}
m.metrics.targetScrapePools.Inc()
- sp, err := newScrapePool(scrapeConfig, m.appendable, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics)
+ sp, err := newScrapePool(scrapeConfig, m.appendable, m.appendableV2, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics)
if err != nil {
m.metrics.targetScrapePoolsFailed.Inc()
m.logger.Error("error creating new scrape pool", "err", err, "scrape_pool", setName)
diff --git a/scrape/manager_test.go b/scrape/manager_test.go
index d4898eb996..395cc98a82 100644
--- a/scrape/manager_test.go
+++ b/scrape/manager_test.go
@@ -522,13 +522,13 @@ scrape_configs:
)
opts := Options{}
- scrapeManager, err := NewManager(&opts, nil, nil, nil, testRegistry)
+ scrapeManager, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), testRegistry)
require.NoError(t, err)
newLoop := func(scrapeLoopOptions) loop {
ch <- struct{}{}
return noopLoop()
}
- sp := newTestScrapePool(t, newLoop)
+ sp := newTestScrapePool(t, nil, false, newLoop)
sp.activeTargets[1] = &Target{}
sp.loops[1] = noopLoop()
sp.config = cfg1.ScrapeConfigs[0]
@@ -578,11 +578,11 @@ scrape_configs:
func TestManagerTargetsUpdates(t *testing.T) {
opts := Options{}
testRegistry := prometheus.NewRegistry()
- m, err := NewManager(&opts, nil, nil, nil, testRegistry)
+ m, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), testRegistry)
require.NoError(t, err)
- ts := make(chan map[string][]*targetgroup.Group)
- go m.Run(ts)
+ targetSetsCh := make(chan map[string][]*targetgroup.Group)
+ go m.Run(targetSetsCh)
defer m.Stop()
tgSent := make(map[string][]*targetgroup.Group)
@@ -594,7 +594,7 @@ func TestManagerTargetsUpdates(t *testing.T) {
}
select {
- case ts <- tgSent:
+ case targetSetsCh <- tgSent:
case <-time.After(10 * time.Millisecond):
require.Fail(t, "Scrape manager's channel remained blocked after the set threshold.")
}
@@ -631,7 +631,7 @@ global:
opts := Options{}
testRegistry := prometheus.NewRegistry()
- scrapeManager, err := NewManager(&opts, nil, nil, nil, testRegistry)
+ scrapeManager, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), testRegistry)
require.NoError(t, err)
// Load the first config.
@@ -684,7 +684,7 @@ scrape_configs:
_, cancel := context.WithCancel(context.Background())
defer cancel()
- sp := newTestScrapePool(t, newLoop)
+ sp := newTestScrapePool(t, nil, false, newLoop)
sp.loops[1] = noopLoop()
sp.config = cfg1.ScrapeConfigs[0]
sp.metrics = scrapeManager.metrics
@@ -701,7 +701,7 @@ scrape_configs:
}
opts := Options{}
- scrapeManager, err := NewManager(&opts, nil, nil, nil, testRegistry)
+ scrapeManager, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), testRegistry)
require.NoError(t, err)
reload(scrapeManager, cfg1)
@@ -735,6 +735,8 @@ func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server
}
// TestManagerSTZeroIngestion tests scrape manager for various ST cases.
+// NOTE(bwplotka): There is no AppenderV2 test for this STZeroIngestion feature as in V2 flow it's
+// moved to AppenderV2 implementation (e.g. storage) and it's tested there, e.g. tsdb.TestHeadAppenderV2_Append_EnableSTAsZeroSample.
func TestManagerSTZeroIngestion(t *testing.T) {
t.Parallel()
const (
@@ -766,7 +768,7 @@ func TestManagerSTZeroIngestion(t *testing.T) {
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableStartTimestampZeroIngestion: testSTZeroIngest,
skipOffsetting: true,
- }, app)
+ }, app, nil)
defer scrapeManager.Stop()
server := setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded)
@@ -905,6 +907,8 @@ func generateTestHistogram(i int) *dto.Histogram {
return h
}
+// NOTE(bwplotka): There is no AppenderV2 test for this STZeroIngestion feature as in V2 flow it's
+// moved to AppenderV2 implementation (e.g. storage) and it's tested there, e.g. tsdb.TestHeadAppenderV2_Append_EnableSTAsZeroSample.
func TestManagerSTZeroIngestionHistogram(t *testing.T) {
t.Parallel()
const mName = "expected_histogram"
@@ -950,7 +954,7 @@ func TestManagerSTZeroIngestionHistogram(t *testing.T) {
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableStartTimestampZeroIngestion: tc.enableSTZeroIngestion,
skipOffsetting: true,
- }, app)
+ }, app, nil)
defer scrapeManager.Stop()
once := sync.Once{}
@@ -1030,7 +1034,7 @@ func TestUnregisterMetrics(t *testing.T) {
// Check that all metrics can be unregistered, allowing a second manager to be created.
for range 2 {
opts := Options{}
- manager, err := NewManager(&opts, nil, nil, nil, reg)
+ manager, err := NewManager(&opts, nil, nil, nil, teststorage.NewAppendable(), reg)
require.NotNil(t, manager)
require.NoError(t, err)
// Unregister all metrics.
@@ -1043,6 +1047,9 @@ func TestUnregisterMetrics(t *testing.T) {
// This test addresses issue #17216 by ensuring the previously blocking check has been removed.
// The test verifies that the presence of exemplars in the input does not cause errors,
// although exemplars are not preserved during NHCB conversion (as documented below).
+//
+// NOTE(bwplotka): There is no AppenderV2 test for this STZeroIngestion feature as in V2 flow it's
+// moved to AppenderV2 implementation (e.g. storage) and it's tested there, e.g. tsdb.TestHeadAppenderV2_Append_EnableSTAsZeroSample.
func TestNHCBAndSTZeroIngestion(t *testing.T) {
t.Parallel()
@@ -1059,7 +1066,7 @@ func TestNHCBAndSTZeroIngestion(t *testing.T) {
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableStartTimestampZeroIngestion: true,
skipOffsetting: true,
- }, app)
+ }, app, nil)
defer scrapeManager.Stop()
once := sync.Once{}
@@ -1153,16 +1160,13 @@ func applyConfig(
require.NoError(t, discoveryManager.ApplyConfig(c))
}
-func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.Appendable) (*discovery.Manager, *Manager) {
+func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.Appendable, appV2 storage.AppendableV2) (*discovery.Manager, *Manager) {
t.Helper()
if opts == nil {
opts = &Options{}
}
opts.DiscoveryReloadInterval = model.Duration(100 * time.Millisecond)
- if app == nil {
- app = teststorage.NewAppendable()
- }
reg := prometheus.NewRegistry()
sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
@@ -1178,7 +1182,7 @@ func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.A
opts,
nil,
nil,
- app,
+ app, appV2,
prometheus.NewRegistry(),
)
require.NoError(t, err)
@@ -1251,7 +1255,7 @@ scrape_configs:
- files: ['%s']
`
- discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
+ discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, teststorage.NewAppendable())
defer scrapeManager.Stop()
applyConfig(
@@ -1350,7 +1354,7 @@ scrape_configs:
file_sd_configs:
- files: ['%s', '%s']
`
- discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
+ discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, teststorage.NewAppendable())
defer scrapeManager.Stop()
applyConfig(
@@ -1409,7 +1413,7 @@ scrape_configs:
file_sd_configs:
- files: ['%s']
`
- discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
+ discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, teststorage.NewAppendable())
defer scrapeManager.Stop()
applyConfig(
@@ -1475,7 +1479,7 @@ scrape_configs:
- targets: ['%s']
`
- discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
+ discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil, teststorage.NewAppendable())
defer scrapeManager.Stop()
// Apply the initial config with an existing file
@@ -1559,7 +1563,7 @@ scrape_configs:
cfg := loadConfiguration(t, cfgText)
- m, err := NewManager(&Options{}, nil, nil, teststorage.NewAppendable(), prometheus.NewRegistry())
+ m, err := NewManager(&Options{}, nil, nil, nil, teststorage.NewAppendable(), prometheus.NewRegistry())
require.NoError(t, err)
defer m.Stop()
require.NoError(t, m.ApplyConfig(cfg))
diff --git a/scrape/scrape.go b/scrape/scrape.go
index 58df858b3d..d5a9ba72b4 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -82,11 +82,12 @@ type FailureLogger interface {
// scrapePool manages scrapes for sets of targets.
type scrapePool struct {
- appendable storage.Appendable
- logger *slog.Logger
- ctx context.Context
- cancel context.CancelFunc
- options *Options
+ appendable storage.Appendable
+ appendableV2 storage.AppendableV2
+ logger *slog.Logger
+ ctx context.Context
+ cancel context.CancelFunc
+ options *Options
// mtx must not be taken after targetMtx.
mtx sync.Mutex
@@ -139,6 +140,7 @@ type scrapeLoopAppendAdapter interface {
func newScrapePool(
cfg *config.ScrapeConfig,
appendable storage.Appendable,
+ appendableV2 storage.AppendableV2,
offsetSeed uint64,
logger *slog.Logger,
buffers *pool.Pool,
@@ -171,6 +173,7 @@ func newScrapePool(
ctx, cancel := context.WithCancel(context.Background())
sp := &scrapePool{
appendable: appendable,
+ appendableV2: appendableV2,
logger: logger,
ctx: ctx,
cancel: cancel,
@@ -842,11 +845,12 @@ type scrapeLoop struct {
scraper scraper
// Static params per scrapePool.
- appendable storage.Appendable
- buffers *pool.Pool
- offsetSeed uint64
- symbolTable *labels.SymbolTable
- metrics *scrapeMetrics
+ appendable storage.Appendable
+ appendableV2 storage.AppendableV2
+ buffers *pool.Pool
+ offsetSeed uint64
+ symbolTable *labels.SymbolTable
+ metrics *scrapeMetrics
// Options from config.ScrapeConfig.
sampleLimit int
@@ -1190,11 +1194,12 @@ func newScrapeLoop(opts scrapeLoopOptions) *scrapeLoop {
scraper: opts.scraper,
// Static params per scrapePool.
- appendable: opts.sp.appendable,
- buffers: opts.sp.buffers,
- offsetSeed: opts.sp.offsetSeed,
- symbolTable: opts.sp.symbolTable,
- metrics: opts.sp.metrics,
+ appendable: opts.sp.appendable,
+ appendableV2: opts.sp.appendableV2,
+ buffers: opts.sp.buffers,
+ offsetSeed: opts.sp.offsetSeed,
+ symbolTable: opts.sp.symbolTable,
+ metrics: opts.sp.metrics,
// config.ScrapeConfig.
sampleLimit: int(opts.sp.config.SampleLimit),
@@ -1303,7 +1308,9 @@ mainLoop:
}
func (sl *scrapeLoop) appender() scrapeLoopAppendAdapter {
- // NOTE(bwplotka): Add AppenderV2 implementation, see https://github.com/prometheus/prometheus/issues/17632.
+ if sl.appendableV2 != nil {
+ return &scrapeLoopAppenderV2{scrapeLoop: sl, AppenderV2: sl.appendableV2.AppenderV2(sl.appenderCtx)}
+ }
return &scrapeLoopAppender{scrapeLoop: sl, Appender: sl.appendable.Appender(sl.appenderCtx)}
}
@@ -1637,7 +1644,7 @@ loop:
break
}
switch et {
- // TODO(bwplotka): Consider changing parser to give metadata at once instead of type, help and unit in separation, ideally on `Series()/Histogram()
+ // TODO(bwplotka): Consider changing parser to give metadata at once instead of type, help and unit in separation, ideally on `Series()/Histogram()`
// otherwise we can expose metadata without series on metadata API.
case textparse.EntryType:
// TODO(bwplotka): Build meta entry directly instead of locking and updating the map. This will
@@ -1753,7 +1760,7 @@ loop:
}
}
- sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
+ sampleAdded, err = sl.checkAddError(met, nil, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
if err != nil {
if !errors.Is(err, storage.ErrNotFound) {
sl.l.Debug("Unexpected error", "series", string(met), "err", err)
@@ -1829,7 +1836,7 @@ loop:
if !seriesCached || lastMeta.lastIterChange == sl.cache.iter {
// In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName.
// However, optional TYPE etc metadata and broken OM text can break this, detect those cases here.
- // TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. ST and NHCB parsing).
+ // TODO(https://github.com/prometheus/prometheus/issues/17900): Move this to text and OM parser.
if isSeriesPartOfFamily(lset.Get(model.MetricNameLabel), lastMFName, lastMeta.Type) {
if _, merr := app.UpdateMetadata(ref, lset, lastMeta.Metadata); merr != nil {
// No need to fail the scrape on errors appending metadata.
@@ -1871,6 +1878,7 @@ loop:
return total, added, seriesAdded, err
}
+// TODO(https://github.com/prometheus/prometheus/issues/17900): Move this to text and OM parser.
func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) bool {
mfNameStr := yoloString(mfName)
if !strings.HasPrefix(mName, mfNameStr) { // Fast path.
@@ -1942,7 +1950,7 @@ func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) boo
// during normal operation (e.g., accidental cardinality explosion, sudden traffic spikes).
// Current case ordering prevents exercising other cases when limits are exceeded.
// Remaining error cases typically occur only a few times, often during initial setup.
-func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (sampleAdded bool, _ error) {
+func (sl *scrapeLoop) checkAddError(met []byte, exemplars []exemplar.Exemplar, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (sampleAdded bool, _ error) {
switch {
case err == nil:
return true, nil
@@ -1974,6 +1982,26 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke
case errors.Is(err, storage.ErrNotFound):
return false, storage.ErrNotFound
default:
+ // If nothing from the above, check for partial errors. Do this here to not alloc the pErr on a hot path.
+ var pErr *storage.AppendPartialError
+ if errors.As(err, &pErr) {
+ outOfOrderExemplars := 0
+ for _, e := range pErr.ExemplarErrors {
+ if errors.Is(e, storage.ErrOutOfOrderExemplar) {
+ outOfOrderExemplars++
+ }
+ // Since exemplar storage is still experimental, we don't fail or check other errors.
+ // Debug log is emitted in TSDB already.
+ }
+ if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) {
+ // Only report out of order exemplars if all are out of order, otherwise this was a partial update
+ // to some existing set of exemplars.
+ appErrs.numExemplarOutOfOrder += outOfOrderExemplars
+ sl.l.Debug("Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1]))
+ sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars))
+ }
+ return true, nil
+ }
return false, err
}
}
diff --git a/scrape/scrape_append_v2.go b/scrape/scrape_append_v2.go
new file mode 100644
index 0000000000..64969707e1
--- /dev/null
+++ b/scrape/scrape_append_v2.go
@@ -0,0 +1,416 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scrape
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "time"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/textparse"
+ "github.com/prometheus/prometheus/model/timestamp"
+ "github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
+)
+
+// appenderWithLimits returns an appender with additional validation.
+func appenderV2WithLimits(app storage.AppenderV2, sampleLimit, bucketLimit int, maxSchema int32) storage.AppenderV2 {
+ app = &timeLimitAppenderV2{
+ AppenderV2: app,
+ maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
+ }
+
+ // The sampleLimit is applied after metrics are potentially dropped via relabeling.
+ if sampleLimit > 0 {
+ app = &limitAppenderV2{
+ AppenderV2: app,
+ limit: sampleLimit,
+ }
+ }
+
+ if bucketLimit > 0 {
+ app = &bucketLimitAppenderV2{
+ AppenderV2: app,
+ limit: bucketLimit,
+ }
+ }
+
+ if maxSchema < histogram.ExponentialSchemaMax {
+ app = &maxSchemaAppenderV2{
+ AppenderV2: app,
+ maxSchema: maxSchema,
+ }
+ }
+
+ return app
+}
+
+func (sl *scrapeLoop) updateStaleMarkersV2(app storage.AppenderV2, defTime int64) (err error) {
+ sl.cache.forEachStale(func(ref storage.SeriesRef, lset labels.Labels) bool {
+ // Series no longer exposed, mark it stale.
+ _, err = app.Append(ref, lset, 0, defTime, math.Float64frombits(value.StaleNaN), nil, nil, storage.AOptions{RejectOutOfOrder: true})
+ switch {
+ case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
+ // Do not count these in logging, as this is expected if a target
+ // goes away and comes back again with a new scrape loop.
+ err = nil
+ }
+ return err == nil
+ })
+ return err
+}
+
+type scrapeLoopAppenderV2 struct {
+ *scrapeLoop
+
+ storage.AppenderV2
+}
+
+var _ scrapeLoopAppendAdapter = &scrapeLoopAppenderV2{}
+
+func (sl *scrapeLoopAppenderV2) append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
+ defTime := timestamp.FromTime(ts)
+
+ if len(b) == 0 {
+ // Empty scrape. Just update the stale makers and swap the cache (but don't flush it).
+ err = sl.updateStaleMarkersV2(sl.AppenderV2, defTime)
+ sl.cache.iterDone(false)
+ return total, added, seriesAdded, err
+ }
+
+ p, err := textparse.New(b, contentType, sl.symbolTable, textparse.ParserOptions{
+ EnableTypeAndUnitLabels: sl.enableTypeAndUnitLabels,
+ IgnoreNativeHistograms: !sl.enableNativeHistogramScraping,
+ ConvertClassicHistogramsToNHCB: sl.convertClassicHistToNHCB,
+ KeepClassicOnClassicAndNativeHistograms: sl.alwaysScrapeClassicHist,
+ OpenMetricsSkipSTSeries: sl.enableSTZeroIngestion,
+ FallbackContentType: sl.fallbackScrapeProtocol,
+ })
+ if p == nil {
+ sl.l.Error(
+ "Failed to determine correct type of scrape target.",
+ "content_type", contentType,
+ "fallback_media_type", sl.fallbackScrapeProtocol,
+ "err", err,
+ )
+ return total, added, seriesAdded, err
+ }
+ if err != nil {
+ sl.l.Debug(
+ "Invalid content type on scrape, using fallback setting.",
+ "content_type", contentType,
+ "fallback_media_type", sl.fallbackScrapeProtocol,
+ "err", err,
+ )
+ }
+ var (
+ appErrs = appendErrors{}
+ sampleLimitErr error
+ bucketLimitErr error
+ lset labels.Labels // Escapes to heap so hoisted out of loop.
+ e exemplar.Exemplar // Escapes to heap so hoisted out of loop.
+ lastMeta *metaEntry
+ lastMFName []byte
+ )
+
+ exemplars := make([]exemplar.Exemplar, 0, 1)
+
+ // Take an appender with limits.
+ app := appenderV2WithLimits(sl.AppenderV2, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
+
+ defer func() {
+ if err != nil {
+ return
+ }
+ // Flush and swap the cache as the scrape was non-empty.
+ sl.cache.iterDone(true)
+ }()
+
+loop:
+ for {
+ var (
+ et textparse.Entry
+ sampleAdded, isHistogram bool
+ met []byte
+ parsedTimestamp *int64
+ val float64
+ h *histogram.Histogram
+ fh *histogram.FloatHistogram
+ )
+ if et, err = p.Next(); err != nil {
+ if errors.Is(err, io.EOF) {
+ err = nil
+ }
+ break
+ }
+ switch et {
+ // TODO(bwplotka): Consider changing parser to give metadata at once instead of type, help and unit in separation, ideally on `Series()/Histogram()
+ // otherwise we can expose metadata without series on metadata API.
+ case textparse.EntryType:
+ // TODO(bwplotka): Build meta entry directly instead of locking and updating the map. This will
+ // allow to properly update metadata when e.g unit was added, then removed;
+ lastMFName, lastMeta = sl.cache.setType(p.Type())
+ continue
+ case textparse.EntryHelp:
+ lastMFName, lastMeta = sl.cache.setHelp(p.Help())
+ continue
+ case textparse.EntryUnit:
+ lastMFName, lastMeta = sl.cache.setUnit(p.Unit())
+ continue
+ case textparse.EntryComment:
+ continue
+ case textparse.EntryHistogram:
+ isHistogram = true
+ default:
+ }
+ total++
+
+ t := defTime
+ if isHistogram {
+ met, parsedTimestamp, h, fh = p.Histogram()
+ } else {
+ met, parsedTimestamp, val = p.Series()
+ }
+ if !sl.honorTimestamps {
+ parsedTimestamp = nil
+ }
+ if parsedTimestamp != nil {
+ t = *parsedTimestamp
+ }
+
+ if sl.cache.getDropped(met) {
+ continue
+ }
+ ce, seriesCached, seriesAlreadyScraped := sl.cache.get(met)
+ var (
+ ref storage.SeriesRef
+ hash uint64
+ )
+
+ if seriesCached {
+ ref = ce.ref
+ lset = ce.lset
+ hash = ce.hash
+ } else {
+ p.Labels(&lset)
+ hash = lset.Hash()
+
+ // Hash label set as it is seen local to the target. Then add target labels
+ // and relabeling and store the final label set.
+ lset = sl.sampleMutator(lset)
+
+ // The label set may be set to empty to indicate dropping.
+ if lset.IsEmpty() {
+ sl.cache.addDropped(met)
+ continue
+ }
+
+ if !lset.Has(model.MetricNameLabel) {
+ err = errNameLabelMandatory
+ break loop
+ }
+ if !lset.IsValid(sl.validationScheme) {
+ err = fmt.Errorf("invalid metric name or label names: %s", lset.String())
+ break loop
+ }
+
+ // If any label limits is exceeded the scrape should fail.
+ if err = verifyLabelLimits(lset, sl.labelLimits); err != nil {
+ sl.metrics.targetScrapePoolExceededLabelLimits.Inc()
+ break loop
+ }
+ }
+
+ exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
+
+ if seriesAlreadyScraped && parsedTimestamp == nil {
+ err = storage.ErrDuplicateSampleForTimestamp
+ } else {
+ // Double check we don't append float 0 for
+ // histogram case where parser returns bad data.
+ // This can only happen when parser has a bug.
+ if isHistogram && h == nil && fh == nil {
+ err = fmt.Errorf("parser returned nil histogram/float histogram for a histogram entry type for %v series; parser bug; aborting", lset.String())
+ break loop
+ }
+
+ st := int64(0)
+ if sl.enableSTZeroIngestion {
+ // p.StartTimestamp() tend to be expensive (e.g. OM1). Do it only if we care.
+ st = p.StartTimestamp()
+ }
+
+ for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
+ if !e.HasTs {
+ if isHistogram {
+ // We drop exemplars for native histograms if they don't have a timestamp.
+ // Missing timestamps are deliberately not supported as we want to start
+ // enforcing timestamps for exemplars as otherwise proper deduplication
+ // is inefficient and purely based on heuristics: we cannot distinguish
+ // between repeated exemplars and new instances with the same values.
+ // This is done silently without logs as it is not an error but out of spec.
+ // This does not affect classic histograms so that behaviour is unchanged.
+ e = exemplar.Exemplar{} // Reset for the next fetch.
+ continue
+ }
+ e.Ts = t
+ }
+ exemplars = append(exemplars, e)
+ e = exemplar.Exemplar{} // Reset for the next fetch.
+ }
+
+ // Prepare append call.
+ appOpts := storage.AOptions{}
+ if len(exemplars) > 0 {
+ // Sort so that checking for duplicates / out of order is more efficient during validation.
+ slices.SortFunc(exemplars, exemplar.Compare)
+ appOpts.Exemplars = exemplars
+ }
+
+ // Metadata path mimicks the scrape appender V1 flow. Once we remove v2
+ // flow we should rename "appendMetadataToWAL" flag to "passMetadata" because for v2 flow
+ // the metadata storage detail is behind the appendableV2 contract. V2 also means we always pass the metadata,
+ // we don't check if it changed (that code can be removed).
+ //
+ // Long term, we should always attach the metadata without any flag. Unfortunately because of the limitation
+ // of the TEXT and OpenMetrics 1.0 (hopefully fixed in OpenMetrics 2.0) there are edge cases around unknown
+ // metadata + suffixes that is expensive (isSeriesPartOfFamily) or in some cases impossible to detect. For this
+ // reason metadata (appendMetadataToWAL=true) appender V2 flow scrape might taking ~3% more CPU in our benchmarks.
+ //
+ // TODO(https://github.com/prometheus/prometheus/issues/17900): Optimize this, notably move this check to parsers that require this (ensuring parser
+ // interface always yields correct metadata), deliver OpenMetrics 2.0 that removes suffixes.
+ if sl.appendMetadataToWAL && lastMeta != nil {
+ // In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName.
+ // However, optional TYPE, etc metadata and broken OM text can break this, detect those cases here.
+ if !isSeriesPartOfFamily(lset.Get(model.MetricNameLabel), lastMFName, lastMeta.Type) {
+ lastMeta = nil // Don't pass knowingly broken metadata, now, nor on the next line.
+ }
+ if lastMeta != nil {
+ // Metric family name has the same source as metadata.
+ appOpts.MetricFamilyName = yoloString(lastMFName)
+ appOpts.Metadata = lastMeta.Metadata
+ }
+ }
+
+ // Append sample to the storage.
+ ref, err = app.Append(ref, lset, st, t, val, h, fh, appOpts)
+ }
+ sampleAdded, err = sl.checkAddError(met, exemplars, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ sl.l.Debug("Unexpected error", "series", string(met), "err", err)
+ }
+ break loop
+ }
+ if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
+ sl.cache.trackStaleness(ce.ref, ce)
+ }
+
+ // If series wasn't cached (is new, not seen on previous scrape) we need to add it to the scrape cache.
+ // But we only do this for series that were appended to TSDB without errors.
+ // If a series was new, but we didn't append it due to sample_limit or other errors then we don't need
+ // it in the scrape cache because we don't need to emit StaleNaNs for it when it disappears.
+ if !seriesCached && sampleAdded {
+ ce = sl.cache.addRef(met, ref, lset, hash)
+ if ce != nil && (parsedTimestamp == nil || sl.trackTimestampsStaleness) {
+ // Bypass staleness logic if there is an explicit timestamp.
+ // But make sure we only do this if we have a cache entry (ce) for our series.
+ sl.cache.trackStaleness(ref, ce)
+ }
+ if sampleLimitErr == nil && bucketLimitErr == nil {
+ seriesAdded++
+ }
+ }
+
+ // Increment added even if there's an error so we correctly report the
+ // number of samples remaining after relabeling.
+ // We still report duplicated samples here since this number should be the exact number
+ // of time series exposed on a scrape after relabelling.
+ added++
+ }
+ if sampleLimitErr != nil {
+ if err == nil {
+ err = sampleLimitErr
+ }
+ // We only want to increment this once per scrape, so this is Inc'd outside the loop.
+ sl.metrics.targetScrapeSampleLimit.Inc()
+ }
+ if bucketLimitErr != nil {
+ if err == nil {
+ err = bucketLimitErr // If sample limit is hit, that error takes precedence.
+ }
+ // We only want to increment this once per scrape, so this is Inc'd outside the loop.
+ sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc()
+ }
+ if appErrs.numOutOfOrder > 0 {
+ sl.l.Warn("Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder)
+ }
+ if appErrs.numDuplicates > 0 {
+ sl.l.Warn("Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates)
+ }
+ if appErrs.numOutOfBounds > 0 {
+ sl.l.Warn("Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds)
+ }
+ if appErrs.numExemplarOutOfOrder > 0 {
+ sl.l.Warn("Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder)
+ }
+ if err == nil {
+ err = sl.updateStaleMarkersV2(app, defTime)
+ }
+ return total, added, seriesAdded, err
+}
+
+func (sl *scrapeLoopAppenderV2) addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) (err error) {
+ ce, ok, _ := sl.cache.get(s.name)
+ var ref storage.SeriesRef
+ var lset labels.Labels
+ if ok {
+ ref = ce.ref
+ lset = ce.lset
+ } else {
+ // The constants are suffixed with the invalid \xff unicode rune to avoid collisions
+ // with scraped metrics in the cache.
+ // We have to drop it when building the actual metric.
+ b.Reset(labels.EmptyLabels())
+ b.Set(model.MetricNameLabel, string(s.name[:len(s.name)-1]))
+ lset = sl.reportSampleMutator(b.Labels())
+ }
+
+ ref, err = sl.Append(ref, lset, 0, t, v, nil, nil, storage.AOptions{
+ MetricFamilyName: yoloString(s.name),
+ Metadata: s.Metadata,
+ RejectOutOfOrder: rejectOOO,
+ })
+ switch {
+ case err == nil:
+ if !ok {
+ sl.cache.addRef(s.name, ref, lset, lset.Hash())
+ }
+ return nil
+ case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
+ // Do not log here, as this is expected if a target goes away and comes back
+ // again with a new scrape loop.
+ return nil
+ default:
+ return err
+ }
+}
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index c2b2ae132c..cab2b2918a 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -36,8 +36,6 @@ import (
"time"
"github.com/gogo/protobuf/proto"
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
"github.com/grafana/regexp"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
@@ -65,6 +63,7 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/pool"
"github.com/prometheus/prometheus/util/teststorage"
@@ -88,42 +87,65 @@ func newTestScrapeMetrics(t testing.TB) *scrapeMetrics {
}
func TestNewScrapePool(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testNewScrapePool(t, appV2)
+ })
+}
+
+func testNewScrapePool(t *testing.T, appV2 bool) {
var (
app = teststorage.NewAppendable()
+ sa = selectAppendable(app, appV2)
cfg = &config.ScrapeConfig{
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
}
- sp, err = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sp, err = newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
)
require.NoError(t, err)
+ if appV2 {
+ a, ok := sp.appendableV2.(*teststorage.Appendable)
+ require.True(t, ok, "Failure to append.")
+ require.Equal(t, app, a, "Wrong sample AppenderV2.")
+ require.Equal(t, cfg, sp.config, "Wrong scrape config.")
+
+ require.Nil(t, sp.appendable)
+ return
+ }
a, ok := sp.appendable.(*teststorage.Appendable)
require.True(t, ok, "Failure to append.")
require.Equal(t, app, a, "Wrong sample appender.")
require.Equal(t, cfg, sp.config, "Wrong scrape config.")
+
+ require.Nil(t, sp.appendableV2)
}
func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testStorageHandlesOutOfOrderTimestamps(t, appV2)
+ })
+}
+
+func testStorageHandlesOutOfOrderTimestamps(t *testing.T, appV2 bool) {
// Test with default OutOfOrderTimeWindow (0)
t.Run("Out-Of-Order Sample Disabled", func(t *testing.T) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
-
- runScrapeLoopTest(t, s, false)
+ runScrapeLoopTest(t, appV2, s, false)
})
// Test with specific OutOfOrderTimeWindow (600000)
t.Run("Out-Of-Order Sample Enabled", func(t *testing.T) {
- s := teststorage.New(t, 600000)
- t.Cleanup(func() { _ = s.Close() })
+ s := teststorage.New(t, func(opt *tsdb.Options) {
+ opt.OutOfOrderTimeWindow = 600000
+ })
- runScrapeLoopTest(t, s, true)
+ runScrapeLoopTest(t, appV2, s, true)
})
}
-func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrder bool) {
- sl, _ := newTestScrapeLoop(t, withAppendable(s))
+func runScrapeLoopTest(t *testing.T, appV2 bool, s *teststorage.TestStorage, expectOutOfOrder bool) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(s, appV2))
// Current time for generating timestamps.
now := time.Now()
@@ -184,14 +206,20 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
}
if expectOutOfOrder {
- require.NotEqual(t, want, results, "Expected results to include out-of-order sample:\n%s", results)
+ teststorage.RequireNotEqual(t, want, results, "Expected results to include out-of-order sample:\n%s", results)
} else {
- require.Equal(t, want, results, "Appended samples not as expected:\n%s", results)
+ teststorage.RequireEqual(t, want, results, "Appended samples not as expected:\n%s", results)
}
}
// Regression test against https://github.com/prometheus/prometheus/issues/15831.
func TestScrapeAppend_MetadataUpdate(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeAppendMetadataUpdate(t, appV2)
+ })
+}
+
+func testScrapeAppendMetadataUpdate(t *testing.T, appV2 bool) {
const (
scrape1 = `# TYPE test_metric counter
# HELP test_metric some help text
@@ -215,32 +243,40 @@ test_metric2{foo="bar"} 22
)
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
now := time.Now()
app := sl.appender()
_, _, _, err := app.append([]byte(scrape1), "application/openmetrics-text", now)
require.NoError(t, err)
require.NoError(t, app.Commit())
- testutil.RequireEqual(t, []sample{
+ teststorage.RequireEqual(t, []sample{
{L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
{L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}},
}, appTest.ResultMetadata())
appTest.ResultReset()
- // Next (the same) scrape should not new metadata entries.
app = sl.appender()
_, _, _, err = app.append([]byte(scrape1), "application/openmetrics-text", now.Add(15*time.Second))
require.NoError(t, err)
require.NoError(t, app.Commit())
- require.Empty(t, appTest.ResultMetadata())
+ if appV2 {
+ // Next (the same) scrape should pass new metadata entries as per always-on metadata Appendable V2 contract.
+ teststorage.RequireEqual(t, []sample{
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}},
+ }, appTest.ResultMetadata())
+ } else {
+ // Next (the same) scrape should not add new metadata entries.
+ require.Empty(t, appTest.ResultMetadata())
+ }
appTest.ResultReset()
app = sl.appender()
_, _, _, err = app.append([]byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second))
require.NoError(t, err)
require.NoError(t, app.Commit())
- testutil.RequireEqual(t, []sample{
+ teststorage.RequireEqual(t, []sample{
{L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "different help text"}}, // Here, technically we should have no unit, but it's a known limitation of the current implementation.
{L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "metric2", Help: "other help text"}},
}, appTest.ResultMetadata())
@@ -248,14 +284,20 @@ test_metric2{foo="bar"} 22
}
func TestScrapeReportMetadata(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeReportMetadata(t, appV2)
+ })
+}
+
+func testScrapeReportMetadata(t *testing.T, appV2 bool) {
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
app := sl.appender()
now := time.Now()
require.NoError(t, sl.report(app, now, 2*time.Second, 1, 1, 1, 512, nil))
require.NoError(t, app.Commit())
- testutil.RequireEqual(t, []sample{
+ teststorage.RequireEqual(t, []sample{
{L: labels.FromStrings("__name__", "up"), M: scrapeHealthMetric.Metadata},
{L: labels.FromStrings("__name__", "scrape_duration_seconds"), M: scrapeDurationMetric.Metadata},
{L: labels.FromStrings("__name__", "scrape_samples_scraped"), M: scrapeSamplesMetric.Metadata},
@@ -313,6 +355,12 @@ func TestIsSeriesPartOfFamily(t *testing.T) {
}
func TestDroppedTargetsList(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testDroppedTargetsList(t, appV2)
+ })
+}
+
+func testDroppedTargetsList(t *testing.T, appV2 bool) {
var (
app = teststorage.NewAppendable()
cfg = &config.ScrapeConfig{
@@ -337,7 +385,8 @@ func TestDroppedTargetsList(t *testing.T) {
},
},
}
- sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa = selectAppendable(app, appV2)
+ sp, _ = newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
expectedLength = 2
)
@@ -358,7 +407,7 @@ func TestDroppedTargetsList(t *testing.T) {
// TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
// even when new labels don't affect the target `hash`.
func TestDiscoveredLabelsUpdate(t *testing.T) {
- sp := newTestScrapePool(t, nil)
+ sp := newTestScrapePool(t, nil, false, nil)
// These are used when syncing so need this to avoid a panic.
sp.config = &config.ScrapeConfig{
@@ -430,7 +479,7 @@ func (*testLoop) getCache() *scrapeCache {
func TestScrapePoolStop(t *testing.T) {
t.Parallel()
- sp := newTestScrapePool(t, nil)
+ sp := newTestScrapePool(t, nil, false, nil)
var mtx sync.Mutex
stopped := map[uint64]bool{}
@@ -530,7 +579,7 @@ func TestScrapePoolReload(t *testing.T) {
// Create test pool.
reg, metrics := newTestRegistryAndScrapeMetrics(t)
- sp := newTestScrapePool(t, newLoopCfg1)
+ sp := newTestScrapePool(t, nil, false, newLoopCfg1)
sp.metrics = metrics
// Prefill pool with 20 loops, simulating 20 scrape targets.
@@ -592,7 +641,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
return l
}
reg, metrics := newTestRegistryAndScrapeMetrics(t)
- sp := newTestScrapePool(t, newLoop)
+ sp := newTestScrapePool(t, nil, false, newLoop)
sp.activeTargets[1] = &Target{
labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"),
}
@@ -644,7 +693,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
return l
}
- sp := newTestScrapePool(t, newLoop)
+ sp := newTestScrapePool(t, nil, false, newLoop)
var tgs []*targetgroup.Group
for i := range 50 {
@@ -756,7 +805,9 @@ func TestScrapePoolAppenderWithLimits(t *testing.T) {
baseAppender := struct{ storage.Appender }{}
appendable := appendableFunc(func(context.Context) storage.Appender { return baseAppender })
- sl, _ := newTestScrapeLoop(t, withAppendable(appendable))
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appendable
+ })
wrapped := appenderWithLimits(sl.appendable.Appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax)
tl, ok := wrapped.(*timeLimitAppender)
@@ -809,7 +860,77 @@ func TestScrapePoolAppenderWithLimits(t *testing.T) {
require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender)
}
+type appendableV2Func func(ctx context.Context) storage.AppenderV2
+
+func (a appendableV2Func) AppenderV2(ctx context.Context) storage.AppenderV2 { return a(ctx) }
+
+func TestScrapePoolAppenderWithLimits_AppendV2(t *testing.T) {
+ // Create a unique value, to validate the correct chain of appenders.
+ baseAppender := struct{ storage.AppenderV2 }{}
+ appendable := appendableV2Func(func(context.Context) storage.AppenderV2 { return baseAppender })
+
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendableV2 = appendable
+ })
+ wrapped := appenderV2WithLimits(sl.appendableV2.AppenderV2(context.Background()), 0, 0, histogram.ExponentialSchemaMax)
+
+ tl, ok := wrapped.(*timeLimitAppenderV2)
+ require.True(t, ok, "Expected timeLimitAppenderV2 but got %T", wrapped)
+
+ require.Equal(t, baseAppender, tl.AppenderV2, "Expected base AppenderV2 but got %T", tl.AppenderV2)
+
+ sampleLimit := 100
+ sl, _ = newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendableV2 = appendable
+ sl.sampleLimit = sampleLimit
+ })
+ wrapped = appenderV2WithLimits(sl.appendableV2.AppenderV2(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax)
+
+ la, ok := wrapped.(*limitAppenderV2)
+ require.True(t, ok, "Expected limitAppenderV2 but got %T", wrapped)
+
+ tl, ok = la.AppenderV2.(*timeLimitAppenderV2)
+ require.True(t, ok, "Expected timeLimitAppenderV2 but got %T", la.AppenderV2)
+
+ require.Equal(t, baseAppender, tl.AppenderV2, "Expected base AppenderV2 but got %T", tl.AppenderV2)
+
+ wrapped = appenderV2WithLimits(sl.appendableV2.AppenderV2(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax)
+
+ bl, ok := wrapped.(*bucketLimitAppenderV2)
+ require.True(t, ok, "Expected bucketLimitAppenderV2 but got %T", wrapped)
+
+ la, ok = bl.AppenderV2.(*limitAppenderV2)
+ require.True(t, ok, "Expected limitAppenderV2 but got %T", bl)
+
+ tl, ok = la.AppenderV2.(*timeLimitAppenderV2)
+ require.True(t, ok, "Expected timeLimitAppenderV2 but got %T", la.AppenderV2)
+
+ require.Equal(t, baseAppender, tl.AppenderV2, "Expected base AppenderV2 but got %T", tl.AppenderV2)
+
+ wrapped = appenderV2WithLimits(sl.appendableV2.AppenderV2(context.Background()), sampleLimit, 100, 0)
+
+ ml, ok := wrapped.(*maxSchemaAppenderV2)
+ require.True(t, ok, "Expected maxSchemaAppenderV2 but got %T", wrapped)
+
+ bl, ok = ml.AppenderV2.(*bucketLimitAppenderV2)
+ require.True(t, ok, "Expected bucketLimitAppenderV2 but got %T", wrapped)
+
+ la, ok = bl.AppenderV2.(*limitAppenderV2)
+ require.True(t, ok, "Expected limitAppenderV2 but got %T", bl)
+
+ tl, ok = la.AppenderV2.(*timeLimitAppenderV2)
+ require.True(t, ok, "Expected timeLimitAppenderV2 but got %T", la.AppenderV2)
+
+ require.Equal(t, baseAppender, tl.AppenderV2, "Expected base AppenderV2 but got %T", tl.AppenderV2)
+}
+
func TestScrapePoolRaces(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapePoolRaces(t, appV2)
+ })
+}
+
+func testScrapePoolRaces(t *testing.T, appV2 bool) {
t.Parallel()
interval, _ := model.ParseDuration("1s")
timeout, _ := model.ParseDuration("500ms")
@@ -821,7 +942,8 @@ func TestScrapePoolRaces(t *testing.T) {
MetricNameEscapingScheme: model.AllowUTF8,
}
}
- sp, _ := newScrapePool(newConfig(), teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa := selectAppendable(teststorage.NewAppendable(), appV2)
+ sp, _ := newScrapePool(newConfig(), sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
tgts := []*targetgroup.Group{
{
Targets: []model.LabelSet{
@@ -853,6 +975,12 @@ func TestScrapePoolRaces(t *testing.T) {
}
func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapePoolScrapeLoopsStarted(t, appV2)
+ })
+}
+
+func testScrapePoolScrapeLoopsStarted(t *testing.T, appV2 bool) {
var wg sync.WaitGroup
newLoop := func(scrapeLoopOptions) loop {
wg.Add(1)
@@ -864,7 +992,7 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
}
return l
}
- sp := newTestScrapePool(t, newLoop)
+ sp := newTestScrapePool(t, teststorage.NewAppendable(), appV2, newLoop)
tgs := []*targetgroup.Group{
{
@@ -946,11 +1074,16 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
func nopMutator(l labels.Labels) labels.Labels { return l }
func TestScrapeLoopStop(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopStop(t, appV2)
+ })
+}
+
+func testScrapeLoopStop(t *testing.T, appV2 bool) {
signal := make(chan struct{}, 1)
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
// Since we're writing samples directly below we need to provide a protocol fallback.
sl.fallbackScrapeProtocol = "text/plain"
})
@@ -1000,6 +1133,12 @@ func TestScrapeLoopStop(t *testing.T) {
}
func TestScrapeLoopRun(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRun(t, appV2)
+ })
+}
+
+func testScrapeLoopRun(t *testing.T, appV2 bool) {
t.Parallel()
var (
signal = make(chan struct{}, 1)
@@ -1007,7 +1146,7 @@ func TestScrapeLoopRun(t *testing.T) {
)
ctx, cancel := context.WithCancel(t.Context())
- sl, scraper := newTestScrapeLoop(t, withCtx(ctx))
+ sl, scraper := newTestScrapeLoop(t, withCtx(ctx), withAppendable(teststorage.NewAppendable(), appV2))
// The loop must terminate during the initial offset if the context
// is canceled.
scraper.offsetDur = time.Hour
@@ -1030,7 +1169,7 @@ func TestScrapeLoopRun(t *testing.T) {
}
ctx, cancel = context.WithCancel(t.Context())
- sl, scraper = newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper = newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
sl.timeout = 100 * time.Millisecond
})
@@ -1076,13 +1215,19 @@ func TestScrapeLoopRun(t *testing.T) {
}
func TestScrapeLoopForcedErr(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopForcedErr(t, appV2)
+ })
+}
+
+func testScrapeLoopForcedErr(t *testing.T, appV2 bool) {
var (
signal = make(chan struct{}, 1)
errc = make(chan error)
)
ctx, cancel := context.WithCancel(t.Context())
- sl, scraper := newTestScrapeLoop(t, withCtx(ctx))
+ sl, scraper := newTestScrapeLoop(t, withCtx(ctx), withAppendable(teststorage.NewAppendable(), appV2))
forcedErr := errors.New("forced err")
sl.setForcedError(forcedErr)
@@ -1113,6 +1258,12 @@ func TestScrapeLoopForcedErr(t *testing.T) {
}
func TestScrapeLoopRun_ContextCancelTerminatesBlockedSend(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRunContextCancelTerminatesBlockedSend(t, appV2)
+ })
+}
+
+func testScrapeLoopRunContextCancelTerminatesBlockedSend(t *testing.T, appV2 bool) {
// Regression test for issue #17553
defer goleak.VerifyNone(t)
@@ -1122,7 +1273,7 @@ func TestScrapeLoopRun_ContextCancelTerminatesBlockedSend(t *testing.T) {
)
ctx, cancel := context.WithCancel(t.Context())
- sl, scraper := newTestScrapeLoop(t, withCtx(ctx))
+ sl, scraper := newTestScrapeLoop(t, withCtx(ctx), withAppendable(teststorage.NewAppendable(), appV2))
forcedErr := errors.New("forced err")
sl.setForcedError(forcedErr)
@@ -1149,7 +1300,13 @@ func TestScrapeLoopRun_ContextCancelTerminatesBlockedSend(t *testing.T) {
}
func TestScrapeLoopMetadata(t *testing.T) {
- sl, _ := newTestScrapeLoop(t)
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopMetadata(t, appV2)
+ })
+}
+
+func testScrapeLoopMetadata(t *testing.T, appV2 bool) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2))
app := sl.appender()
total, _, _, err := app.append([]byte(`# TYPE test_metric counter
@@ -1183,7 +1340,13 @@ test_metric_total 1
}
func TestScrapeLoopSeriesAdded(t *testing.T) {
- sl, _ := newTestScrapeLoop(t)
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopSeriesAdded(t, appV2)
+ })
+}
+
+func testScrapeLoopSeriesAdded(t *testing.T, appV2 bool) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2))
app := sl.appender()
total, added, seriesAdded, err := app.append([]byte("test_metric 1\n"), "text/plain", time.Time{})
@@ -1203,6 +1366,12 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
}
func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopFailWithInvalidLabelsAfterRelabel(t, appV2)
+ })
+}
+
+func testScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T, appV2 bool) {
target := &Target{
labels: labels.FromStrings("pod_label_invalid_012\xff", "test"),
}
@@ -1213,7 +1382,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
Replacement: "$1",
NameValidationScheme: model.UTF8Validation,
}}
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2), func(sl *scrapeLoop) {
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, target, true, relabelConfig)
}
@@ -1229,7 +1398,13 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
}
func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) {
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopFailLegacyUnderUTF8(t, appV2)
+ })
+}
+
+func testScrapeLoopFailLegacyUnderUTF8(t *testing.T, appV2 bool) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2), func(sl *scrapeLoop) {
sl.validationScheme = model.LegacyValidation
})
@@ -1242,7 +1417,7 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) {
require.Equal(t, 0, seriesAdded)
// When scrapeloop has validation set to UTF-8, the metric is allowed.
- sl, _ = newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, _ = newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2), func(sl *scrapeLoop) {
sl.validationScheme = model.UTF8Validation
})
@@ -1261,7 +1436,9 @@ func readTextParseTestMetrics(t testing.TB) []byte {
if err != nil {
t.Fatal(err)
}
- return b
+
+ // Replace all Carriage Return chars that appear when testing on windows.
+ return bytes.ReplaceAll(b, []byte{'\r'}, nil)
}
func makeTestGauges(n int) []byte {
@@ -1275,13 +1452,50 @@ func makeTestGauges(n int) []byte {
return sb.Bytes()
}
+func makeTestHistogramsWithExemplars(n int) []byte {
+ sb := bytes.Buffer{}
+ for i := range n {
+ sb.WriteString(strings.ReplaceAll(`# HELP rpc_durations_histogram%d_seconds RPC latency distributions.
+# TYPE rpc_durations_histogram%d_seconds histogram
+rpc_durations_histogram%d_seconds_bucket{le="-0.00099"} 0
+rpc_durations_histogram%d_seconds_bucket{le="-0.00089"} 1 # {dummyID="1242"} -0.00091 1.7268398142239082e+09
+rpc_durations_histogram%d_seconds_bucket{le="-0.0007899999999999999"} 1 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09
+rpc_durations_histogram%d_seconds_bucket{le="-0.0006899999999999999"} 2 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09
+rpc_durations_histogram%d_seconds_bucket{le="-0.0005899999999999998"} 3 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09
+rpc_durations_histogram%d_seconds_bucket{le="-0.0004899999999999998"} 4 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09
+rpc_durations_histogram%d_seconds_bucket{le="-0.0003899999999999998"} 5 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09
+rpc_durations_histogram%d_seconds_bucket{le="-0.0002899999999999998"} 6 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09
+rpc_durations_histogram%d_seconds_bucket{le="-0.0001899999999999998"} 7 # {dummyID="84741"} -0.00020178290006788965 1.726839814829977e+09
+rpc_durations_histogram%d_seconds_bucket{le="-8.999999999999979e-05"} 7
+rpc_durations_histogram%d_seconds_bucket{le="1.0000000000000216e-05"} 8 # {dummyID="19206"} -4.6156147425468016e-05 1.7268398151337721e+09
+rpc_durations_histogram%d_seconds_bucket{le="0.00011000000000000022"} 9 # {dummyID="3974"} 9.528436760156754e-05 1.726839814526797e+09
+rpc_durations_histogram%d_seconds_bucket{le="0.00021000000000000023"} 11 # {dummyID="29640"} 0.00017459624183458996 1.7268398139220061e+09
+rpc_durations_histogram%d_seconds_bucket{le="0.0003100000000000002"} 15 # {dummyID="9818"} 0.0002791130914009552 1.7268398149821382e+09
+rpc_durations_histogram%d_seconds_bucket{le="0.0004100000000000002"} 15
+rpc_durations_histogram%d_seconds_bucket{le="0.0005100000000000003"} 15
+rpc_durations_histogram%d_seconds_bucket{le="0.0006100000000000003"} 15
+rpc_durations_histogram%d_seconds_bucket{le="0.0007100000000000003"} 15
+rpc_durations_histogram%d_seconds_bucket{le="0.0008100000000000004"} 15
+rpc_durations_histogram%d_seconds_bucket{le="0.0009100000000000004"} 15
+rpc_durations_histogram%d_seconds_bucket{le="+Inf"} 15
+rpc_durations_histogram%d_seconds_sum -8.452185437166741e-05
+rpc_durations_histogram%d_seconds_count 15
+rpc_durations_histogram%d_seconds_created 1.726839813016302e+09
+`, "%d", strconv.Itoa(i)))
+ }
+ sb.WriteString("# EOF\n")
+ return sb.Bytes()
+}
+
+// promTextToProto converts Prometheus text to proto.
+// Given expfmt decoding limitations, it does not support OpenMetrics fully (e.g. exemplars).
func promTextToProto(tb testing.TB, text []byte) []byte {
tb.Helper()
p := expfmt.NewTextParser(model.UTF8Validation)
fams, err := p.TextToMetricFamilies(bytes.NewReader(text))
if err != nil {
- tb.Fatal(err)
+ tb.Fatal("TextToMetricFamilies:", err)
}
// Order by name for the deterministic tests.
var names []string
@@ -1307,8 +1521,7 @@ func promTextToProto(tb testing.TB, text []byte) []byte {
func TestPromTextToProto(t *testing.T) {
metricsText := readTextParseTestMetrics(t)
- // TODO(bwplotka): Windows adds \r for new lines which is
- // not handled correctly in the expfmt parser, fix it.
+ // On windows \r is added when reading, but parsers do not support this. Kill it.
metricsText = bytes.ReplaceAll(metricsText, []byte("\r"), nil)
metricsProto := promTextToProto(t, metricsText)
@@ -1332,73 +1545,326 @@ func TestPromTextToProto(t *testing.T) {
require.Equal(t, "promhttp_metric_handler_requests_total", got[236])
}
-// BenchmarkScrapeLoopAppend benchmarks a core append function in a scrapeLoop
-// that creates a new parser and goes through a byte slice from a single scrape.
-// Benchmark compares append function run across 2 dimensions:
-// *`data`: different sizes of metrics scraped e.g. one big gauge metric family
+// TestScrapeLoopAppend_WithStorage tests appends and storage integration for the
+// large input files that are also used in benchmarks.
+func TestScrapeLoopAppend_WithStorage(t *testing.T) {
+ ts := time.Now()
+
+ for _, appV2 := range []bool{false, true} {
+ for _, tc := range []struct {
+ name string
+ parsableText []byte
+
+ expectedSamplesLen int
+ testAppendedSamples func(t *testing.T, committed []sample)
+ testExemplars func(t *testing.T, er []exemplar.QueryResult)
+ }{
+ {
+ name: "1Fam2000Gauges",
+ parsableText: makeTestGauges(2000),
+
+ expectedSamplesLen: 2000,
+ testAppendedSamples: func(t *testing.T, committed []sample) {
+ var expectedMF string
+ if appV2 {
+ expectedMF = "metric_a" // Only AppenderV2 supports metric family passing.
+ }
+ // Verify a few samples.
+ testutil.RequireEqual(t, sample{
+ MF: expectedMF,
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Help: "help text"},
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a", "foo", "0", "bar", "0"), V: 1, T: timestamp.FromTime(ts),
+ }, committed[0])
+ testutil.RequireEqual(t, sample{
+ MF: expectedMF,
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Help: "help text"},
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a", "foo", "1245", "bar", "124500"), V: 1, T: timestamp.FromTime(ts),
+ }, committed[1245])
+ testutil.RequireEqual(t, sample{
+ MF: expectedMF,
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Help: "help text"},
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a", "foo", "1999", "bar", "199900"), V: 1, T: timestamp.FromTime(ts),
+ }, committed[len(committed)-1])
+ },
+ },
+ {
+ name: "237FamsAllTypes",
+ parsableText: readTextParseTestMetrics(t),
+
+ expectedSamplesLen: 1857,
+ testAppendedSamples: func(t *testing.T, committed []sample) {
+ // Verify a few samples.
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return ""
+ }
+ return "go_gc_gomemlimit_bytes"
+ }(),
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Help: "Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes"},
+ L: labels.FromStrings(model.MetricNameLabel, "go_gc_gomemlimit_bytes"), V: 9.03676723e+08, T: timestamp.FromTime(ts),
+ }, committed[11])
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return "" // Only AppenderV2 supports metric family passing.
+ }
+ return "prometheus_http_request_duration_seconds"
+ }(),
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Help: "Histogram of latencies for HTTP requests."},
+ L: labels.FromStrings(model.MetricNameLabel, "prometheus_http_request_duration_seconds_bucket", "handler", "/api/v1/query_range", "le", "120.0"), V: 118157, T: timestamp.FromTime(ts),
+ }, committed[448])
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return "" // Only AppenderV2 supports metric family passing.
+ }
+ return "promhttp_metric_handler_requests_total"
+ }(),
+ M: metadata.Metadata{Type: model.MetricTypeCounter, Help: "Total number of scrapes by HTTP status code."},
+ L: labels.FromStrings(model.MetricNameLabel, "promhttp_metric_handler_requests_total", "code", "503"), V: 0, T: timestamp.FromTime(ts),
+ }, committed[len(committed)-1])
+ },
+ },
+ {
+ name: "100HistsWithExemplars",
+ parsableText: makeTestHistogramsWithExemplars(100),
+
+ expectedSamplesLen: 24 * 100,
+ testAppendedSamples: func(t *testing.T, committed []sample) {
+ // Verify a few samples.
+ m := metadata.Metadata{Type: model.MetricTypeHistogram, Help: "RPC latency distributions."}
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return "" // Only AppenderV2 supports metric family passing.
+ }
+ return "rpc_durations_histogram0_seconds"
+ }(),
+ M: m, L: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram0_seconds_bucket", "le", "0.0003100000000000002"), V: 15, T: timestamp.FromTime(ts),
+ ES: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("dummyID", "9818"), Value: 0.0002791130914009552, Ts: 1726839814982, HasTs: true},
+ },
+ }, committed[13])
+ testutil.RequireEqual(t, sample{
+ MF: func() string {
+ if !appV2 {
+ return "" // Only AppenderV2 supports metric family passing.
+ }
+ return "rpc_durations_histogram49_seconds"
+ }(),
+ M: m, L: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram49_seconds_sum"), V: -8.452185437166741e-05, T: timestamp.FromTime(ts),
+ }, committed[24*50-3])
+
+ // This series does not have metadata, nor metric family, because of isSeriesPartOfFamily bug and OpenMetric 1.0 limitations around _created series.
+ // TODO(bwplotka): Fix with https://github.com/prometheus/prometheus/issues/17900
+ testutil.RequireEqual(t, sample{
+ L: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram99_seconds_created"), V: 1.726839813016302e+09, T: timestamp.FromTime(ts),
+ }, committed[len(committed)-1])
+ },
+ testExemplars: func(t *testing.T, er []exemplar.QueryResult) {
+ // 12 out of 24 histogram series have exemplars.
+ require.Len(t, er, 12*100)
+ testutil.RequireEqual(t, exemplar.QueryResult{
+ SeriesLabels: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram0_seconds_bucket", "le", "0.0003100000000000002"),
+ Exemplars: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("dummyID", "9818"), Value: 0.0002791130914009552, Ts: 1726839814982, HasTs: true},
+ },
+ }, er[10])
+ testutil.RequireEqual(t, exemplar.QueryResult{
+ SeriesLabels: labels.FromStrings(model.MetricNameLabel, "rpc_durations_histogram9_seconds_bucket", "le", "1.0000000000000216e-05"),
+ Exemplars: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("dummyID", "19206"), Value: -4.6156147425468016e-05, Ts: 1726839815133, HasTs: true},
+ },
+ }, er[len(er)-1])
+ },
+ },
+ } {
+ t.Run(fmt.Sprintf("appV2=%v/data=%v", appV2, tc.name), func(t *testing.T) {
+ s := teststorage.New(t, func(opt *tsdb.Options) {
+ opt.EnableMetadataWALRecords = true
+ })
+
+ appTest := teststorage.NewAppendable().Then(s)
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
+ app := sl.appender()
+
+ _, _, _, err := app.append(tc.parsableText, "application/openmetrics-text", ts)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Check the recorded samples on the Appender layer.
+ require.Nil(t, appTest.PendingSamples())
+ require.Nil(t, appTest.RolledbackSamples())
+
+ got := appTest.ResultSamples()
+ require.Len(t, got, tc.expectedSamplesLen)
+ tc.testAppendedSamples(t, got)
+
+ // Check basic storage stats.
+ stats := s.Head().Stats(model.MetricNameLabel, 2000)
+ require.Equal(t, tc.expectedSamplesLen, int(stats.NumSeries))
+
+ // Check exemplars.
+ eq, err := s.ExemplarQuerier(t.Context())
+ require.NoError(t, err)
+
+ er, err := eq.Select(math.MinInt64, math.MaxInt64, nil)
+ require.NoError(t, err)
+
+ if tc.testExemplars != nil {
+ tc.testExemplars(t, er)
+ } else {
+ // Expect no exemplars.
+ require.Empty(t, er, "%v is not empty", er)
+ }
+ })
+ }
+ }
+}
+
+// BenchmarkScrapeLoopAppend benchmarks scrape appends for typical cases.
+//
+// Benchmark compares append function run across 5 dimensions:
+// * `withStorage`: without storage isolates the benchmark to the scrape loop append code. With storage is an
+// integration benchmark with the TSDB head appender code. For acceptance criteria run with storage, without for debugging.
+// * `appV2`: appender V1 or V2.
+// * `appendMetadataToWAL`: metadata-wal-records feature enabled or not (problematic feature we might need to change
+// soon, see https://github.com/prometheus/prometheus/issues/15911.
+// * `data`: different sizes of metrics scraped e.g. one big gauge metric family
// with a thousand series and more realistic scenario with common types.
-// *`fmt`: different scrape formats which will benchmark different parsers e.g.
+// * `fmt`: different scrape formats which will benchmark different parsers e.g.
// promtext, omtext and promproto.
//
-// Recommended CLI invocation:
+// NOTE: withStorage=true uses sync.Pool buffers which is heavily non-deterministic and shared across go routines.
+// As a result, it's recommended to run dimensions you want to compare with in e.g. separate go tool invocations.
+// Recommended CLI invocation(s):
/*
- export bench=append && go test ./scrape/... \
- -run '^$' -bench '^BenchmarkScrapeLoopAppend' \
- -benchtime 5s -count 6 -cpu 2 -timeout 999m \
+ # Acceptance: With storage with V1 and V2 in separate process:
+ export bench=appendV1 && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapeLoopAppend/withStorage=true/appV2=false/$' \
+ -benchtime 2s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+
+ export bench=appendV2 && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapeLoopAppend/withStorage=true/appV2=true/$' \
+ -benchtime 2s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+
+ # For debugging scrape overheads:
+ export bench=appendNoStorage && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapeLoopAppend/withStorage=false/$' \
+ -benchtime 2s -count 6 -cpu 2 -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkScrapeLoopAppend(b *testing.B) {
- for _, data := range []struct {
- name string
- parsableText []byte
- }{
- {name: "1Fam1000Gauges", parsableText: makeTestGauges(2000)}, // ~68.1 KB, ~77.9 KB in proto.
- {name: "237FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~185.7 KB, ~70.6 KB in proto.
- } {
- b.Run(fmt.Sprintf("data=%v", data.name), func(b *testing.B) {
- metricsProto := promTextToProto(b, data.parsableText)
+ for _, withStorage := range []bool{false, true} {
+ for _, appV2 := range []bool{false, true} {
+ for _, appendMetadataToWAL := range []bool{false, true} {
+ for _, data := range []struct {
+ name string
+ parsableText []byte
+ }{
+ {name: "1Fam2000Gauges", parsableText: makeTestGauges(2000)}, // ~68.1 KB, ~77.9 KB in proto.
+ {name: "237FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~185.7 KB, ~70.6 KB in proto.
+ } {
+ b.Run(fmt.Sprintf("withStorage=%v/appV2=%v/appendMetadataToWAL=%v/data=%v", withStorage, appV2, appendMetadataToWAL, data.name), func(b *testing.B) {
+ metricsProto := promTextToProto(b, data.parsableText)
- for _, bcase := range []struct {
- name string
- contentType string
- parsable []byte
- }{
- {name: "PromText", contentType: "text/plain", parsable: data.parsableText},
- {name: "OMText", contentType: "application/openmetrics-text", parsable: data.parsableText},
- {name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
- } {
- b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
- // Need a full storage for correct Add/AddFast semantics.
- s := teststorage.New(b)
- b.Cleanup(func() { _ = s.Close() })
-
- sl, _ := newTestScrapeLoop(b, withAppendable(s))
- app := sl.appender()
- ts := time.Time{}
-
- b.ReportAllocs()
- b.ResetTimer()
- for b.Loop() {
- ts = ts.Add(time.Second)
- _, _, _, err := app.append(bcase.parsable, bcase.contentType, ts)
- if err != nil {
- b.Fatal(err)
+ for _, bcase := range []struct {
+ name string
+ contentType string
+ parsable []byte
+ }{
+ {name: "PromText", contentType: "text/plain", parsable: data.parsableText},
+ {name: "OMText", contentType: "application/openmetrics-text", parsable: data.parsableText},
+ {name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
+ } {
+ b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
+ benchScrapeLoopAppend(b, withStorage, appV2, bcase.parsable, bcase.contentType, appendMetadataToWAL, false)
+ })
}
- }
- })
+ })
+ }
}
+ }
+ }
+}
+
+func benchScrapeLoopAppend(
+ b *testing.B,
+ withStorage bool,
+ appV2 bool,
+ parsable []byte,
+ contentType string,
+ appendMetadataToWAL bool,
+ enableExemplarStorage bool,
+) {
+ var a compatAppendable = teststorage.NewAppendable().SkipRecording(true) // Make it noop for benchmark purposes.
+ if withStorage {
+ a = teststorage.New(b, func(opt *tsdb.Options) {
+ opt.EnableMetadataWALRecords = appendMetadataToWAL
+ if enableExemplarStorage {
+ opt.EnableExemplarStorage = true
+ opt.MaxExemplars = 1e5
+ }
+ })
+ }
+ sl, _ := newTestScrapeLoop(b, withAppendable(a, appV2), func(sl *scrapeLoop) {
+ sl.appendMetadataToWAL = appendMetadataToWAL
+ })
+ ts := time.Time{}
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for b.Loop() {
+ app := sl.appender()
+ ts = ts.Add(time.Second)
+ _, _, _, err := app.append(parsable, contentType, ts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Reset the appender so it doesn't grow indefinitely, and it mimics what prod scrape will do.
+ // We do rollback, because it's cheaper than Commit.
+ if err := app.Rollback(); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkScrapeLoopAppend_HistogramsWithExemplars benchmarks OM scrapes with histograms full of exemplars.
+//
+// For e2e TSDB impact, we enable the TSDB exemplar storage
+//
+// Recommended CLI invocation:
+/*
+ export bench=appendHistWithExemplars && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapeLoopAppend_HistogramsWithExemplars' \
+ -benchtime 5s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+*/
+func BenchmarkScrapeLoopAppend_HistogramsWithExemplars(b *testing.B) {
+ for _, appV2 := range []bool{false, true} {
+ b.Run(fmt.Sprintf("appV2=%v", appV2), func(b *testing.B) {
+ parsable := makeTestHistogramsWithExemplars(100) // ~255.8 KB in OM text.
+ benchScrapeLoopAppend(b, true, appV2, parsable, "application/openmetrics-text", false, true)
})
}
}
func TestScrapeLoopScrapeAndReport(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopScrapeAndReport(t, appV2)
+ })
+}
+
+func testScrapeLoopScrapeAndReport(t *testing.T, appV2 bool) {
parsableText := readTextParseTestMetrics(t)
// On windows \r is added when reading, but parsers do not support this. Kill it.
parsableText = bytes.ReplaceAll(parsableText, []byte("\r"), nil)
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.fallbackScrapeProtocol = "application/openmetrics-text"
})
scraper.scrapeFunc = func(_ context.Context, writer io.Writer) error {
@@ -1418,39 +1884,48 @@ func TestScrapeLoopScrapeAndReport(t *testing.T) {
// Recommended CLI invocation:
/*
export bench=scrapeAndReport && go test ./scrape/... \
- -run '^$' -bench '^BenchmarkScrapeLoopScrapeAndReport' \
+ -run '^$' -bench '^BenchmarkScrapeLoopScrapeAndReport$' \
-benchtime 5s -count 6 -cpu 2 -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkScrapeLoopScrapeAndReport(b *testing.B) {
- parsableText := readTextParseTestMetrics(b)
+ for _, appV2 := range []bool{false, true} {
+ b.Run(fmt.Sprintf("appV2=%v", appV2), func(b *testing.B) {
+ parsableText := readTextParseTestMetrics(b)
- s := teststorage.New(b)
- b.Cleanup(func() { _ = s.Close() })
+ s := teststorage.New(b)
- sl, scraper := newTestScrapeLoop(b, func(sl *scrapeLoop) {
- sl.appendable = s
- sl.fallbackScrapeProtocol = "application/openmetrics-text"
- })
- scraper.scrapeFunc = func(_ context.Context, writer io.Writer) error {
- _, err := writer.Write(parsableText)
- return err
- }
+ sl, scraper := newTestScrapeLoop(b, withAppendable(s, appV2), func(sl *scrapeLoop) {
+ sl.fallbackScrapeProtocol = "application/openmetrics-text"
+ })
+ scraper.scrapeFunc = func(_ context.Context, writer io.Writer) error {
+ _, err := writer.Write(parsableText)
+ return err
+ }
- ts := time.Time{}
+ ts := time.Time{}
- b.ReportAllocs()
- b.ResetTimer()
- for b.Loop() {
- ts = ts.Add(time.Second)
- sl.scrapeAndReport(time.Time{}, ts, nil)
- require.NoError(b, scraper.lastError)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for b.Loop() {
+ ts = ts.Add(time.Second)
+ sl.scrapeAndReport(time.Time{}, ts, nil)
+ require.NoError(b, scraper.lastError)
+ }
+ })
}
}
func TestSetOptionsHandlingStaleness(t *testing.T) {
- s := teststorage.New(t, 600000)
- t.Cleanup(func() { _ = s.Close() })
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testSetOptionsHandlingStaleness(t, appV2)
+ })
+}
+
+func testSetOptionsHandlingStaleness(t *testing.T, appV2 bool) {
+ s := teststorage.New(t, func(opt *tsdb.Options) {
+ opt.OutOfOrderTimeWindow = 600000
+ })
signal := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(t.Context())
@@ -1458,9 +1933,8 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
// Function to run the scrape loop
runScrapeLoop := func(ctx context.Context, t *testing.T, cue int, action func(*scrapeLoop)) {
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(s, appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
- sl.appendable = s
})
numScrapes := 0
@@ -1524,17 +1998,22 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
c++
}
}
- require.Equal(t, 0, c, "invalid count of staleness markers after stopping the engine")
+ require.Zero(t, c, "invalid count of staleness markers after stopping the engine")
}
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t, appV2)
+ })
+}
+
+func testScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T, appV2 bool) {
signal := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(t.Context())
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
- sl.appendable = appTest
// Since we're writing samples directly below we need to provide a protocol fallback.
sl.fallbackScrapeProtocol = "text/plain"
})
@@ -1576,13 +2055,18 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
}
func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRunCreatesStaleMarkersOnParseFailure(t, appV2)
+ })
+}
+
+func testScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T, appV2 bool) {
signal := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(t.Context())
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
- sl.appendable = appTest
// Since we're writing samples directly below we need to provide a protocol fallback.
sl.fallbackScrapeProtocol = "text/plain"
})
@@ -1630,13 +2114,18 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
// If we have a target with sample_limit set and scrape initially works, but then we hit the sample_limit error,
// then we don't expect to see any StaleNaNs appended for the series that disappeared due to sample_limit error.
func TestScrapeLoopRunCreatesStaleMarkersOnSampleLimit(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRunCreatesStaleMarkersOnSampleLimit(t, appV2)
+ })
+}
+
+func testScrapeLoopRunCreatesStaleMarkersOnSampleLimit(t *testing.T, appV2 bool) {
signal := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(t.Context())
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
- sl.appendable = appTest
// Since we're writing samples directly below we need to provide a protocol fallback.
sl.fallbackScrapeProtocol = "text/plain"
sl.sampleLimit = 4
@@ -1700,17 +2189,21 @@ func TestScrapeLoopRunCreatesStaleMarkersOnSampleLimit(t *testing.T) {
}
func TestScrapeLoopCache(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopCache(t, appV2)
+ })
+}
+
+func testScrapeLoopCache(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
signal := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(t.Context())
appTest := teststorage.NewAppendable().Then(s)
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
sl.l = promslog.New(&promslog.Config{})
- sl.appendable = appTest
// Since we're writing samples directly below we need to provide a protocol fallback.
sl.fallbackScrapeProtocol = "text/plain"
// Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps.
@@ -1765,13 +2258,18 @@ func TestScrapeLoopCache(t *testing.T) {
}
func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopCacheMemoryExhaustionProtection(t, appV2)
+ })
+}
+
+func testScrapeLoopCacheMemoryExhaustionProtection(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
signal := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(t.Context())
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable().Then(s), appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
})
numScrapes := 0
@@ -1803,138 +2301,124 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
require.LessOrEqual(t, len(sl.cache.series), 2000, "More than 2000 series cached.")
}
-func TestScrapeLoopAppend(t *testing.T) {
- tests := []struct {
- title string
- honorLabels bool
- scrapeLabels string
- discoveryLabels []string
- expLset labels.Labels
- expValue float64
- }{
- {
- // When "honor_labels" is not set
- // label name collision is handler by adding a prefix.
- title: "Label name collision",
- honorLabels: false,
- scrapeLabels: `metric{n="1"} 0`,
- discoveryLabels: []string{"n", "2"},
- expLset: labels.FromStrings("__name__", "metric", "exported_n", "1", "n", "2"),
- expValue: 0,
- }, {
- // When "honor_labels" is not set
- // exported label from discovery don't get overwritten
- title: "Label name collision",
- honorLabels: false,
- scrapeLabels: `metric 0`,
- discoveryLabels: []string{"n", "2", "exported_n", "2"},
- expLset: labels.FromStrings("__name__", "metric", "n", "2", "exported_n", "2"),
- expValue: 0,
- }, {
- // Labels with no value need to be removed as these should not be ingested.
- title: "Delete Empty labels",
- honorLabels: false,
- scrapeLabels: `metric{n=""} 0`,
- discoveryLabels: nil,
- expLset: labels.FromStrings("__name__", "metric"),
- expValue: 0,
- }, {
- // Honor Labels should ignore labels with the same name.
- title: "Honor Labels",
- honorLabels: true,
- scrapeLabels: `metric{n1="1", n2="2"} 0`,
- discoveryLabels: []string{"n1", "0"},
- expLset: labels.FromStrings("__name__", "metric", "n1", "1", "n2", "2"),
- expValue: 0,
- }, {
- title: "Stale - NaN",
- honorLabels: false,
- scrapeLabels: `metric NaN`,
- discoveryLabels: nil,
- expLset: labels.FromStrings("__name__", "metric"),
- expValue: math.Float64frombits(value.NormalNaN),
- },
- }
-
- for _, test := range tests {
- discoveryLabels := &Target{
- labels: labels.FromStrings(test.discoveryLabels...),
- }
-
- appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
- }
- sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
- return mutateReportSampleLabels(l, discoveryLabels)
- }
- })
-
- now := time.Now()
-
- app := sl.appender()
- _, _, _, err := app.append([]byte(test.scrapeLabels), "text/plain", now)
- require.NoError(t, err)
- require.NoError(t, app.Commit())
-
- expected := []sample{
- {
- L: test.expLset,
- T: timestamp.FromTime(now),
- V: test.expValue,
- },
- }
-
- t.Logf("Test:%s", test.title)
- requireEqual(t, expected, appTest.ResultSamples())
- }
+func TestScrapeLoopAppend_HonorLabels(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendHonorLabels(t, appV2)
+ })
}
-func requireEqual(t *testing.T, expected, actual any, msgAndArgs ...any) {
- t.Helper()
- testutil.RequireEqualWithOptions(t, expected, actual,
- []cmp.Option{
- cmp.Comparer(func(a, b sample) bool { return a.Equals(b) }),
- // StaleNaN samples are generated by iterating over a map, which means that the order
- // of samples might be different on every test run. Sort series by label to avoid
- // test failures because of that.
- cmpopts.SortSlices(func(a, b sample) int {
- return labels.Compare(a.L, b.L)
- }),
+func testScrapeLoopAppendHonorLabels(t *testing.T, appV2 bool) {
+ for _, test := range []struct {
+ title string
+ honorLabels bool
+ scrapeText string
+ discoveryLabels []string
+ expLset labels.Labels
+ }{
+ {
+ // On label collision, when "honor_labels" is not set, prefix is added.
+ title: "HonorLabels=false",
+ scrapeText: `metric{n="1"} 1`,
+ discoveryLabels: []string{"n", "2"},
+ expLset: labels.FromStrings("__name__", "metric", "exported_n", "1", "n", "2"),
},
- msgAndArgs...)
+ {
+ // Case where SD already has the prefixed label - it shouldn't be overridden.
+ title: "HonorLabels=false;exported prefix already exists in SD",
+ scrapeText: `metric{n="1"} 1`,
+ discoveryLabels: []string{"n", "2", "exported_n", "2"},
+ expLset: labels.FromStrings("__name__", "metric", "n", "2", "exported_n", "2", "exported_exported_n", "1"),
+ },
+ {
+ // Labels with no value need to be removed as these should not be ingested.
+ title: "HonorLabels=false;empty label",
+ scrapeText: `metric{n=""} 1`,
+ discoveryLabels: nil,
+ expLset: labels.FromStrings("__name__", "metric"),
+ },
+ {
+ // On label collision, when "honor_labels" is true, label is overridden.
+ title: "HonorLabels=true",
+ honorLabels: true,
+ scrapeText: `metric{n="1"} 1`,
+ discoveryLabels: []string{"n", "2"},
+ expLset: labels.FromStrings("__name__", "metric", "n", "1"),
+ },
+ } {
+ t.Run(test.title, func(t *testing.T) {
+ discoveryLabels := &Target{
+ labels: labels.FromStrings(test.discoveryLabels...),
+ }
+
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
+ }
+ sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateReportSampleLabels(l, discoveryLabels)
+ }
+ })
+
+ now := time.Now()
+
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(test.scrapeText), "text/plain", now)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ expected := []sample{
+ {
+ L: test.expLset,
+ T: timestamp.FromTime(now),
+ V: 1,
+ },
+ }
+ teststorage.RequireEqual(t, expected, appTest.ResultSamples())
+ })
+ }
}
func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
- testcases := map[string]struct {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendForConflictingPrefixedLabels(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T, appV2 bool) {
+ for _, tc := range []struct {
+ name string
targetLabels []string
exposedLabels string
expected []string
}{
- "One target label collides with existing label": {
+ {
+ name: "One target label collides with existing label",
targetLabels: []string{"foo", "2"},
exposedLabels: `metric{foo="1"} 0`,
expected: []string{"__name__", "metric", "exported_foo", "1", "foo", "2"},
},
- "One target label collides with existing label, plus target label already with prefix 'exported'": {
+ {
+ name: "One target label collides with existing label, plus target label already with prefix 'exported'",
targetLabels: []string{"foo", "2", "exported_foo", "3"},
exposedLabels: `metric{foo="1"} 0`,
expected: []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "3", "foo", "2"},
},
- "One target label collides with existing label, plus existing label already with prefix 'exported": {
+ {
+ name: "One target label collides with existing label, plus existing label already with prefix 'exported",
targetLabels: []string{"foo", "3"},
exposedLabels: `metric{foo="1", exported_foo="2"} 0`,
expected: []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "2", "foo", "3"},
},
- "One target label collides with existing label, both already with prefix 'exported'": {
+ {
+ name: "One target label collides with existing label, both already with prefix 'exported'",
targetLabels: []string{"exported_foo", "2"},
exposedLabels: `metric{exported_foo="1"} 0`,
expected: []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "2"},
},
- "Two target labels collide with existing labels, both with and without prefix 'exported'": {
+ {
+ name: "Two target labels collide with existing labels, both with and without prefix 'exported'",
targetLabels: []string{"foo", "3", "exported_foo", "4"},
exposedLabels: `metric{foo="1", exported_foo="2"} 0`,
expected: []string{
@@ -1942,7 +2426,8 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
"2", "exported_foo", "4", "foo", "3",
},
},
- "Extreme example": {
+ {
+ name: "Extreme example",
targetLabels: []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"},
exposedLabels: `metric{foo="3", exported_foo="4", exported_exported_exported_foo="5"} 0`,
expected: []string{
@@ -1955,13 +2440,10 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
"foo", "0",
},
},
- }
-
- for name, tc := range testcases {
- t.Run(name, func(t *testing.T) {
+ } {
+ t.Run(tc.name, func(t *testing.T) {
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
}
@@ -1973,7 +2455,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
require.NoError(t, app.Commit())
- requireEqual(t, []sample{
+ teststorage.RequireEqual(t, []sample{
{
L: labels.FromStrings(tc.expected...),
T: timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)),
@@ -1985,8 +2467,14 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
}
func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendCacheEntryButErrNotFound(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T, appV2 bool) {
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
fakeRef := storage.SeriesRef(1)
expValue := float64(1)
@@ -2017,8 +2505,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
V: expValue,
},
}
-
- require.Equal(t, expected, appTest.ResultSamples())
+ teststorage.RequireEqual(t, expected, appTest.ResultSamples())
}
type appendableFunc func(ctx context.Context) storage.Appender
@@ -2026,12 +2513,26 @@ type appendableFunc func(ctx context.Context) storage.Appender
func (a appendableFunc) Appender(ctx context.Context) storage.Appender { return a(ctx) }
func TestScrapeLoopAppendSampleLimit(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendSampleLimit(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendSampleLimit(t *testing.T, appV2 bool) {
appTest := teststorage.NewAppendable()
sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
- // Chain appTest to verify what samples passed through.
- return &limitAppender{Appender: appTest.Appender(ctx), limit: 1}
- })
+ if appV2 {
+ sl.appendableV2 = appendableV2Func(func(ctx context.Context) storage.AppenderV2 {
+ // Chain appTest to verify what samples passed through.
+ return &limitAppenderV2{AppenderV2: appTest.AppenderV2(ctx), limit: 1}
+ })
+ } else {
+ sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
+ // Chain appTest to verify what samples passed through.
+ return &limitAppender{Appender: appTest.Appender(ctx), limit: 1}
+ })
+ }
+
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
return labels.EmptyLabels()
@@ -2075,7 +2576,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
V: 1,
},
}
- requireEqual(t, want, appTest.RolledbackSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.RolledbackSamples(), "Appended samples not as expected:\n%s", appTest)
now = time.Now()
app = sl.appender()
@@ -2088,10 +2589,23 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
}
func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopHistogramBucketLimit(t, appV2)
+ })
+}
+
+func testScrapeLoopHistogramBucketLimit(t *testing.T, appV2 bool) {
sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
- return &bucketLimitAppender{Appender: teststorage.NewAppendable().Appender(ctx), limit: 2}
- })
+ if appV2 {
+ sl.appendableV2 = appendableV2Func(func(ctx context.Context) storage.AppenderV2 {
+ return &bucketLimitAppenderV2{AppenderV2: teststorage.NewAppendable().AppenderV2(ctx), limit: 2}
+ })
+ } else {
+ sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
+ return &bucketLimitAppender{Appender: teststorage.NewAppendable().Appender(ctx), limit: 2}
+ })
+ }
+
sl.enableNativeHistogramScraping = true
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
@@ -2197,11 +2711,17 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
}
func TestScrapeLoop_ChangingMetricString(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopChangingMetricString(t, appV2)
+ })
+}
+
+func testScrapeLoopChangingMetricString(t *testing.T, appV2 bool) {
// This is a regression test for the scrape loop cache not properly maintaining
// IDs when the string representation of a metric changes across a scrape. Thus,
// we use a real storage appender here.
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
now := time.Now()
app := sl.appender()
@@ -2226,11 +2746,17 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
V: 2,
},
}
- require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopAppendFailsWithNoContentType(t *testing.T) {
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendFailsWithNoContentType(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendFailsWithNoContentType(t *testing.T, appV2 bool) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2), func(sl *scrapeLoop) {
// Explicitly setting the lack of fallback protocol here to make it obvious.
sl.fallbackScrapeProtocol = ""
})
@@ -2244,7 +2770,13 @@ func TestScrapeLoopAppendFailsWithNoContentType(t *testing.T) {
// TestScrapeLoopAppendEmptyWithNoContentType ensures we there are no errors when we get a blank scrape or just want to append a stale marker.
func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) {
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendEmptyWithNoContentType(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendEmptyWithNoContentType(t *testing.T, appV2 bool) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2), func(sl *scrapeLoop) {
// Explicitly setting the lack of fallback protocol here to make it obvious.
sl.fallbackScrapeProtocol = ""
})
@@ -2257,8 +2789,14 @@ func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) {
}
func TestScrapeLoopAppendStaleness(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendStaleness(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendStaleness(t *testing.T, appV2 bool) {
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
now := time.Now()
app := sl.appender()
@@ -2283,12 +2821,18 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
V: math.Float64frombits(value.StaleNaN),
},
}
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendNoStalenessIfTimestamp(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T, appV2 bool) {
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
now := time.Now()
app := sl.appender()
_, _, _, err := app.append([]byte("metric_a 1 1000\n"), "text/plain", now)
@@ -2307,13 +2851,18 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
V: 1,
},
}
- require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendStalenessIfTrackTimestampStaleness(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T, appV2 bool) {
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.trackTimestampsStaleness = true
})
@@ -2340,11 +2889,18 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
V: math.Float64frombits(value.StaleNaN),
},
}
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
-func TestScrapeLoopAppendExemplar(t *testing.T) {
- tests := []struct {
+// TestScrapeLoopAppend is the main table test testing the scrape appends, including histograms, exemplar and metadata.
+func TestScrapeLoopAppend(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppend(t, appV2)
+ })
+}
+
+func testScrapeLoopAppend(t *testing.T, appV2 bool) {
+ for _, test := range []struct {
title string
alwaysScrapeClassicHist bool
enableNativeHistogramsIngestion bool
@@ -2353,6 +2909,15 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
discoveryLabels []string
samples []sample
}{
+ {
+ title: "Normal NaN scraped",
+ scrapeText: "metric_total{n=\"1\"} NaN\n# EOF",
+ contentType: "application/openmetrics-text",
+ samples: []sample{{
+ L: labels.FromStrings("__name__", "metric_total", "n", "1"),
+ V: math.Float64frombits(value.NormalNaN),
+ }},
+ },
{
title: "Metric without exemplars",
scrapeText: "metric_total{n=\"1\"} 0\n# EOF",
@@ -2612,22 +3177,6 @@ metric: <
alwaysScrapeClassicHist: true,
contentType: "application/vnd.google.protobuf",
samples: []sample{
- {L: labels.FromStrings("__name__", "test_histogram_count"), T: 1234568, V: 175},
- {L: labels.FromStrings("__name__", "test_histogram_sum"), T: 1234568, V: 0.0008280461746287094},
- {L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), T: 1234568, V: 2},
- {
- L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), T: 1234568, V: 4,
- ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}},
- },
- {
- L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), T: 1234568, V: 16,
- ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}},
- },
- {
- L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0001899999999999998"), T: 1234568, V: 32,
- ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}},
- },
- {L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), T: 1234568, V: 175},
{
T: 1234568,
L: labels.FromStrings("__name__", "test_histogram"),
@@ -2655,6 +3204,22 @@ metric: <
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
},
},
+ {L: labels.FromStrings("__name__", "test_histogram_count"), T: 1234568, V: 175},
+ {L: labels.FromStrings("__name__", "test_histogram_sum"), T: 1234568, V: 0.0008280461746287094},
+ {L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), T: 1234568, V: 2},
+ {
+ L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), T: 1234568, V: 4,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}},
+ },
+ {
+ L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), T: 1234568, V: 16,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}},
+ },
+ {
+ L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0001899999999999998"), T: 1234568, V: 32,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}},
+ },
+ {L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), T: 1234568, V: 175},
},
},
{
@@ -2836,17 +3401,14 @@ metric: <
{L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), T: 1234568, V: 175},
},
},
- }
-
- for _, test := range tests {
+ } {
t.Run(test.title, func(t *testing.T) {
discoveryLabels := &Target{
labels: labels.FromStrings(test.discoveryLabels...),
}
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.enableNativeHistogramScraping = test.enableNativeHistogramsIngestion
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
@@ -2855,15 +3417,23 @@ metric: <
return mutateReportSampleLabels(l, discoveryLabels)
}
sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
- // This test does not care about metadata. Having this true would mean we need to add metadata to sample
+ // This test does not care about metadata.
+ // Having this true would mean we need to add metadata to sample
// expectations.
+ // TODO(bwplotka): Add cases for append metadata to WAL and pass metadata
sl.appendMetadataToWAL = false
})
app := sl.appender()
now := time.Now()
+ // Process expected samples.
for i := range test.samples {
+ if !appV2 && test.samples[i].MF != "" {
+ // AppenderV1 does not support metric family passing.
+ test.samples[i].MF = ""
+ }
+
if test.samples[i].T != 0 {
continue
}
@@ -2887,7 +3457,7 @@ metric: <
_, _, _, err := app.append(buf.Bytes(), test.contentType, now)
require.NoError(t, err)
require.NoError(t, app.Commit())
- requireEqual(t, test.samples, appTest.ResultSamples())
+ teststorage.RequireEqual(t, test.samples, appTest.ResultSamples())
})
}
}
@@ -2913,6 +3483,12 @@ func textToProto(text string, buf *bytes.Buffer) error {
}
func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendExemplarSeries(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendExemplarSeries(t *testing.T, appV2 bool) {
scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000
# EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000
# EOF`}
@@ -2934,8 +3510,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
}
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
}
@@ -2960,15 +3535,20 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
require.NoError(t, app.Commit())
}
- requireEqual(t, samples, appTest.ResultSamples())
+ teststorage.RequireEqual(t, samples, appTest.ResultSamples())
}
func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRunReportsTargetDownOnScrapeError(t, appV2)
+ })
+}
+
+func testScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T, appV2 bool) {
ctx, cancel := context.WithCancel(t.Context())
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
- sl.appendable = appTest
})
scraper.scrapeFunc = func(context.Context, io.Writer) error {
cancel()
@@ -2980,11 +3560,16 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
}
func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRunReportsTargetDownOnInvalidUTF8(t, appV2)
+ })
+}
+
+func testScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T, appV2 bool) {
ctx, cancel := context.WithCancel(t.Context())
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
- sl.appendable = appTest
})
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
cancel()
@@ -2997,6 +3582,12 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
}
func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T, appV2 bool) {
appTest := teststorage.NewAppendable().WithErrs(
func(ls labels.Labels) error {
switch ls.Get(model.MetricNameLabel) {
@@ -3010,7 +3601,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
return nil
}
}, nil, nil)
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
now := time.Unix(1, 0)
app := sl.appender()
@@ -3025,21 +3616,36 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
V: 1,
},
}
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
require.Equal(t, 4, total)
require.Equal(t, 4, added)
require.Equal(t, 1, seriesAdded)
}
func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
- sl, _ := newTestScrapeLoop(t, withAppendable(
- appendableFunc(func(ctx context.Context) storage.Appender {
- return &timeLimitAppender{
- Appender: teststorage.NewAppendable().Appender(ctx),
- maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)),
- }
- }),
- ))
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopOutOfBoundsTimeError(t, appV2)
+ })
+}
+
+func testScrapeLoopOutOfBoundsTimeError(t *testing.T, appV2 bool) {
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ if appV2 {
+ sl.appendableV2 = appendableV2Func(func(ctx context.Context) storage.AppenderV2 {
+ return &timeLimitAppenderV2{
+ AppenderV2: teststorage.NewAppendable().AppenderV2(ctx),
+ maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)),
+ }
+ })
+ } else {
+ sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
+ return &timeLimitAppender{
+ Appender: teststorage.NewAppendable().Appender(ctx),
+ maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)),
+ }
+ })
+ }
+ })
now := time.Now().Add(20 * time.Minute)
app := sl.appender()
@@ -3461,11 +4067,16 @@ func (ts *testScraper) readResponse(ctx context.Context, _ *http.Response, w io.
}
func TestScrapeLoop_RespectTimestamps(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRespectTimestamps(t, appV2)
+ })
+}
+
+func testScrapeLoopRespectTimestamps(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
appTest := teststorage.NewAppendable().Then(s)
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
now := time.Now()
app := sl.appender()
@@ -3480,16 +4091,20 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
V: 1,
},
}
- require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopDiscardTimestamps(t, appV2)
+ })
+}
+
+func testScrapeLoopDiscardTimestamps(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
appTest := teststorage.NewAppendable().Then(s)
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.honorTimestamps = false
})
@@ -3506,15 +4121,20 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
V: 1,
},
}
- require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopDiscardDuplicateLabels(t, appV2)
+ })
+}
+
+func testScrapeLoopDiscardDuplicateLabels(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
appTest := teststorage.NewAppendable().Then(s)
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
// We add a good and a bad metric to check that both are discarded.
app := sl.appender()
@@ -3546,12 +4166,16 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
}
func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopDiscardUnnamedMetrics(t, appV2)
+ })
+}
+
+func testScrapeLoopDiscardUnnamedMetrics(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
appTest := teststorage.NewAppendable().Then(s)
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("drop") {
return labels.FromStrings("no", "name") // This label set will trigger an error.
@@ -3641,6 +4265,12 @@ func TestReusableConfig(t *testing.T) {
}
func TestReuseScrapeCache(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testReuseScrapeCache(t, appV2)
+ })
+}
+
+func testReuseScrapeCache(t *testing.T, appV2 bool) {
var (
app = teststorage.NewAppendable()
cfg = &config.ScrapeConfig{
@@ -3651,7 +4281,8 @@ func TestReuseScrapeCache(t *testing.T) {
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
}
- sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa = selectAppendable(app, appV2)
+ sp, _ = newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
t1 = &Target{
labels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"),
scrapeConfig: &config.ScrapeConfig{
@@ -3825,10 +4456,15 @@ func TestReuseScrapeCache(t *testing.T) {
}
func TestScrapeAddFast(t *testing.T) {
- s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeAddFast(t, appV2)
+ })
+}
- sl, _ := newTestScrapeLoop(t, withAppendable(s))
+func testScrapeAddFast(t *testing.T, appV2 bool) {
+ s := teststorage.New(t)
+
+ sl, _ := newTestScrapeLoop(t, withAppendable(s, appV2))
app := sl.appender()
_, _, _, err := app.append([]byte("up 1\n"), "text/plain", time.Time{})
@@ -3848,6 +4484,12 @@ func TestScrapeAddFast(t *testing.T) {
}
func TestReuseCacheRace(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testReuseCacheRace(t, appV2)
+ })
+}
+
+func testReuseCacheRace(t *testing.T, appV2 bool) {
var (
cfg = &config.ScrapeConfig{
JobName: "Prometheus",
@@ -3858,7 +4500,8 @@ func TestReuseCacheRace(t *testing.T) {
MetricNameEscapingScheme: model.AllowUTF8,
}
buffers = pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) })
- sp, _ = newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, buffers, &Options{}, newTestScrapeMetrics(t))
+ sa = selectAppendable(teststorage.NewAppendable(), appV2)
+ sp, _ = newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, buffers, &Options{}, newTestScrapeMetrics(t))
t1 = &Target{
labels: labels.FromStrings("labelNew", "nameNew"),
scrapeConfig: &config.ScrapeConfig{},
@@ -3888,23 +4531,26 @@ func TestCheckAddError(t *testing.T) {
var appErrs appendErrors
sl, _ := newTestScrapeLoop(t)
// TODO: Check err etc
- _, _ = sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
+ _, _ = sl.checkAddError(nil, nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
require.Equal(t, 1, appErrs.numOutOfOrder)
-
// TODO(bwplotka): Test partial error check and other cases
}
func TestScrapeReportSingleAppender(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeReportSingleAppender(t, appV2)
+ })
+}
+
+func testScrapeReportSingleAppender(t *testing.T, appV2 bool) {
t.Parallel()
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
signal := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(t.Context())
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, scraper := newTestScrapeLoop(t, withAppendable(s, appV2), func(sl *scrapeLoop) {
sl.ctx = ctx
- sl.appendable = s
// Since we're writing samples directly below we need to provide a protocol fallback.
sl.fallbackScrapeProtocol = "text/plain"
})
@@ -3951,8 +4597,13 @@ func TestScrapeReportSingleAppender(t *testing.T) {
}
func TestScrapeReportLimit(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeReportLimit(t, appV2)
+ })
+}
+
+func testScrapeReportLimit(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
cfg := &config.ScrapeConfig{
JobName: "test",
@@ -3967,7 +4618,8 @@ func TestScrapeReportLimit(t *testing.T) {
ts, scrapedTwice := newScrapableServer("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n")
defer ts.Close()
- sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa := selectAppendable(s, appV2)
+ sp, err := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -4007,8 +4659,13 @@ func TestScrapeReportLimit(t *testing.T) {
}
func TestScrapeUTF8(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeUTF8(t, appV2)
+ })
+}
+
+func testScrapeUTF8(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
cfg := &config.ScrapeConfig{
JobName: "test",
@@ -4021,7 +4678,8 @@ func TestScrapeUTF8(t *testing.T) {
ts, scrapedTwice := newScrapableServer("{\"with.dots\"} 42\n")
defer ts.Close()
- sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa := selectAppendable(s, appV2)
+ sp, err := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -4051,7 +4709,13 @@ func TestScrapeUTF8(t *testing.T) {
}
func TestScrapeLoopLabelLimit(t *testing.T) {
- tests := []struct {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopLabelLimit(t, appV2)
+ })
+}
+
+func testScrapeLoopLabelLimit(t *testing.T, appV2 bool) {
+ for _, test := range []struct {
title string
scrapeLabels string
discoveryLabels []string
@@ -4113,14 +4777,12 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
labelLimits: labelLimits{labelValueLengthLimit: 10},
expectErr: true,
},
- }
-
- for _, test := range tests {
+ } {
discoveryLabels := &Target{
labels: labels.FromStrings(test.discoveryLabels...),
}
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2), func(sl *scrapeLoop) {
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
}
@@ -4144,6 +4806,12 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
}
func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testTargetScrapeIntervalAndTimeoutRelabel(t, appV2)
+ })
+}
+
+func testTargetScrapeIntervalAndTimeoutRelabel(t *testing.T, appV2 bool) {
interval, _ := model.ParseDuration("2s")
timeout, _ := model.ParseDuration("500ms")
cfg := &config.ScrapeConfig{
@@ -4170,7 +4838,9 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
},
},
}
- sp, _ := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+
+ sa := selectAppendable(teststorage.NewAppendable(), appV2)
+ sp, _ := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
tgts := []*targetgroup.Group{
{
Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},
@@ -4186,8 +4856,13 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
// Testing whether we can remove trailing .0 from histogram 'le' and summary 'quantile' labels.
func TestLeQuantileReLabel(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testLeQuantileReLabel(t, appV2)
+ })
+}
+
+func testLeQuantileReLabel(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
cfg := &config.ScrapeConfig{
JobName: "test",
@@ -4256,7 +4931,8 @@ test_summary_count 199
ts, scrapedTwice := newScrapableServer(metricsText)
defer ts.Close()
- sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa := selectAppendable(s, appV2)
+ sp, err := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -4305,6 +4981,12 @@ test_summary_count 199
// Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets.
func TestConvertClassicHistogramsToNHCB(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testConvertClassicHistogramsToNHCB(t, appV2)
+ })
+}
+
+func testConvertClassicHistogramsToNHCB(t *testing.T, appV2 bool) {
t.Parallel()
genTestCounterText := func(name string) string {
@@ -4707,10 +5389,8 @@ metric: <
t.Run(fmt.Sprintf("%s with %s", name, metricsTextName), func(t *testing.T) {
t.Parallel()
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = s
+ sl, _ := newTestScrapeLoop(t, withAppendable(s, appV2), func(sl *scrapeLoop) {
sl.alwaysScrapeClassicHist = tc.alwaysScrapeClassicHistograms
sl.convertClassicHistToNHCB = tc.convertClassicHistToNHCB
sl.enableNativeHistogramScraping = true
@@ -4789,8 +5469,13 @@ metric: <
}
func TestTypeUnitReLabel(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testTypeUnitReLabel(t, appV2)
+ })
+}
+
+func testTypeUnitReLabel(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
cfg := &config.ScrapeConfig{
JobName: "test",
@@ -4837,7 +5522,8 @@ disk_usage_bytes 456
ts, scrapedTwice := newScrapableServer(metricsText)
defer ts.Close()
- sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa := selectAppendable(s, appV2)
+ sp, err := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -4875,13 +5561,18 @@ disk_usage_bytes 456
}
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t, appV2)
+ })
+}
+
+func testScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T, appV2 bool) {
signal := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(t.Context())
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.ctx = ctx
- sl.appendable = appTest // Since we're writing samples directly below we need to provide a protocol fallback.
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
+ sl.ctx = ctx // Since we're writing samples directly below we need to provide a protocol fallback.
sl.fallbackScrapeProtocol = "text/plain"
sl.trackTimestampsStaleness = true
})
@@ -4922,8 +5613,13 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
}
func TestScrapeLoopCompression(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopCompression(t, appV2)
+ })
+}
+
+func testScrapeLoopCompression(t *testing.T, appV2 bool) {
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
metricsText := makeTestGauges(10)
@@ -4961,7 +5657,8 @@ func TestScrapeLoopCompression(t *testing.T) {
MetricNameEscapingScheme: model.AllowUTF8,
}
- sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa := selectAppendable(s, appV2)
+ sp, err := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -5133,7 +5830,13 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
// When a scrape contains multiple instances for the same time series we should increment
// prometheus_target_scrapes_sample_duplicate_timestamp_total metric.
func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) {
- sl, _ := newTestScrapeLoop(t)
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopSeriesAddedDuplicates(t, appV2)
+ })
+}
+
+func testScrapeLoopSeriesAddedDuplicates(t *testing.T, appV2 bool) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2))
app := sl.appender()
total, added, seriesAdded, err := app.append([]byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "text/plain", time.Time{})
@@ -5168,32 +5871,37 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) {
// This tests running a full scrape loop and checking that the scrape option
// `native_histogram_min_bucket_factor` is used correctly.
func TestNativeHistogramMaxSchemaSet(t *testing.T) {
- testcases := map[string]struct {
- minBucketFactor string
- expectedSchema int32
- }{
- "min factor not specified": {
- minBucketFactor: "",
- expectedSchema: 3, // Factor 1.09.
- },
- "min factor 1": {
- minBucketFactor: "native_histogram_min_bucket_factor: 1",
- expectedSchema: 3, // Factor 1.09.
- },
- "min factor 2": {
- minBucketFactor: "native_histogram_min_bucket_factor: 2",
- expectedSchema: 0, // Factor 2.00.
- },
- }
- for name, tc := range testcases {
- t.Run(name, func(t *testing.T) {
- t.Parallel()
- testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema)
- })
- }
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ for _, tc := range []struct {
+ name string
+ minBucketFactor string
+ expectedSchema int32
+ }{
+ {
+ name: "min factor not specified",
+ minBucketFactor: "",
+ expectedSchema: 3, // Factor 1.09.
+ },
+ {
+ name: "min factor 1",
+ minBucketFactor: "native_histogram_min_bucket_factor: 1",
+ expectedSchema: 3, // Factor 1.09.
+ },
+ {
+ name: "min factor 2",
+ minBucketFactor: "native_histogram_min_bucket_factor: 2",
+ expectedSchema: 0, // Factor 2.00.
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema, appV2)
+ })
+ }
+ })
}
-func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expectedSchema int32) {
+func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expectedSchema int32, appV2 bool) {
// Create a ProtoBuf message to serve as a Prometheus metric.
nativeHistogram := prometheus.NewHistogram(
prometheus.HistogramOpts{
@@ -5241,11 +5949,12 @@ scrape_configs:
`, minBucketFactor, strings.ReplaceAll(metricsServer.URL, "http://", ""))
s := teststorage.New(t)
- t.Cleanup(func() { _ = s.Close() })
reg := prometheus.NewRegistry()
- mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond)}, nil, nil, s, reg)
+ sa := selectAppendable(s, appV2)
+ mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond)}, nil, nil, sa.V1(), sa.V2(), reg)
require.NoError(t, err)
+
cfg, err := config.Load(configStr, promslog.NewNopLogger())
require.NoError(t, err)
require.NoError(t, mng.ApplyConfig(cfg))
@@ -5301,6 +6010,12 @@ scrape_configs:
}
func TestTargetScrapeConfigWithLabels(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testTargetScrapeConfigWithLabels(t, appV2)
+ })
+}
+
+func testTargetScrapeConfigWithLabels(t *testing.T, appV2 bool) {
t.Parallel()
const (
configTimeout = 1500 * time.Millisecond
@@ -5346,7 +6061,8 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) {
}
}
- sp, err := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa := selectAppendable(teststorage.NewAppendable(), appV2)
+ sp, err := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
t.Cleanup(sp.stop)
@@ -5484,6 +6200,12 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha
// Regression test for the panic fixed in https://github.com/prometheus/prometheus/pull/15523.
func TestScrapePoolScrapeAfterReload(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapePoolScrapeAfterReload(t, appV2)
+ })
+}
+
+func testScrapePoolScrapeAfterReload(t *testing.T, appV2 bool) {
h := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, _ *http.Request) {
_, _ = w.Write([]byte{0x42, 0x42})
@@ -5509,7 +6231,8 @@ func TestScrapePoolScrapeAfterReload(t *testing.T) {
},
}
- p, err := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sa := selectAppendable(teststorage.NewAppendable(), appV2)
+ p, err := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
t.Cleanup(p.stop)
@@ -5529,6 +6252,12 @@ func TestScrapePoolScrapeAfterReload(t *testing.T) {
// The first scrape fails with a parsing error, but the second should
// succeed and cause `metric_1=11` to appear in the appender.
func TestScrapeAppendWithParseError(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeAppendWithParseError(t, appV2)
+ })
+}
+
+func testScrapeAppendWithParseError(t *testing.T, appV2 bool) {
const (
scrape1 = `metric_a 1
`
@@ -5537,7 +6266,7 @@ func TestScrapeAppendWithParseError(t *testing.T) {
)
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2))
now := time.Now()
app := sl.appender()
@@ -5563,17 +6292,22 @@ func TestScrapeAppendWithParseError(t *testing.T) {
V: 11,
},
}
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
// This test covers a case where there's a target with sample_limit set and some samples
// changes between scrapes.
func TestScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendSampleLimitWithDisappearingSeries(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T, appV2 bool) {
const sampleLimit = 4
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.sampleLimit = sampleLimit
})
@@ -5607,7 +6341,7 @@ func TestScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T) {
V: 1,
},
}
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
now = now.Add(time.Minute)
app = sl.appender()
@@ -5622,7 +6356,7 @@ func TestScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T) {
require.Equal(t, 6, samplesScraped)
require.Equal(t, 6, samplesAfterRelabel)
require.Equal(t, 1, createdSeries) // We've added one series before hitting the limit.
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
+ testutil.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
sl.cache.iterDone(false)
now = now.Add(time.Minute)
@@ -5666,17 +6400,22 @@ func TestScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T) {
V: math.Float64frombits(value.StaleNaN),
},
}...)
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
// This test covers a case where there's a target with sample_limit set and each scrape sees a completely
// different set of samples.
func TestScrapeLoopAppendSampleLimitReplaceAllSamples(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopAppendSampleLimitReplaceAllSamples(t, appV2)
+ })
+}
+
+func testScrapeLoopAppendSampleLimitReplaceAllSamples(t *testing.T, appV2 bool) {
const sampleLimit = 4
appTest := teststorage.NewAppendable()
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
- sl.appendable = appTest
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest, appV2), func(sl *scrapeLoop) {
sl.sampleLimit = sampleLimit
})
@@ -5715,7 +6454,7 @@ func TestScrapeLoopAppendSampleLimitReplaceAllSamples(t *testing.T) {
V: 1,
},
}
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
now = now.Add(time.Minute)
app = sl.appender()
@@ -5776,14 +6515,20 @@ func TestScrapeLoopAppendSampleLimitReplaceAllSamples(t *testing.T) {
V: math.Float64frombits(value.StaleNaN),
},
}...)
- requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
+ teststorage.RequireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
}
func TestScrapeLoopDisableStalenessMarkerInjection(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testScrapeLoopDisableStalenessMarkerInjection(t, appV2)
+ })
+}
+
+func testScrapeLoopDisableStalenessMarkerInjection(t *testing.T, appV2 bool) {
loopDone := atomic.NewBool(false)
appTest := teststorage.NewAppendable()
- sl, scraper := newTestScrapeLoop(t, withAppendable(appTest))
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest, appV2))
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
if _, err := w.Write([]byte("metric_a 42\n")); err != nil {
return err
@@ -5832,6 +6577,7 @@ func BenchmarkScrapePoolRestartLoops(b *testing.B) {
ScrapeTimeout: model.Duration(1 * time.Hour),
},
nil,
+ nil,
0,
nil,
nil,
@@ -5857,6 +6603,12 @@ func BenchmarkScrapePoolRestartLoops(b *testing.B) {
// TestNewScrapeLoopHonorLabelsWiring verifies that newScrapeLoop correctly wires
// HonorLabels (not HonorTimestamps) to the sampleMutator.
func TestNewScrapeLoopHonorLabelsWiring(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testNewScrapeLoopHonorLabelsWiring(t, appV2)
+ })
+}
+
+func testNewScrapeLoopHonorLabelsWiring(t *testing.T, appV2 bool) {
// Scraped metric has label "lbl" with value "scraped".
// Discovery target has label "lbl" with value "discovery".
// With honor_labels=true, the scraped value should win.
@@ -5888,7 +6640,6 @@ func TestNewScrapeLoopHonorLabelsWiring(t *testing.T) {
require.NoError(t, err)
s := teststorage.New(t)
- defer s.Close()
cfg := &config.ScrapeConfig{
JobName: "test",
@@ -5900,7 +6651,8 @@ func TestNewScrapeLoopHonorLabelsWiring(t *testing.T) {
MetricNameValidationScheme: model.UTF8Validation,
}
- sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{skipOffsetting: true}, newTestScrapeMetrics(t))
+ sa := selectAppendable(s, appV2)
+ sp, err := newScrapePool(cfg, sa.V1(), sa.V2(), 0, nil, nil, &Options{skipOffsetting: true}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -5934,6 +6686,12 @@ func TestNewScrapeLoopHonorLabelsWiring(t *testing.T) {
}
func TestDropsSeriesFromMetricRelabeling(t *testing.T) {
+ foreachAppendable(t, func(t *testing.T, appV2 bool) {
+ testDropsSeriesFromMetricRelabeling(t, appV2)
+ })
+}
+
+func testDropsSeriesFromMetricRelabeling(t *testing.T, appV2 bool) {
target := &Target{}
relabelConfig := []*relabel.Config{
{
@@ -5949,7 +6707,7 @@ func TestDropsSeriesFromMetricRelabeling(t *testing.T) {
NameValidationScheme: model.UTF8Validation,
},
}
- sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl, _ := newTestScrapeLoop(t, withAppendable(teststorage.NewAppendable(), appV2), func(sl *scrapeLoop) {
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, target, true, relabelConfig)
}
diff --git a/scrape/target.go b/scrape/target.go
index 4265f9e782..1040241bd3 100644
--- a/scrape/target.go
+++ b/scrape/target.go
@@ -454,6 +454,105 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels
return ref, nil
}
+// limitAppender limits the number of total appended samples in a batch.
+type limitAppenderV2 struct {
+ storage.AppenderV2
+
+ limit int
+ i int
+}
+
+func (app *limitAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ // Bypass sample_limit checks only if we have a staleness marker for a known series (ref value is non-zero).
+ // This ensures that if a series is already in TSDB then we always write the marker.
+ if ref == 0 || !value.IsStaleNaN(v) {
+ app.i++
+ if app.i > app.limit {
+ return 0, errSampleLimit
+ }
+ }
+ return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
+}
+
+type timeLimitAppenderV2 struct {
+ storage.AppenderV2
+
+ maxTime int64
+}
+
+func (app *timeLimitAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ if t > app.maxTime {
+ return 0, storage.ErrOutOfBounds
+ }
+
+ return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
+}
+
+// bucketLimitAppender limits the number of total appended samples in a batch.
+type bucketLimitAppenderV2 struct {
+ storage.AppenderV2
+
+ limit int
+}
+
+func (app *bucketLimitAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
+ if h != nil {
+ // Return with an early error if the histogram has too many buckets and the
+ // schema is not exponential, in which case we can't reduce the resolution.
+ if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) {
+ return 0, errBucketLimit
+ }
+ for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
+ if h.Schema <= histogram.ExponentialSchemaMin {
+ return 0, errBucketLimit
+ }
+ if err = h.ReduceResolution(h.Schema - 1); err != nil {
+ return 0, err
+ }
+ }
+ }
+ if fh != nil {
+ // Return with an early error if the histogram has too many buckets and the
+ // schema is not exponential, in which case we can't reduce the resolution.
+ if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) {
+ return 0, errBucketLimit
+ }
+ for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
+ if fh.Schema <= histogram.ExponentialSchemaMin {
+ return 0, errBucketLimit
+ }
+ if err = fh.ReduceResolution(fh.Schema - 1); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
+}
+
+type maxSchemaAppenderV2 struct {
+ storage.AppenderV2
+
+ maxSchema int32
+}
+
+func (app *maxSchemaAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
+ if h != nil {
+ if histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > app.maxSchema {
+ if err = h.ReduceResolution(app.maxSchema); err != nil {
+ return 0, err
+ }
+ }
+ }
+ if fh != nil {
+ if histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > app.maxSchema {
+ if err = fh.ReduceResolution(app.maxSchema); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
+}
+
// PopulateDiscoveredLabels sets base labels on lb from target and group labels and scrape configuration, before relabeling.
func PopulateDiscoveredLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) {
lb.Reset(labels.EmptyLabels())
diff --git a/scrape/target_test.go b/scrape/target_test.go
index 06227da816..ea0aa2009f 100644
--- a/scrape/target_test.go
+++ b/scrape/target_test.go
@@ -35,6 +35,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/teststorage"
)
@@ -610,37 +611,65 @@ func TestBucketLimitAppender(t *testing.T) {
},
}
- appTest := teststorage.NewAppendable()
-
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
- app := &bucketLimitAppender{Appender: appTest.Appender(t.Context()), limit: c.limit}
- ts := int64(10 * time.Minute / time.Millisecond)
- lbls := labels.FromStrings("__name__", "sparse_histogram_series")
- var err error
- if floatHisto {
- fh := c.h.Copy().ToFloat(nil)
- _, err = app.AppendHistogram(0, lbls, ts, nil, fh)
- if c.expectError {
- require.Error(t, err)
+ t.Run("appV2=false", func(t *testing.T) {
+ app := &bucketLimitAppender{Appender: teststorage.NewAppendable().Appender(t.Context()), limit: c.limit}
+ ts := int64(10 * time.Minute / time.Millisecond)
+ lbls := labels.FromStrings("__name__", "sparse_histogram_series")
+ var err error
+ if floatHisto {
+ fh := c.h.Copy().ToFloat(nil)
+ _, err = app.AppendHistogram(0, lbls, ts, nil, fh)
+ if c.expectError {
+ require.Error(t, err)
+ } else {
+ require.Equal(t, c.expectSchema, fh.Schema)
+ require.Equal(t, c.expectBucketCount, len(fh.NegativeBuckets)+len(fh.PositiveBuckets))
+ require.NoError(t, err)
+ }
} else {
- require.Equal(t, c.expectSchema, fh.Schema)
- require.Equal(t, c.expectBucketCount, len(fh.NegativeBuckets)+len(fh.PositiveBuckets))
- require.NoError(t, err)
+ h := c.h.Copy()
+ _, err = app.AppendHistogram(0, lbls, ts, h, nil)
+ if c.expectError {
+ require.Error(t, err)
+ } else {
+ require.Equal(t, c.expectSchema, h.Schema)
+ require.Equal(t, c.expectBucketCount, len(h.NegativeBuckets)+len(h.PositiveBuckets))
+ require.NoError(t, err)
+ }
}
- } else {
- h := c.h.Copy()
- _, err = app.AppendHistogram(0, lbls, ts, h, nil)
- if c.expectError {
- require.Error(t, err)
+ require.NoError(t, app.Commit())
+ })
+ t.Run("appV2=true", func(t *testing.T) {
+ app := &bucketLimitAppenderV2{AppenderV2: teststorage.NewAppendable().AppenderV2(t.Context()), limit: c.limit}
+ ts := int64(10 * time.Minute / time.Millisecond)
+ lbls := labels.FromStrings("__name__", "sparse_histogram_series")
+ var err error
+ if floatHisto {
+ fh := c.h.Copy().ToFloat(nil)
+ _, err = app.Append(0, lbls, 0, ts, 0, nil, fh, storage.AOptions{})
+ if c.expectError {
+ require.Error(t, err)
+ } else {
+ require.Equal(t, c.expectSchema, fh.Schema)
+ require.Equal(t, c.expectBucketCount, len(fh.NegativeBuckets)+len(fh.PositiveBuckets))
+ require.NoError(t, err)
+ }
} else {
- require.Equal(t, c.expectSchema, h.Schema)
- require.Equal(t, c.expectBucketCount, len(h.NegativeBuckets)+len(h.PositiveBuckets))
- require.NoError(t, err)
+ h := c.h.Copy()
+ _, err = app.Append(0, lbls, 0, ts, 0, h, nil, storage.AOptions{})
+ if c.expectError {
+ require.Error(t, err)
+ } else {
+ require.Equal(t, c.expectSchema, h.Schema)
+ require.Equal(t, c.expectBucketCount, len(h.NegativeBuckets)+len(h.PositiveBuckets))
+ require.NoError(t, err)
+ }
}
- }
- require.NoError(t, app.Commit())
+ require.NoError(t, app.Commit())
+ })
})
}
}
@@ -696,27 +725,45 @@ func TestMaxSchemaAppender(t *testing.T) {
},
}
- appTest := teststorage.NewAppendable()
-
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
- app := &maxSchemaAppender{Appender: appTest.Appender(t.Context()), maxSchema: c.maxSchema}
- ts := int64(10 * time.Minute / time.Millisecond)
- lbls := labels.FromStrings("__name__", "sparse_histogram_series")
- var err error
- if floatHisto {
- fh := c.h.Copy().ToFloat(nil)
- _, err = app.AppendHistogram(0, lbls, ts, nil, fh)
- require.Equal(t, c.expectSchema, fh.Schema)
- require.NoError(t, err)
- } else {
- h := c.h.Copy()
- _, err = app.AppendHistogram(0, lbls, ts, h, nil)
- require.Equal(t, c.expectSchema, h.Schema)
- require.NoError(t, err)
- }
- require.NoError(t, app.Commit())
+ t.Run("appV2=false", func(t *testing.T) {
+ app := &maxSchemaAppender{Appender: teststorage.NewAppendable().Appender(t.Context()), maxSchema: c.maxSchema}
+ ts := int64(10 * time.Minute / time.Millisecond)
+ lbls := labels.FromStrings("__name__", "sparse_histogram_series")
+ var err error
+ if floatHisto {
+ fh := c.h.Copy().ToFloat(nil)
+ _, err = app.AppendHistogram(0, lbls, ts, nil, fh)
+ require.Equal(t, c.expectSchema, fh.Schema)
+ require.NoError(t, err)
+ } else {
+ h := c.h.Copy()
+ _, err = app.AppendHistogram(0, lbls, ts, h, nil)
+ require.Equal(t, c.expectSchema, h.Schema)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ })
+ t.Run("appV2=true", func(t *testing.T) {
+ app := &maxSchemaAppenderV2{AppenderV2: teststorage.NewAppendable().AppenderV2(t.Context()), maxSchema: c.maxSchema}
+ ts := int64(10 * time.Minute / time.Millisecond)
+ lbls := labels.FromStrings("__name__", "sparse_histogram_series")
+ var err error
+ if floatHisto {
+ fh := c.h.Copy().ToFloat(nil)
+ _, err = app.Append(0, lbls, 0, ts, 0, nil, fh, storage.AOptions{})
+ require.Equal(t, c.expectSchema, fh.Schema)
+ require.NoError(t, err)
+ } else {
+ h := c.h.Copy()
+ _, err = app.Append(0, lbls, 0, ts, 0, h, nil, storage.AOptions{})
+ require.Equal(t, c.expectSchema, h.Schema)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ })
})
}
}
@@ -724,32 +771,65 @@ func TestMaxSchemaAppender(t *testing.T) {
// Test sample_limit when a scrape contains Native Histograms.
func TestAppendWithSampleLimitAndNativeHistogram(t *testing.T) {
- appTest := teststorage.NewAppendable()
-
now := time.Now()
- app := appenderWithLimits(appTest.Appender(t.Context()), 2, 0, histogram.ExponentialSchemaMax)
+ t.Run("appV2=false", func(t *testing.T) {
+ app := appenderWithLimits(teststorage.NewAppendable().Appender(t.Context()), 2, 0, histogram.ExponentialSchemaMax)
- // sample_limit is set to 2, so first two scrapes should work
- _, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "foo"), timestamp.FromTime(now), 1)
- require.NoError(t, err)
+ // sample_limit is set to 2, so first two scrapes should work
+ _, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "foo"), timestamp.FromTime(now), 1)
+ require.NoError(t, err)
- // Second sample, should be ok.
- _, err = app.AppendHistogram(
- 0,
- labels.FromStrings(model.MetricNameLabel, "my_histogram1"),
- timestamp.FromTime(now),
- &histogram.Histogram{},
- nil,
- )
- require.NoError(t, err)
+ // Second sample, should be ok.
+ _, err = app.AppendHistogram(
+ 0,
+ labels.FromStrings(model.MetricNameLabel, "my_histogram1"),
+ timestamp.FromTime(now),
+ &histogram.Histogram{},
+ nil,
+ )
+ require.NoError(t, err)
- // This is third sample with sample_limit=2, it should trigger errSampleLimit.
- _, err = app.AppendHistogram(
- 0,
- labels.FromStrings(model.MetricNameLabel, "my_histogram2"),
- timestamp.FromTime(now),
- &histogram.Histogram{},
- nil,
- )
- require.ErrorIs(t, err, errSampleLimit)
+ // This is third sample with sample_limit=2, it should trigger errSampleLimit.
+ _, err = app.AppendHistogram(
+ 0,
+ labels.FromStrings(model.MetricNameLabel, "my_histogram2"),
+ timestamp.FromTime(now),
+ &histogram.Histogram{},
+ nil,
+ )
+ require.ErrorIs(t, err, errSampleLimit)
+ })
+ t.Run("appV2=true", func(t *testing.T) {
+ app := appenderV2WithLimits(teststorage.NewAppendable().AppenderV2(t.Context()), 2, 0, histogram.ExponentialSchemaMax)
+
+ // sample_limit is set to 2, so first two scrapes should work
+ _, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "foo"), 0, timestamp.FromTime(now), 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ // Second sample, should be ok.
+ _, err = app.Append(
+ 0,
+ labels.FromStrings(model.MetricNameLabel, "my_histogram1"),
+ 0,
+ timestamp.FromTime(now),
+ 0,
+ &histogram.Histogram{},
+ nil,
+ storage.AOptions{},
+ )
+ require.NoError(t, err)
+
+ // This is third sample with sample_limit=2, it should trigger errSampleLimit.
+ _, err = app.Append(
+ 0,
+ labels.FromStrings(model.MetricNameLabel, "my_histogram2"),
+ 0,
+ timestamp.FromTime(now),
+ 0,
+ &histogram.Histogram{},
+ nil,
+ storage.AOptions{},
+ )
+ require.ErrorIs(t, err, errSampleLimit)
+ })
}
diff --git a/scripts/check-go-mod-version.sh b/scripts/check-go-mod-version.sh
index d651a62036..4fd60b86b9 100755
--- a/scripts/check-go-mod-version.sh
+++ b/scripts/check-go-mod-version.sh
@@ -1,12 +1,71 @@
#!/usr/bin/env bash
+#
+# Description: Validate `go` directive in various Go mod files.
-readarray -t mod_files < <(find . -type f -name go.mod)
+set -u -o pipefail
+
+echo "Checking version support"
+
+version_url='https://go.dev/dl/?mode=json'
+get_supported_version() {
+ curl -s -f "${version_url}" \
+ | jq -r '.[].version' \
+ | sed 's/^go//' \
+ | cut -f2 -d'.' \
+ | sort -V \
+ | head -n1
+}
+
+get_current_version() {
+ awk '$1 == "go" {print $2}' go.mod \
+ | cut -f2 -d'.'
+}
+
+supported_version="$(get_supported_version)"
+if [[ "${supported_version}" -le 0 ]]; then
+ echo "Error getting supported version from '${version_url}'"
+ exit 1
+fi
+current_version="$(get_current_version)"
+if [[ "${current_version}" -le 0 ]]; then
+ echo "Error getting current version from go.mod"
+ exit 1
+fi
+
+if [[ "${current_version}" -gt "${supported_version}" ]] ; then
+ echo "Go mod version (1.${current_version}) is newer than upstream supported version (1.${supported_version})"
+ exit 1
+fi
+
+readarray -t mod_files < <(git ls-files go.mod go.work '*/go.mod' || find . -type f -name go.mod -or -name go.work)
echo "Checking files ${mod_files[@]}"
matches=$(awk '$1 == "go" {print $2}' "${mod_files[@]}" | sort -u | wc -l)
if [[ "${matches}" -ne 1 ]]; then
- echo 'Not all go.mod files have matching go versions'
+ echo 'Not all go.mod/go.work files have matching go versions'
exit 1
fi
+
+ci_workflow=".github/workflows/ci.yml"
+if [[ -f "${ci_workflow}" ]] && yq -e '.jobs.test_go_oldest' "${ci_workflow}" > /dev/null 2>&1; then
+ echo "Checking CI workflow test_go_oldest uses N-1 Go version"
+
+ # Extract Go version from test_go_oldest job.
+ get_test_go_oldest_version() {
+ yq '.jobs.test_go_oldest.container.image' "${ci_workflow}" \
+ | grep -oP 'golang-builder:1\.\K[0-9]+'
+ }
+
+ test_go_oldest_version="$(get_test_go_oldest_version)"
+ if [[ -z "${test_go_oldest_version}" || "${test_go_oldest_version}" -le 0 ]]; then
+ echo "Error: Could not extract Go version from test_go_oldest job in ${ci_workflow}"
+ exit 1
+ fi
+
+ if [[ "${test_go_oldest_version}" -ne "${supported_version}" ]]; then
+ echo "Error: test_go_oldest uses Go 1.${test_go_oldest_version}, but should use Go 1.${supported_version} (oldest supported version)"
+ exit 1
+ fi
+fi
diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml
index ae5fdc80ec..16467b897e 100644
--- a/scripts/golangci-lint.yml
+++ b/scripts/golangci-lint.yml
@@ -28,7 +28,7 @@ jobs:
with:
persist-credentials: false
- name: Install Go
- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+ uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25.x
- name: Install snmp_exporter/generator dependencies
diff --git a/storage/buffer.go b/storage/buffer.go
index 223c4fa42b..cdf8879f21 100644
--- a/storage/buffer.go
+++ b/storage/buffer.go
@@ -119,13 +119,16 @@ func (b *BufferedSeriesIterator) Next() chunkenc.ValueType {
return chunkenc.ValNone
case chunkenc.ValFloat:
t, f := b.it.At()
- b.buf.addF(fSample{t: t, f: f})
+ st := b.it.AtST()
+ b.buf.addF(fSample{st: st, t: t, f: f})
case chunkenc.ValHistogram:
t, h := b.it.AtHistogram(&b.hReader)
- b.buf.addH(hSample{t: t, h: h})
+ st := b.it.AtST()
+ b.buf.addH(hSample{st: st, t: t, h: h})
case chunkenc.ValFloatHistogram:
t, fh := b.it.AtFloatHistogram(&b.fhReader)
- b.buf.addFH(fhSample{t: t, fh: fh})
+ st := b.it.AtST()
+ b.buf.addFH(fhSample{st: st, t: t, fh: fh})
default:
panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
}
@@ -157,20 +160,29 @@ func (b *BufferedSeriesIterator) AtT() int64 {
return b.it.AtT()
}
+// AtST returns the current sample's start timestamp of the iterator.
+func (b *BufferedSeriesIterator) AtST() int64 {
+ return b.it.AtST()
+}
+
// Err returns the last encountered error.
func (b *BufferedSeriesIterator) Err() error {
return b.it.Err()
}
type fSample struct {
- t int64
- f float64
+ st, t int64
+ f float64
}
func (s fSample) T() int64 {
return s.t
}
+func (s fSample) ST() int64 {
+ return s.st
+}
+
func (s fSample) F() float64 {
return s.f
}
@@ -192,14 +204,18 @@ func (s fSample) Copy() chunks.Sample {
}
type hSample struct {
- t int64
- h *histogram.Histogram
+ st, t int64
+ h *histogram.Histogram
}
func (s hSample) T() int64 {
return s.t
}
+func (s hSample) ST() int64 {
+ return s.st
+}
+
func (hSample) F() float64 {
panic("F() called for hSample")
}
@@ -217,18 +233,22 @@ func (hSample) Type() chunkenc.ValueType {
}
func (s hSample) Copy() chunks.Sample {
- return hSample{t: s.t, h: s.h.Copy()}
+ return hSample{st: s.st, t: s.t, h: s.h.Copy()}
}
type fhSample struct {
- t int64
- fh *histogram.FloatHistogram
+ st, t int64
+ fh *histogram.FloatHistogram
}
func (s fhSample) T() int64 {
return s.t
}
+func (s fhSample) ST() int64 {
+ return s.st
+}
+
func (fhSample) F() float64 {
panic("F() called for fhSample")
}
@@ -246,7 +266,7 @@ func (fhSample) Type() chunkenc.ValueType {
}
func (s fhSample) Copy() chunks.Sample {
- return fhSample{t: s.t, fh: s.fh.Copy()}
+ return fhSample{st: s.st, t: s.t, fh: s.fh.Copy()}
}
type sampleRing struct {
@@ -329,6 +349,7 @@ func (r *sampleRing) iterator() *SampleRingIterator {
type SampleRingIterator struct {
r *sampleRing
i int
+ st int64
t int64
f float64
h *histogram.Histogram
@@ -350,21 +371,25 @@ func (it *SampleRingIterator) Next() chunkenc.ValueType {
switch it.r.bufInUse {
case fBuf:
s := it.r.atF(it.i)
+ it.st = s.st
it.t = s.t
it.f = s.f
return chunkenc.ValFloat
case hBuf:
s := it.r.atH(it.i)
+ it.st = s.st
it.t = s.t
it.h = s.h
return chunkenc.ValHistogram
case fhBuf:
s := it.r.atFH(it.i)
+ it.st = s.st
it.t = s.t
it.fh = s.fh
return chunkenc.ValFloatHistogram
}
s := it.r.at(it.i)
+ it.st = s.ST()
it.t = s.T()
switch s.Type() {
case chunkenc.ValHistogram:
@@ -410,6 +435,10 @@ func (it *SampleRingIterator) AtT() int64 {
return it.t
}
+func (it *SampleRingIterator) AtST() int64 {
+ return it.st
+}
+
func (r *sampleRing) at(i int) chunks.Sample {
j := (r.f + i) % len(r.iBuf)
return r.iBuf[j]
@@ -651,6 +680,7 @@ func addH(s hSample, buf []hSample, r *sampleRing) []hSample {
}
buf[r.i].t = s.t
+ buf[r.i].st = s.st
if buf[r.i].h == nil {
buf[r.i].h = s.h.Copy()
} else {
@@ -695,6 +725,7 @@ func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample {
}
buf[r.i].t = s.t
+ buf[r.i].st = s.st
if buf[r.i].fh == nil {
buf[r.i].fh = s.fh.Copy()
} else {
diff --git a/storage/buffer_test.go b/storage/buffer_test.go
index fc6603d4a5..61d1601bc0 100644
--- a/storage/buffer_test.go
+++ b/storage/buffer_test.go
@@ -61,10 +61,9 @@ func TestSampleRing(t *testing.T) {
input := []fSample{}
for _, t := range c.input {
- input = append(input, fSample{
- t: t,
- f: float64(rand.Intn(100)),
- })
+ // Randomize start timestamp to make sure it does not affect the
+ // outcome.
+ input = append(input, fSample{st: rand.Int63(), t: t, f: float64(rand.Intn(100))})
}
for i, s := range input {
@@ -90,6 +89,24 @@ func TestSampleRing(t *testing.T) {
}
}
+func TestSampleRingFloatST(t *testing.T) {
+ r := newSampleRing(10, 5, chunkenc.ValNone)
+ require.Empty(t, r.fBuf)
+ require.Empty(t, r.hBuf)
+ require.Empty(t, r.fhBuf)
+ require.Empty(t, r.iBuf)
+
+ r.addF(fSample{st: 100, t: 11, f: 3.14})
+ it := r.iterator()
+
+ require.Equal(t, chunkenc.ValFloat, it.Next())
+ ts, f := it.At()
+ require.Equal(t, int64(11), ts)
+ require.Equal(t, 3.14, f)
+ require.Equal(t, int64(100), it.AtST())
+ require.Equal(t, chunkenc.ValNone, it.Next())
+}
+
func TestSampleRingMixed(t *testing.T) {
h1 := tsdbutil.GenerateTestHistogram(1)
h2 := tsdbutil.GenerateTestHistogram(2)
@@ -102,39 +119,43 @@ func TestSampleRingMixed(t *testing.T) {
require.Empty(t, r.iBuf)
// But then mixed adds should work as expected.
- r.addF(fSample{t: 1, f: 3.14})
- r.addH(hSample{t: 2, h: h1})
+ r.addF(fSample{st: 10, t: 11, f: 3.14})
+ r.addH(hSample{st: 20, t: 21, h: h1})
it := r.iterator()
require.Equal(t, chunkenc.ValFloat, it.Next())
ts, f := it.At()
- require.Equal(t, int64(1), ts)
+ require.Equal(t, int64(11), ts)
require.Equal(t, 3.14, f)
+ require.Equal(t, int64(10), it.AtST())
require.Equal(t, chunkenc.ValHistogram, it.Next())
var h *histogram.Histogram
ts, h = it.AtHistogram()
- require.Equal(t, int64(2), ts)
+ require.Equal(t, int64(21), ts)
require.Equal(t, h1, h)
+ require.Equal(t, int64(20), it.AtST())
require.Equal(t, chunkenc.ValNone, it.Next())
r.reset()
it = r.iterator()
require.Equal(t, chunkenc.ValNone, it.Next())
- r.addF(fSample{t: 3, f: 4.2})
- r.addH(hSample{t: 4, h: h2})
+ r.addF(fSample{st: 30, t: 31, f: 4.2})
+ r.addH(hSample{st: 40, t: 41, h: h2})
it = r.iterator()
require.Equal(t, chunkenc.ValFloat, it.Next())
ts, f = it.At()
- require.Equal(t, int64(3), ts)
+ require.Equal(t, int64(31), ts)
require.Equal(t, 4.2, f)
+ require.Equal(t, int64(30), it.AtST())
require.Equal(t, chunkenc.ValHistogram, it.Next())
ts, h = it.AtHistogram()
- require.Equal(t, int64(4), ts)
+ require.Equal(t, int64(41), ts)
require.Equal(t, h2, h)
+ require.Equal(t, int64(40), it.AtST())
require.Equal(t, chunkenc.ValNone, it.Next())
}
@@ -160,44 +181,50 @@ func TestSampleRingAtFloatHistogram(t *testing.T) {
it := r.iterator()
require.Equal(t, chunkenc.ValNone, it.Next())
- r.addFH(fhSample{t: 1, fh: fh1})
- r.addFH(fhSample{t: 2, fh: fh2})
+ r.addFH(fhSample{st: 10, t: 11, fh: fh1})
+ r.addFH(fhSample{st: 20, t: 21, fh: fh2})
it = r.iterator()
require.Equal(t, chunkenc.ValFloatHistogram, it.Next())
ts, fh = it.AtFloatHistogram(fh)
- require.Equal(t, int64(1), ts)
+ require.Equal(t, int64(11), ts)
require.Equal(t, fh1, fh)
+ require.Equal(t, int64(10), it.AtST())
require.Equal(t, chunkenc.ValFloatHistogram, it.Next())
ts, fh = it.AtFloatHistogram(fh)
- require.Equal(t, int64(2), ts)
+ require.Equal(t, int64(21), ts)
require.Equal(t, fh2, fh)
+ require.Equal(t, int64(20), it.AtST())
require.Equal(t, chunkenc.ValNone, it.Next())
r.reset()
it = r.iterator()
require.Equal(t, chunkenc.ValNone, it.Next())
- r.addH(hSample{t: 3, h: h1})
- r.addH(hSample{t: 4, h: h2})
+ r.addH(hSample{st: 30, t: 31, h: h1})
+ r.addH(hSample{st: 40, t: 41, h: h2})
it = r.iterator()
require.Equal(t, chunkenc.ValHistogram, it.Next())
ts, h = it.AtHistogram()
- require.Equal(t, int64(3), ts)
+ require.Equal(t, int64(31), ts)
require.Equal(t, h1, h)
+ require.Equal(t, int64(30), it.AtST())
ts, fh = it.AtFloatHistogram(fh)
- require.Equal(t, int64(3), ts)
+ require.Equal(t, int64(31), ts)
require.Equal(t, h1.ToFloat(nil), fh)
+ require.Equal(t, int64(30), it.AtST())
require.Equal(t, chunkenc.ValHistogram, it.Next())
ts, h = it.AtHistogram()
- require.Equal(t, int64(4), ts)
+ require.Equal(t, int64(41), ts)
require.Equal(t, h2, h)
+ require.Equal(t, int64(40), it.AtST())
ts, fh = it.AtFloatHistogram(fh)
- require.Equal(t, int64(4), ts)
+ require.Equal(t, int64(41), ts)
require.Equal(t, h2.ToFloat(nil), fh)
+ require.Equal(t, int64(40), it.AtST())
require.Equal(t, chunkenc.ValNone, it.Next())
}
@@ -209,59 +236,63 @@ func TestBufferedSeriesIterator(t *testing.T) {
bit := it.Buffer()
for bit.Next() == chunkenc.ValFloat {
t, f := bit.At()
- b = append(b, fSample{t: t, f: f})
+ st := bit.AtST()
+ b = append(b, fSample{st: st, t: t, f: f})
}
require.Equal(t, exp, b, "buffer mismatch")
}
- sampleEq := func(ets int64, ev float64) {
+ sampleEq := func(est, ets int64, ev float64) {
ts, v := it.At()
+ st := it.AtST()
+ require.Equal(t, est, st, "start timestamp mismatch")
require.Equal(t, ets, ts, "timestamp mismatch")
require.Equal(t, ev, v, "value mismatch")
}
- prevSampleEq := func(ets int64, ev float64, eok bool) {
+ prevSampleEq := func(est, ets int64, ev float64, eok bool) {
s, ok := it.PeekBack(1)
require.Equal(t, eok, ok, "exist mismatch")
+ require.Equal(t, est, s.ST(), "start timestamp mismatch")
require.Equal(t, ets, s.T(), "timestamp mismatch")
require.Equal(t, ev, s.F(), "value mismatch")
}
it = NewBufferIterator(NewListSeriesIterator(samples{
- fSample{t: 1, f: 2},
- fSample{t: 2, f: 3},
- fSample{t: 3, f: 4},
- fSample{t: 4, f: 5},
- fSample{t: 5, f: 6},
- fSample{t: 99, f: 8},
- fSample{t: 100, f: 9},
- fSample{t: 101, f: 10},
+ fSample{st: -1, t: 1, f: 2},
+ fSample{st: 1, t: 2, f: 3},
+ fSample{st: 2, t: 3, f: 4},
+ fSample{st: 3, t: 4, f: 5},
+ fSample{st: 3, t: 5, f: 6},
+ fSample{st: 50, t: 99, f: 8},
+ fSample{st: 99, t: 100, f: 9},
+ fSample{st: 100, t: 101, f: 10},
}), 2)
require.Equal(t, chunkenc.ValFloat, it.Seek(-123), "seek failed")
- sampleEq(1, 2)
- prevSampleEq(0, 0, false)
+ sampleEq(-1, 1, 2)
+ prevSampleEq(0, 0, 0, false)
bufferEq(nil)
require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed")
- sampleEq(2, 3)
- prevSampleEq(1, 2, true)
- bufferEq([]fSample{{t: 1, f: 2}})
+ sampleEq(1, 2, 3)
+ prevSampleEq(-1, 1, 2, true)
+ bufferEq([]fSample{{st: -1, t: 1, f: 2}})
require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed")
require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed")
require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed")
- sampleEq(5, 6)
- prevSampleEq(4, 5, true)
- bufferEq([]fSample{{t: 2, f: 3}, {t: 3, f: 4}, {t: 4, f: 5}})
+ sampleEq(3, 5, 6)
+ prevSampleEq(3, 4, 5, true)
+ bufferEq([]fSample{{st: 1, t: 2, f: 3}, {st: 2, t: 3, f: 4}, {st: 3, t: 4, f: 5}})
require.Equal(t, chunkenc.ValFloat, it.Seek(5), "seek failed")
- sampleEq(5, 6)
- prevSampleEq(4, 5, true)
- bufferEq([]fSample{{t: 2, f: 3}, {t: 3, f: 4}, {t: 4, f: 5}})
+ sampleEq(3, 5, 6)
+ prevSampleEq(3, 4, 5, true)
+ bufferEq([]fSample{{st: 1, t: 2, f: 3}, {st: 2, t: 3, f: 4}, {st: 3, t: 4, f: 5}})
require.Equal(t, chunkenc.ValFloat, it.Seek(101), "seek failed")
- sampleEq(101, 10)
- prevSampleEq(100, 9, true)
- bufferEq([]fSample{{t: 99, f: 8}, {t: 100, f: 9}})
+ sampleEq(100, 101, 10)
+ prevSampleEq(99, 100, 9, true)
+ bufferEq([]fSample{{st: 50, t: 99, f: 8}, {st: 99, t: 100, f: 9}})
require.Equal(t, chunkenc.ValNone, it.Next(), "next succeeded unexpectedly")
require.Equal(t, chunkenc.ValNone, it.Seek(1024), "seek succeeded unexpectedly")
@@ -402,6 +433,10 @@ func (*mockSeriesIterator) AtT() int64 {
return 0 // Not really mocked.
}
+func (*mockSeriesIterator) AtST() int64 {
+ return 0 // Not really mocked.
+}
+
type fakeSeriesIterator struct {
nsamples int64
step int64
@@ -428,6 +463,10 @@ func (it *fakeSeriesIterator) AtT() int64 {
return it.idx * it.step
}
+func (*fakeSeriesIterator) AtST() int64 {
+ return 0 // No start timestamps in this fake iterator.
+}
+
func (it *fakeSeriesIterator) Next() chunkenc.ValueType {
it.idx++
if it.idx >= it.nsamples {
diff --git a/storage/errors_test.go b/storage/errors_test.go
index 0e7277bf8b..706719d137 100644
--- a/storage/errors_test.go
+++ b/storage/errors_test.go
@@ -20,6 +20,24 @@ import (
"github.com/stretchr/testify/require"
)
+func TestAppendPartialErrorToError(t *testing.T) {
+ // nil receiver returns nil.
+ var nilErr *AppendPartialError
+ require.NoError(t, nilErr.ToError())
+
+ // Empty ExemplarErrors returns nil.
+ emptyErr := &AppendPartialError{}
+ require.NoError(t, emptyErr.ToError())
+
+ // Also test explicitly empty slice.
+ emptySliceErr := &AppendPartialError{ExemplarErrors: []error{}}
+ require.NoError(t, emptySliceErr.ToError())
+
+ // Non-empty ExemplarErrors returns the error.
+ nonEmptyErr := &AppendPartialError{ExemplarErrors: []error{ErrOutOfOrderExemplar}}
+ require.ErrorIs(t, nonEmptyErr.ToError(), nonEmptyErr)
+}
+
func TestErrDuplicateSampleForTimestamp(t *testing.T) {
// All errDuplicateSampleForTimestamp are ErrDuplicateSampleForTimestamp
require.ErrorIs(t, ErrDuplicateSampleForTimestamp, errDuplicateSampleForTimestamp{})
diff --git a/storage/fanout.go b/storage/fanout.go
index afcf993b3f..21f5f715e4 100644
--- a/storage/fanout.go
+++ b/storage/fanout.go
@@ -136,6 +136,19 @@ func (f *fanout) Appender(ctx context.Context) Appender {
}
}
+func (f *fanout) AppenderV2(ctx context.Context) AppenderV2 {
+ primary := f.primary.AppenderV2(ctx)
+ secondaries := make([]AppenderV2, 0, len(f.secondaries))
+ for _, storage := range f.secondaries {
+ secondaries = append(secondaries, storage.AppenderV2(ctx))
+ }
+ return &fanoutAppenderV2{
+ logger: f.logger,
+ primary: primary,
+ secondaries: secondaries,
+ }
+}
+
// Close closes the storage and all its underlying resources.
func (f *fanout) Close() error {
errs := []error{
@@ -276,5 +289,61 @@ func (f *fanoutAppender) Rollback() (err error) {
f.logger.Error("Squashed rollback error on rollback", "err", rollbackErr)
}
}
- return nil
+ return err
+}
+
+type fanoutAppenderV2 struct {
+ logger *slog.Logger
+
+ primary AppenderV2
+ secondaries []AppenderV2
+}
+
+func (f *fanoutAppenderV2) Append(ref SeriesRef, l labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts AOptions) (SeriesRef, error) {
+ var partialErr *AppendPartialError
+
+ ref, err := f.primary.Append(ref, l, st, t, v, h, fh, opts)
+ partialErr, err = partialErr.Handle(err)
+ if err != nil {
+ return ref, err
+ }
+
+ for _, appender := range f.secondaries {
+ _, serr := appender.Append(ref, l, st, t, v, h, fh, opts)
+ partialErr, serr = partialErr.Handle(serr)
+ if serr != nil {
+ return ref, serr
+ }
+ }
+ return ref, partialErr.ToError()
+}
+
+func (f *fanoutAppenderV2) Commit() (err error) {
+ err = f.primary.Commit()
+
+ for _, appender := range f.secondaries {
+ if err == nil {
+ err = appender.Commit()
+ } else {
+ if rollbackErr := appender.Rollback(); rollbackErr != nil {
+ f.logger.Error("Squashed rollback error on commit", "err", rollbackErr)
+ }
+ }
+ }
+ return err
+}
+
+func (f *fanoutAppenderV2) Rollback() (err error) {
+ err = f.primary.Rollback()
+
+ for _, appender := range f.secondaries {
+ rollbackErr := appender.Rollback()
+ switch {
+ case err == nil:
+ err = rollbackErr
+ case rollbackErr != nil:
+ f.logger.Error("Squashed rollback error on rollback", "err", rollbackErr)
+ }
+ }
+ return err
}
diff --git a/storage/fanout_test.go b/storage/fanout_test.go
index ed4cf17696..027511aa3a 100644
--- a/storage/fanout_test.go
+++ b/storage/fanout_test.go
@@ -16,16 +16,20 @@ package storage_test
import (
"context"
"errors"
+ "strconv"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/teststorage"
+ "github.com/prometheus/prometheus/util/testutil"
)
func TestFanout_SelectSorted(t *testing.T) {
@@ -36,7 +40,6 @@ func TestFanout_SelectSorted(t *testing.T) {
ctx := context.Background()
priStorage := teststorage.New(t)
- defer priStorage.Close()
app1 := priStorage.Appender(ctx)
app1.Append(0, inputLabel, 0, 0)
inputTotalSize++
@@ -48,7 +51,6 @@ func TestFanout_SelectSorted(t *testing.T) {
require.NoError(t, err)
remoteStorage1 := teststorage.New(t)
- defer remoteStorage1.Close()
app2 := remoteStorage1.Appender(ctx)
app2.Append(0, inputLabel, 3000, 3)
inputTotalSize++
@@ -60,7 +62,6 @@ func TestFanout_SelectSorted(t *testing.T) {
require.NoError(t, err)
remoteStorage2 := teststorage.New(t)
- defer remoteStorage2.Close()
app3 := remoteStorage2.Appender(ctx)
app3.Append(0, inputLabel, 6000, 6)
@@ -132,9 +133,113 @@ func TestFanout_SelectSorted(t *testing.T) {
})
}
+func TestFanout_SelectSorted_AppenderV2(t *testing.T) {
+ inputLabel := labels.FromStrings(model.MetricNameLabel, "a")
+ outputLabel := labels.FromStrings(model.MetricNameLabel, "a")
+
+ inputTotalSize := 0
+
+ priStorage := teststorage.New(t)
+ app1 := priStorage.AppenderV2(t.Context())
+ _, err := app1.Append(0, inputLabel, 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+ _, err = app1.Append(0, inputLabel, 0, 1000, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+ _, err = app1.Append(0, inputLabel, 0, 2000, 2, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+ require.NoError(t, app1.Commit())
+
+ remoteStorage1 := teststorage.New(t)
+ app2 := remoteStorage1.AppenderV2(t.Context())
+ _, err = app2.Append(0, inputLabel, 0, 3000, 3, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+ _, err = app2.Append(0, inputLabel, 0, 4000, 4, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+ _, err = app2.Append(0, inputLabel, 0, 5000, 5, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+ require.NoError(t, app2.Commit())
+
+ remoteStorage2 := teststorage.New(t)
+ app3 := remoteStorage2.AppenderV2(t.Context())
+ _, err = app3.Append(0, inputLabel, 0, 6000, 6, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+ _, err = app3.Append(0, inputLabel, 0, 7000, 7, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+ _, err = app3.Append(0, inputLabel, 0, 8000, 8, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ inputTotalSize++
+
+ require.NoError(t, app3.Commit())
+
+ fanoutStorage := storage.NewFanout(nil, priStorage, remoteStorage1, remoteStorage2)
+
+ t.Run("querier", func(t *testing.T) {
+ querier, err := fanoutStorage.Querier(0, 8000)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a")
+ require.NoError(t, err)
+
+ seriesSet := querier.Select(t.Context(), true, nil, matcher)
+
+ result := make(map[int64]float64)
+ var labelsResult labels.Labels
+ var iterator chunkenc.Iterator
+ for seriesSet.Next() {
+ series := seriesSet.At()
+ seriesLabels := series.Labels()
+ labelsResult = seriesLabels
+ iterator := series.Iterator(iterator)
+ for iterator.Next() == chunkenc.ValFloat {
+ timestamp, value := iterator.At()
+ result[timestamp] = value
+ }
+ }
+
+ require.Equal(t, labelsResult, outputLabel)
+ require.Len(t, result, inputTotalSize)
+ })
+ t.Run("chunk querier", func(t *testing.T) {
+ querier, err := fanoutStorage.ChunkQuerier(0, 8000)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a")
+ require.NoError(t, err)
+
+ seriesSet := storage.NewSeriesSetFromChunkSeriesSet(querier.Select(t.Context(), true, nil, matcher))
+
+ result := make(map[int64]float64)
+ var labelsResult labels.Labels
+ var iterator chunkenc.Iterator
+ for seriesSet.Next() {
+ series := seriesSet.At()
+ seriesLabels := series.Labels()
+ labelsResult = seriesLabels
+ iterator := series.Iterator(iterator)
+ for iterator.Next() == chunkenc.ValFloat {
+ timestamp, value := iterator.At()
+ result[timestamp] = value
+ }
+ }
+
+ require.NoError(t, seriesSet.Err())
+ require.Equal(t, labelsResult, outputLabel)
+ require.Len(t, result, inputTotalSize)
+ })
+}
+
func TestFanoutErrors(t *testing.T) {
workingStorage := teststorage.New(t)
- defer workingStorage.Close()
cases := []struct {
primary storage.Storage
@@ -224,9 +329,10 @@ type errChunkQuerier struct{ errQuerier }
func (errStorage) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) {
return errChunkQuerier{}, nil
}
-func (errStorage) Appender(context.Context) storage.Appender { return nil }
-func (errStorage) StartTime() (int64, error) { return 0, nil }
-func (errStorage) Close() error { return nil }
+func (errStorage) Appender(context.Context) storage.Appender { return nil }
+func (errStorage) AppenderV2(context.Context) storage.AppenderV2 { return nil }
+func (errStorage) StartTime() (int64, error) { return 0, nil }
+func (errStorage) Close() error { return nil }
func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
return storage.ErrSeriesSet(errSelect)
@@ -245,3 +351,254 @@ func (errQuerier) Close() error { return nil }
func (errChunkQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.ChunkSeriesSet {
return storage.ErrChunkSeriesSet(errSelect)
}
+
+type mockStorage struct {
+ app storage.Appendable
+ appV2 storage.AppendableV2
+ storage.Storage
+}
+
+func (m mockStorage) Appender(ctx context.Context) storage.Appender {
+ return m.app.Appender(ctx)
+}
+
+func (m mockStorage) AppenderV2(ctx context.Context) storage.AppenderV2 {
+ return m.appV2.AppenderV2(ctx)
+}
+
+type sample = teststorage.Sample
+
+func withoutExemplars(s []sample) (ret []sample) {
+ ret = make([]sample, len(s))
+ copy(ret, s)
+ for i := range ret {
+ ret[i].ES = nil
+ }
+ return ret
+}
+
+type fanoutAppenderTestCase struct {
+ name string
+ primary *teststorage.Appendable
+ secondary *teststorage.Appendable
+
+ expectAppendErr bool
+ expectExemplarError bool
+ expectCommitError bool
+
+ expectPrimarySamples []sample
+ expectSecondarySamples []sample
+}
+
+func fanoutAppenderTestCases(expected []sample) []fanoutAppenderTestCase {
+ appErr := errors.New("append test error")
+ exErr := errors.New("exemplar test error")
+ commitErr := errors.New("commit test error")
+
+ return []fanoutAppenderTestCase{
+ {
+ name: "both works",
+ primary: teststorage.NewAppendable(),
+ secondary: teststorage.NewAppendable(),
+
+ expectPrimarySamples: expected,
+ expectSecondarySamples: expected,
+ },
+ {
+ name: "primary errors",
+ primary: teststorage.NewAppendable().WithErrs(func(labels.Labels) error { return appErr }, exErr, commitErr),
+ secondary: teststorage.NewAppendable(),
+
+ expectAppendErr: true,
+ expectExemplarError: true,
+ expectCommitError: true,
+ },
+ {
+ name: "exemplar errors",
+ primary: teststorage.NewAppendable().WithErrs(func(labels.Labels) error { return nil }, exErr, nil),
+ secondary: teststorage.NewAppendable().WithErrs(func(labels.Labels) error { return nil }, exErr, nil),
+
+ expectAppendErr: false,
+ expectExemplarError: true,
+ expectCommitError: false,
+
+ expectPrimarySamples: withoutExemplars(expected),
+ expectSecondarySamples: withoutExemplars(expected),
+ },
+ {
+ name: "secondary errors",
+ primary: teststorage.NewAppendable(),
+ secondary: teststorage.NewAppendable().WithErrs(func(labels.Labels) error { return appErr }, exErr, commitErr),
+
+ expectAppendErr: true,
+ expectExemplarError: true,
+ expectCommitError: true,
+
+ expectPrimarySamples: expected,
+ },
+ }
+}
+
+func TestFanoutAppender(t *testing.T) {
+ h := tsdbutil.GenerateTestHistogram(0)
+ fh := tsdbutil.GenerateTestFloatHistogram(0)
+ ex := exemplar.Exemplar{Value: 1}
+
+ expected := []sample{
+ {L: labels.FromStrings(model.MetricNameLabel, "metric1"), V: 1, ES: []exemplar.Exemplar{ex}},
+ {L: labels.FromStrings(model.MetricNameLabel, "metric2"), T: 1, H: h},
+ {L: labels.FromStrings(model.MetricNameLabel, "metric3"), T: 2, FH: fh},
+ }
+ for _, tt := range fanoutAppenderTestCases(expected) {
+ t.Run(tt.name, func(t *testing.T) {
+ f := storage.NewFanout(nil, mockStorage{app: tt.primary}, mockStorage{app: tt.secondary})
+
+ app := f.Appender(t.Context())
+ ref, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "metric1"), 0, 1)
+ if tt.expectAppendErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ _, err = app.AppendExemplar(ref, labels.FromStrings(model.MetricNameLabel, "metric1"), ex)
+ if tt.expectExemplarError {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ _, err = app.AppendHistogram(0, labels.FromStrings(model.MetricNameLabel, "metric2"), 1, h, nil)
+ if tt.expectAppendErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ _, err = app.AppendHistogram(0, labels.FromStrings(model.MetricNameLabel, "metric3"), 2, nil, fh)
+ if tt.expectAppendErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ err = app.Commit()
+ if tt.expectCommitError {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ require.Nil(t, tt.primary.PendingSamples())
+ testutil.RequireEqual(t, tt.expectPrimarySamples, tt.primary.ResultSamples())
+ require.Nil(t, tt.primary.RolledbackSamples())
+
+ require.Nil(t, tt.secondary.PendingSamples())
+ testutil.RequireEqual(t, tt.expectSecondarySamples, tt.secondary.ResultSamples())
+ require.Nil(t, tt.secondary.RolledbackSamples())
+ })
+ }
+}
+
+func TestFanoutAppenderV2(t *testing.T) {
+ h := tsdbutil.GenerateTestHistogram(0)
+ fh := tsdbutil.GenerateTestFloatHistogram(0)
+ ex := exemplar.Exemplar{Value: 1}
+
+ expected := []sample{
+ {L: labels.FromStrings(model.MetricNameLabel, "metric1"), ST: -1, V: 1, ES: []exemplar.Exemplar{ex}},
+ {L: labels.FromStrings(model.MetricNameLabel, "metric2"), ST: -2, T: 1, H: h},
+ {L: labels.FromStrings(model.MetricNameLabel, "metric3"), ST: -3, T: 2, FH: fh},
+ }
+
+ for _, tt := range fanoutAppenderTestCases(expected) {
+ t.Run(tt.name, func(t *testing.T) {
+ f := storage.NewFanout(nil, mockStorage{appV2: tt.primary}, mockStorage{appV2: tt.secondary})
+
+ app := f.AppenderV2(t.Context())
+ _, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "metric1"), -1, 0, 1, nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{ex},
+ })
+ switch {
+ case tt.expectAppendErr:
+ require.Error(t, err)
+ case tt.expectExemplarError:
+ var pErr *storage.AppendPartialError
+ require.ErrorAs(t, err, &pErr)
+ // One for primary, one for secondary.
+ // This is because in V2 flow we must append sample even when first append partially failed with exemplars.
+ // Filtering out exemplars is neither feasible, nor important.
+ require.Len(t, pErr.ExemplarErrors, 2)
+ default:
+ require.NoError(t, err)
+ }
+
+ _, err = app.Append(0, labels.FromStrings(model.MetricNameLabel, "metric2"), -2, 1, 0, h, nil, storage.AOptions{})
+ if tt.expectAppendErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ _, err = app.Append(0, labels.FromStrings(model.MetricNameLabel, "metric3"), -3, 2, 0, nil, fh, storage.AOptions{})
+ if tt.expectAppendErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ err = app.Commit()
+ if tt.expectCommitError {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ require.Nil(t, tt.primary.PendingSamples())
+ testutil.RequireEqual(t, tt.expectPrimarySamples, tt.primary.ResultSamples())
+ require.Nil(t, tt.primary.RolledbackSamples())
+
+ require.Nil(t, tt.secondary.PendingSamples())
+ testutil.RequireEqual(t, tt.expectSecondarySamples, tt.secondary.ResultSamples())
+ require.Nil(t, tt.secondary.RolledbackSamples())
+ })
+ }
+}
+
+// Recommended CLI invocation:
+/*
+ export bench=fanoutAppender && go test ./storage/... \
+ -run '^$' -bench '^BenchmarkFanoutAppenderV2' \
+ -benchtime 2s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+*/
+func BenchmarkFanoutAppenderV2(b *testing.B) {
+ ex := []exemplar.Exemplar{{Value: 1}}
+
+ var series []labels.Labels
+ for i := range 1000 {
+ series = append(series, labels.FromStrings(model.MetricNameLabel, "metric1", "i", strconv.Itoa(i)))
+ }
+ for _, tt := range fanoutAppenderTestCases(nil) {
+ // Turn our mock appender into ~noop for no allocs.
+ tt.primary.SkipRecording(true)
+ tt.secondary.SkipRecording(true)
+
+ b.Run(tt.name, func(b *testing.B) {
+ f := storage.NewFanout(nil, mockStorage{appV2: tt.primary}, mockStorage{appV2: tt.secondary})
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for b.Loop() {
+ app := f.AppenderV2(b.Context())
+ for _, s := range series {
+ // Purposefully skip errors as we want to benchmark error cases too (majority of the fanout logic).
+ _, _ = app.Append(0, s, 0, 0, 1, nil, nil, storage.AOptions{
+ Exemplars: ex,
+ })
+ }
+ require.NoError(b, app.Rollback())
+ }
+ })
+ }
+}
diff --git a/storage/interface.go b/storage/interface.go
index 23b8b48a0c..d15ba547c8 100644
--- a/storage/interface.go
+++ b/storage/interface.go
@@ -61,7 +61,8 @@ type SeriesRef uint64
// Appendable allows creating Appender.
//
-// WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
+// WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+// Appendable will be removed soon (ETA: Q2 2026).
type Appendable interface {
// Appender returns a new appender for the storage.
//
@@ -77,10 +78,16 @@ type SampleAndChunkQueryable interface {
}
// Storage ingests and manages samples, along with various indexes. All methods
-// are goroutine-safe. Storage implements storage.Appender.
+// are goroutine-safe.
type Storage interface {
SampleAndChunkQueryable
+
+ // Appendable allows appending to storage.
+ // WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+ // Appendable will be removed soon (ETA: Q2 2026).
Appendable
+ // AppendableV2 allows appending to storage.
+ AppendableV2
// StartTime returns the oldest timestamp stored in the storage.
StartTime() (int64, error)
@@ -261,7 +268,8 @@ func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) {
// AppendOptions provides options for implementations of the Appender interface.
//
-// WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
+// WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+// AppendOptions will be removed soon (ETA: Q2 2026).
type AppendOptions struct {
// DiscardOutOfOrder tells implementation that this append should not be out
// of order. An OOO append MUST be rejected with storage.ErrOutOfOrderSample
@@ -278,7 +286,8 @@ type AppendOptions struct {
// I.e. timestamp order within batch is not validated, samples are not reordered per timestamp or by float/histogram
// type.
//
-// WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
+// WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+// Appender will be removed soon (ETA: Q2 2026).
type Appender interface {
AppenderTransaction
@@ -315,7 +324,8 @@ type GetRef interface {
// ExemplarAppender provides an interface for adding samples to exemplar storage, which
// within Prometheus is in-memory only.
//
-// WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
+// WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+// ExemplarAppender will be removed soon (ETA: Q2 2026).
type ExemplarAppender interface {
// AppendExemplar adds an exemplar for the given series labels.
// An optional reference number can be provided to accelerate calls.
@@ -333,7 +343,8 @@ type ExemplarAppender interface {
// HistogramAppender provides an interface for appending histograms to the storage.
//
-// WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
+// WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+// HistogramAppender will be removed soon (ETA: Q2 2026).
type HistogramAppender interface {
// AppendHistogram adds a histogram for the given series labels. An
// optional reference number can be provided to accelerate calls. A
@@ -365,7 +376,8 @@ type HistogramAppender interface {
// MetadataUpdater provides an interface for associating metadata to stored series.
//
-// WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
+// WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+// MetadataUpdater will be removed soon (ETA: Q2 2026).
type MetadataUpdater interface {
// UpdateMetadata updates a metadata entry for the given series and labels.
// A series reference number is returned which can be used to modify the
@@ -379,7 +391,8 @@ type MetadataUpdater interface {
// StartTimestampAppender provides an interface for appending ST to storage.
//
-// WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
+// WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632).
+// StartTimestampAppender will be removed soon (ETA: Q2 2026).
type StartTimestampAppender interface {
// AppendSTZeroSample adds synthetic zero sample for the given st timestamp,
// which will be associated with given series, labels and the incoming
@@ -473,9 +486,10 @@ type Series interface {
}
type mockSeries struct {
- timestamps []int64
- values []float64
- labelSet []string
+ startTimestamps []int64
+ timestamps []int64
+ values []float64
+ labelSet []string
}
func (s mockSeries) Labels() labels.Labels {
@@ -483,15 +497,19 @@ func (s mockSeries) Labels() labels.Labels {
}
func (s mockSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
- return chunkenc.MockSeriesIterator(s.timestamps, s.values)
+ return chunkenc.MockSeriesIterator(s.startTimestamps, s.timestamps, s.values)
}
-// MockSeries returns a series with custom timestamps, values and labelSet.
-func MockSeries(timestamps []int64, values []float64, labelSet []string) Series {
+// MockSeries returns a series with custom start timestamp, timestamps, values,
+// and labelSet.
+// Start timestamps is optional, pass nil or empty slice to indicate no start
+// timestamps.
+func MockSeries(startTimestamps, timestamps []int64, values []float64, labelSet []string) Series {
return mockSeries{
- timestamps: timestamps,
- values: values,
- labelSet: labelSet,
+ startTimestamps: startTimestamps,
+ timestamps: timestamps,
+ values: values,
+ labelSet: labelSet,
}
}
diff --git a/storage/interface_append.go b/storage/interface_append.go
index cc7045dbd5..3753544eb0 100644
--- a/storage/interface_append.go
+++ b/storage/interface_append.go
@@ -69,6 +69,7 @@ type AppendV2Options struct {
// Exemplars (optional) attached to the appended sample.
// Exemplar slice MUST be sorted by Exemplar.TS.
// Exemplar slice is unsafe for reuse.
+ // Duplicate exemplars errors MUST be ignored by implementations.
Exemplars []exemplar.Exemplar
// RejectOutOfOrder tells implementation that this append should not be out
@@ -89,6 +90,10 @@ type AppendPartialError struct {
// Error returns combined error string.
func (e *AppendPartialError) Error() string {
+ if e == nil {
+ return ""
+ }
+
errs := errors.Join(e.ExemplarErrors...)
if errs == nil {
return ""
@@ -96,6 +101,48 @@ func (e *AppendPartialError) Error() string {
return errs.Error()
}
+// ToError returns AppendPartialError as error, returning nil
+// if there are no errors.
+func (e *AppendPartialError) ToError() error {
+ if e == nil || len(e.ExemplarErrors) == 0 {
+ return nil
+ }
+ return e
+}
+
+// Is implements method that's expected by errors.Is.
+func (*AppendPartialError) Is(target error) bool {
+ // This does not need to handle wrapped errors as AppendPartialError.Is should be used
+ // via errors.Is.
+ _, ok := target.(*AppendPartialError)
+ return ok
+}
+
+// Handle handles the given err that may be an AppendPartialError.
+// If the err is nil or not an AppendPartialError it returns err.
+// Otherwise, partial errors are aggregated.
+func (e *AppendPartialError) Handle(err error) (*AppendPartialError, error) {
+ if err == nil {
+ return e, nil
+ }
+
+ // Fast, alloc-free path first for non-partial error cases.
+ if !errors.Is(err, e) {
+ return e, err
+ }
+ var pErr *AppendPartialError
+ if !errors.As(err, &pErr) {
+ return e, err
+ }
+
+ if e == nil {
+ // Lazy allocation.
+ e = &AppendPartialError{}
+ }
+ e.ExemplarErrors = append(e.ExemplarErrors, pErr.ExemplarErrors...)
+ return e, nil
+}
+
var _ error = &AppendPartialError{}
// AppenderV2 provides appends against a storage for all types of samples.
@@ -159,6 +206,8 @@ type AppenderTransaction interface {
// This is to support migration to AppenderV2.
// TODO(bwplotka): Remove once migration to AppenderV2 is fully complete.
type LimitedAppenderV1 interface {
+ AppenderTransaction
+
Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error)
AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
}
diff --git a/storage/interface_test.go b/storage/interface_test.go
index d28e5177e3..3ea4b757e7 100644
--- a/storage/interface_test.go
+++ b/storage/interface_test.go
@@ -23,7 +23,7 @@ import (
)
func TestMockSeries(t *testing.T) {
- s := storage.MockSeries([]int64{1, 2, 3}, []float64{1, 2, 3}, []string{"__name__", "foo"})
+ s := storage.MockSeries(nil, []int64{1, 2, 3}, []float64{1, 2, 3}, []string{"__name__", "foo"})
it := s.Iterator(nil)
ts := []int64{}
vs := []float64{}
@@ -35,3 +35,20 @@ func TestMockSeries(t *testing.T) {
require.Equal(t, []int64{1, 2, 3}, ts)
require.Equal(t, []float64{1, 2, 3}, vs)
}
+
+func TestMockSeriesWithST(t *testing.T) {
+ s := storage.MockSeries([]int64{0, 1, 2}, []int64{1, 2, 3}, []float64{1, 2, 3}, []string{"__name__", "foo"})
+ it := s.Iterator(nil)
+ ts := []int64{}
+ vs := []float64{}
+ st := []int64{}
+ for it.Next() == chunkenc.ValFloat {
+ t, v := it.At()
+ ts = append(ts, t)
+ vs = append(vs, v)
+ st = append(st, it.AtST())
+ }
+ require.Equal(t, []int64{1, 2, 3}, ts)
+ require.Equal(t, []float64{1, 2, 3}, vs)
+ require.Equal(t, []int64{0, 1, 2}, st)
+}
diff --git a/storage/merge.go b/storage/merge.go
index 12d6d3ac0d..76bf0994e0 100644
--- a/storage/merge.go
+++ b/storage/merge.go
@@ -599,6 +599,13 @@ func (c *chainSampleIterator) AtT() int64 {
return c.curr.AtT()
}
+func (c *chainSampleIterator) AtST() int64 {
+ if c.curr == nil {
+ panic("chainSampleIterator.AtST called before first .Next or after .Next returned false.")
+ }
+ return c.curr.AtST()
+}
+
func (c *chainSampleIterator) Next() chunkenc.ValueType {
var (
currT int64
diff --git a/storage/merge_test.go b/storage/merge_test.go
index 6e2daaeb3a..e42a6a4ce1 100644
--- a/storage/merge_test.go
+++ b/storage/merge_test.go
@@ -66,116 +66,116 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
{
name: "one querier, two series",
querierSeries: [][]Series{{
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}}),
}},
expected: NewMockSeriesSet(
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}}),
),
},
{
name: "two queriers, one different series each",
querierSeries: [][]Series{{
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}}),
}, {
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}}),
}},
expected: NewMockSeriesSet(
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}}),
),
},
{
name: "two time unsorted queriers, two series each",
querierSeries: [][]Series{{
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 5, 5}, fSample{0, 6, 6}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}}),
}, {
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 3, 3}, fSample{0, 4, 4}}),
}},
expected: NewMockSeriesSet(
NewListSeries(
labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}, fSample{0, 6, 6}},
),
NewListSeries(
labels.FromStrings("foo", "bar"),
- []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 4, 4}},
),
),
},
{
name: "five queriers, only two queriers have two time unsorted series each",
querierSeries: [][]Series{{}, {}, {
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 5, 5}, fSample{0, 6, 6}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}}),
}, {
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 3, 3}, fSample{0, 4, 4}}),
}, {}},
expected: NewMockSeriesSet(
NewListSeries(
labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}, fSample{0, 6, 6}},
),
NewListSeries(
labels.FromStrings("foo", "bar"),
- []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 4, 4}},
),
),
},
{
name: "two queriers, only two queriers have two time unsorted series each, with 3 noop and one nil querier together",
querierSeries: [][]Series{{}, {}, {
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 5, 5}, fSample{0, 6, 6}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}}),
}, {
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 3, 3}, fSample{0, 4, 4}}),
}, {}},
extraQueriers: []Querier{NoopQuerier(), NoopQuerier(), nil, NoopQuerier()},
expected: NewMockSeriesSet(
NewListSeries(
labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}, fSample{0, 6, 6}},
),
NewListSeries(
labels.FromStrings("foo", "bar"),
- []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 4, 4}},
),
),
},
{
name: "two queriers, with two series, one is overlapping",
querierSeries: [][]Series{{}, {}, {
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 2, 21}, fSample{0, 3, 31}, fSample{0, 5, 5}, fSample{0, 6, 6}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}}),
}, {
- NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 22}, fSample{3, 32}}),
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}),
+ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 22}, fSample{0, 3, 32}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 3, 3}, fSample{0, 4, 4}}),
}, {}},
expected: NewMockSeriesSet(
NewListSeries(
labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 21}, fSample{0, 3, 31}, fSample{0, 5, 5}, fSample{0, 6, 6}},
),
NewListSeries(
labels.FromStrings("foo", "bar"),
- []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 4, 4}},
),
),
},
{
name: "two queries, one with NaN samples series",
querierSeries: [][]Series{{
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, math.NaN()}}),
}, {
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{1, 1}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 1, 1}}),
}},
expected: NewMockSeriesSet(
- NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}, fSample{1, 1}}),
+ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, math.NaN()}, fSample{0, 1, 1}}),
),
},
} {
@@ -249,108 +249,108 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
{
name: "one querier, two series",
chkQuerierSeries: [][]ChunkSeries{{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}}, []chunks.Sample{fSample{0, 2, 2}}),
}},
expected: NewMockChunkSeriesSet(
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}}, []chunks.Sample{fSample{0, 2, 2}}),
),
},
{
name: "two secondaries, one different series each",
chkQuerierSeries: [][]ChunkSeries{{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
}, {
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}}, []chunks.Sample{fSample{0, 2, 2}}),
}},
expected: NewMockChunkSeriesSet(
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}}, []chunks.Sample{fSample{0, 2, 2}}),
),
},
{
name: "two secondaries, two not in time order series each",
chkQuerierSeries: [][]ChunkSeries{{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 5, 5}}, []chunks.Sample{fSample{0, 6, 6}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}}, []chunks.Sample{fSample{0, 2, 2}}),
}, {
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 3, 3}}, []chunks.Sample{fSample{0, 4, 4}}),
}},
expected: NewMockChunkSeriesSet(
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}},
- []chunks.Sample{fSample{3, 3}},
- []chunks.Sample{fSample{5, 5}},
- []chunks.Sample{fSample{6, 6}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}},
+ []chunks.Sample{fSample{0, 3, 3}},
+ []chunks.Sample{fSample{0, 5, 5}},
+ []chunks.Sample{fSample{0, 6, 6}},
),
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
- []chunks.Sample{fSample{0, 0}, fSample{1, 1}},
- []chunks.Sample{fSample{2, 2}},
- []chunks.Sample{fSample{3, 3}},
- []chunks.Sample{fSample{4, 4}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}},
+ []chunks.Sample{fSample{0, 2, 2}},
+ []chunks.Sample{fSample{0, 3, 3}},
+ []chunks.Sample{fSample{0, 4, 4}},
),
),
},
{
name: "five secondaries, only two have two not in time order series each",
chkQuerierSeries: [][]ChunkSeries{{}, {}, {
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 5, 5}}, []chunks.Sample{fSample{0, 6, 6}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}}, []chunks.Sample{fSample{0, 2, 2}}),
}, {
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 3, 3}}, []chunks.Sample{fSample{0, 4, 4}}),
}, {}},
expected: NewMockChunkSeriesSet(
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}},
- []chunks.Sample{fSample{3, 3}},
- []chunks.Sample{fSample{5, 5}},
- []chunks.Sample{fSample{6, 6}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}},
+ []chunks.Sample{fSample{0, 3, 3}},
+ []chunks.Sample{fSample{0, 5, 5}},
+ []chunks.Sample{fSample{0, 6, 6}},
),
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
- []chunks.Sample{fSample{0, 0}, fSample{1, 1}},
- []chunks.Sample{fSample{2, 2}},
- []chunks.Sample{fSample{3, 3}},
- []chunks.Sample{fSample{4, 4}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}},
+ []chunks.Sample{fSample{0, 2, 2}},
+ []chunks.Sample{fSample{0, 3, 3}},
+ []chunks.Sample{fSample{0, 4, 4}},
),
),
},
{
name: "two secondaries, with two not in time order series each, with 3 noop queries and one nil together",
chkQuerierSeries: [][]ChunkSeries{{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 5, 5}}, []chunks.Sample{fSample{0, 6, 6}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}}, []chunks.Sample{fSample{0, 2, 2}}),
}, {
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 3, 3}}, []chunks.Sample{fSample{0, 4, 4}}),
}},
extraQueriers: []ChunkQuerier{NoopChunkedQuerier(), NoopChunkedQuerier(), nil, NoopChunkedQuerier()},
expected: NewMockChunkSeriesSet(
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}},
- []chunks.Sample{fSample{3, 3}},
- []chunks.Sample{fSample{5, 5}},
- []chunks.Sample{fSample{6, 6}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}},
+ []chunks.Sample{fSample{0, 3, 3}},
+ []chunks.Sample{fSample{0, 5, 5}},
+ []chunks.Sample{fSample{0, 6, 6}},
),
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
- []chunks.Sample{fSample{0, 0}, fSample{1, 1}},
- []chunks.Sample{fSample{2, 2}},
- []chunks.Sample{fSample{3, 3}},
- []chunks.Sample{fSample{4, 4}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}},
+ []chunks.Sample{fSample{0, 2, 2}},
+ []chunks.Sample{fSample{0, 3, 3}},
+ []chunks.Sample{fSample{0, 4, 4}},
),
),
},
{
name: "two queries, one with NaN samples series",
chkQuerierSeries: [][]ChunkSeries{{
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, math.NaN()}}),
}, {
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{1, 1}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 1, 1}}),
}},
expected: NewMockChunkSeriesSet(
- NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}, []chunks.Sample{fSample{1, 1}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0, math.NaN()}}, []chunks.Sample{fSample{0, 1, 1}}),
),
},
} {
@@ -387,13 +387,13 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
func histogramSample(ts int64, hint histogram.CounterResetHint) hSample {
h := tsdbutil.GenerateTestHistogram(ts + 1)
h.CounterResetHint = hint
- return hSample{t: ts, h: h}
+ return hSample{st: -ts, t: ts, h: h}
}
func floatHistogramSample(ts int64, hint histogram.CounterResetHint) fhSample {
fh := tsdbutil.GenerateTestFloatHistogram(ts + 1)
fh.CounterResetHint = hint
- return fhSample{t: ts, fh: fh}
+ return fhSample{st: -ts, t: ts, fh: fh}
}
// Shorthands for counter reset hints.
@@ -431,9 +431,9 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
{
name: "single series",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
},
- expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
},
{
name: "two empty series",
@@ -446,55 +446,55 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
{
name: "two non overlapping",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}, fSample{0, 5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 7, 7}, fSample{0, 9, 9}}, []chunks.Sample{fSample{0, 10, 10}}),
},
- expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}, []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}, fSample{0, 5, 5}}, []chunks.Sample{fSample{0, 7, 7}, fSample{0, 9, 9}}, []chunks.Sample{fSample{0, 10, 10}}),
},
{
name: "two overlapping",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}, fSample{0, 8, 8}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 7, 7}, fSample{0, 9, 9}}, []chunks.Sample{fSample{0, 10, 10}}),
},
- expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{7, 7}, fSample{8, 8}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}, fSample{0, 7, 7}, fSample{0, 8, 8}, fSample{0, 9, 9}}, []chunks.Sample{fSample{0, 10, 10}}),
},
{
name: "two duplicated",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
},
- expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
},
{
name: "three overlapping",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{4, 4}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 6, 6}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 4, 4}}),
},
- expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 6}}),
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 4, 4}, fSample{0, 5, 5}, fSample{0, 6, 6}}),
},
{
name: "three in chained overlap",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{4, 4}, fSample{6, 66}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{6, 6}, fSample{10, 10}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 4, 4}, fSample{0, 6, 66}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 6, 6}, fSample{0, 10, 10}}),
},
- expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 66}, fSample{10, 10}}),
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 4, 4}, fSample{0, 5, 5}, fSample{0, 6, 66}, fSample{0, 10, 10}}),
},
{
name: "three in chained overlap complex",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 5, 5}}, []chunks.Sample{fSample{0, 10, 10}, fSample{0, 15, 15}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 2, 2}, fSample{0, 20, 20}}, []chunks.Sample{fSample{0, 25, 25}, fSample{0, 30, 30}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 18, 18}, fSample{0, 26, 26}}, []chunks.Sample{fSample{0, 31, 31}, fSample{0, 35, 35}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{0, 0}, fSample{2, 2}, fSample{5, 5}, fSample{10, 10}, fSample{15, 15}, fSample{18, 18}, fSample{20, 20}, fSample{25, 25}, fSample{26, 26}, fSample{30, 30}},
- []chunks.Sample{fSample{31, 31}, fSample{35, 35}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 2, 2}, fSample{0, 5, 5}, fSample{0, 10, 10}, fSample{0, 15, 15}, fSample{0, 18, 18}, fSample{0, 20, 20}, fSample{0, 25, 25}, fSample{0, 26, 26}, fSample{0, 30, 30}},
+ []chunks.Sample{fSample{0, 31, 31}, fSample{0, 35, 35}},
),
},
{
@@ -534,13 +534,13 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
name: "histogram chunks overlapping with float chunks",
input: []ChunkSeries{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0), histogramSample(5)}, []chunks.Sample{histogramSample(10), histogramSample(15)}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{12, 12}}, []chunks.Sample{fSample{14, 14}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 12, 12}}, []chunks.Sample{fSample{0, 14, 14}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
[]chunks.Sample{histogramSample(0)},
- []chunks.Sample{fSample{1, 1}},
+ []chunks.Sample{fSample{0, 1, 1}},
[]chunks.Sample{histogramSample(5), histogramSample(10)},
- []chunks.Sample{fSample{12, 12}, fSample{14, 14}},
+ []chunks.Sample{fSample{0, 12, 12}, fSample{0, 14, 14}},
[]chunks.Sample{histogramSample(15)},
),
},
@@ -560,13 +560,13 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
name: "float histogram chunks overlapping with float chunks",
input: []ChunkSeries{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{12, 12}}, []chunks.Sample{fSample{14, 14}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 12, 12}}, []chunks.Sample{fSample{0, 14, 14}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
[]chunks.Sample{floatHistogramSample(0)},
- []chunks.Sample{fSample{1, 1}},
+ []chunks.Sample{fSample{0, 1, 1}},
[]chunks.Sample{floatHistogramSample(5), floatHistogramSample(10)},
- []chunks.Sample{fSample{12, 12}, fSample{14, 14}},
+ []chunks.Sample{fSample{0, 12, 12}, fSample{0, 14, 14}},
[]chunks.Sample{floatHistogramSample(15)},
),
},
@@ -736,9 +736,9 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) {
{
name: "single series",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
},
- expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}}),
},
{
name: "two empty series",
@@ -751,70 +751,70 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) {
{
name: "two non overlapping",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}, fSample{0, 5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 7, 7}, fSample{0, 9, 9}}, []chunks.Sample{fSample{0, 10, 10}}),
},
- expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}, []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}, fSample{0, 5, 5}}, []chunks.Sample{fSample{0, 7, 7}, fSample{0, 9, 9}}, []chunks.Sample{fSample{0, 10, 10}}),
},
{
name: "two overlapping",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}, fSample{0, 8, 8}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 7, 7}, fSample{0, 9, 9}}, []chunks.Sample{fSample{0, 10, 10}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}},
- []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}}, []chunks.Sample{fSample{0, 3, 3}, fSample{0, 8, 8}},
+ []chunks.Sample{fSample{0, 7, 7}, fSample{0, 9, 9}}, []chunks.Sample{fSample{0, 10, 10}},
),
},
{
name: "two duplicated",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
- []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}},
+ []chunks.Sample{fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}},
),
},
{
name: "three overlapping",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{4, 4}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 6, 6}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 4, 4}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
- []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}},
- []chunks.Sample{fSample{0, 0}, fSample{4, 4}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}},
+ []chunks.Sample{fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 6, 6}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 4, 4}},
),
},
{
name: "three in chained overlap",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{4, 4}, fSample{6, 66}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{6, 6}, fSample{10, 10}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 4, 4}, fSample{0, 6, 66}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 6, 6}, fSample{0, 10, 10}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
- []chunks.Sample{fSample{4, 4}, fSample{6, 66}},
- []chunks.Sample{fSample{6, 6}, fSample{10, 10}},
+ []chunks.Sample{fSample{0, 1, 1}, fSample{0, 2, 2}, fSample{0, 3, 3}, fSample{0, 5, 5}},
+ []chunks.Sample{fSample{0, 4, 4}, fSample{0, 6, 66}},
+ []chunks.Sample{fSample{0, 6, 6}, fSample{0, 10, 10}},
),
},
{
name: "three in chained overlap complex",
input: []ChunkSeries{
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}}),
- NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0, 0}, fSample{0, 5, 5}}, []chunks.Sample{fSample{0, 10, 10}, fSample{0, 15, 15}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 2, 2}, fSample{0, 20, 20}}, []chunks.Sample{fSample{0, 25, 25}, fSample{0, 30, 30}}),
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 18, 18}, fSample{0, 26, 26}}, []chunks.Sample{fSample{0, 31, 31}, fSample{0, 35, 35}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
- []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}},
- []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}},
- []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}},
+ []chunks.Sample{fSample{0, 0, 0}, fSample{0, 5, 5}}, []chunks.Sample{fSample{0, 10, 10}, fSample{0, 15, 15}},
+ []chunks.Sample{fSample{0, 2, 2}, fSample{0, 20, 20}}, []chunks.Sample{fSample{0, 25, 25}, fSample{0, 30, 30}},
+ []chunks.Sample{fSample{0, 18, 18}, fSample{0, 26, 26}}, []chunks.Sample{fSample{0, 31, 31}, fSample{0, 35, 35}},
),
},
{
@@ -1059,7 +1059,7 @@ func (*mockChunkSeriesSet) Warnings() annotations.Annotations { return nil }
func TestChainSampleIterator(t *testing.T) {
for sampleType, sampleFunc := range map[string]func(int64) chunks.Sample{
- "float": func(ts int64) chunks.Sample { return fSample{ts, float64(ts)} },
+ "float": func(ts int64) chunks.Sample { return fSample{-ts, ts, float64(ts)} },
"histogram": func(ts int64) chunks.Sample { return histogramSample(ts, uk) },
"float histogram": func(ts int64) chunks.Sample { return floatHistogramSample(ts, uk) },
} {
@@ -1176,7 +1176,7 @@ func TestChainSampleIteratorHistogramCounterResetHint(t *testing.T) {
func TestChainSampleIteratorSeek(t *testing.T) {
for sampleType, sampleFunc := range map[string]func(int64) chunks.Sample{
- "float": func(ts int64) chunks.Sample { return fSample{ts, float64(ts)} },
+ "float": func(ts int64) chunks.Sample { return fSample{-ts, ts, float64(ts)} },
"histogram": func(ts int64) chunks.Sample { return histogramSample(ts, uk) },
"float histogram": func(ts int64) chunks.Sample { return floatHistogramSample(ts, uk) },
} {
@@ -1224,13 +1224,13 @@ func TestChainSampleIteratorSeek(t *testing.T) {
switch merged.Seek(tc.seek) {
case chunkenc.ValFloat:
t, f := merged.At()
- actual = append(actual, fSample{t, f})
+ actual = append(actual, fSample{merged.AtST(), t, f})
case chunkenc.ValHistogram:
t, h := merged.AtHistogram(nil)
- actual = append(actual, hSample{t, h})
+ actual = append(actual, hSample{merged.AtST(), t, h})
case chunkenc.ValFloatHistogram:
t, fh := merged.AtFloatHistogram(nil)
- actual = append(actual, fhSample{t, fh})
+ actual = append(actual, fhSample{merged.AtST(), t, fh})
}
s, err := ExpandSamples(merged, nil)
require.NoError(t, err)
@@ -1243,7 +1243,7 @@ func TestChainSampleIteratorSeek(t *testing.T) {
func TestChainSampleIteratorSeekFailingIterator(t *testing.T) {
merged := ChainSampleIteratorFromIterators(nil, []chunkenc.Iterator{
- NewListSeriesIterator(samples{fSample{0, 0.1}, fSample{1, 1.1}, fSample{2, 2.1}}),
+ NewListSeriesIterator(samples{fSample{0, 0, 0.1}, fSample{0, 1, 1.1}, fSample{0, 2, 2.1}}),
errIterator{errors.New("something went wrong")},
})
@@ -1253,7 +1253,7 @@ func TestChainSampleIteratorSeekFailingIterator(t *testing.T) {
func TestChainSampleIteratorNextImmediatelyFailingIterator(t *testing.T) {
merged := ChainSampleIteratorFromIterators(nil, []chunkenc.Iterator{
- NewListSeriesIterator(samples{fSample{0, 0.1}, fSample{1, 1.1}, fSample{2, 2.1}}),
+ NewListSeriesIterator(samples{fSample{0, 0, 0.1}, fSample{0, 1, 1.1}, fSample{0, 2, 2.1}}),
errIterator{errors.New("something went wrong")},
})
@@ -1263,7 +1263,7 @@ func TestChainSampleIteratorNextImmediatelyFailingIterator(t *testing.T) {
// Next() does some special handling for the first iterator, so make sure it handles the first iterator returning an error too.
merged = ChainSampleIteratorFromIterators(nil, []chunkenc.Iterator{
errIterator{errors.New("something went wrong")},
- NewListSeriesIterator(samples{fSample{0, 0.1}, fSample{1, 1.1}, fSample{2, 2.1}}),
+ NewListSeriesIterator(samples{fSample{0, 0, 0.1}, fSample{0, 1, 1.1}, fSample{0, 2, 2.1}}),
})
require.Equal(t, chunkenc.ValNone, merged.Next())
@@ -1310,13 +1310,13 @@ func TestChainSampleIteratorSeekHistogramCounterResetHint(t *testing.T) {
switch merged.Seek(tc.seek) {
case chunkenc.ValFloat:
t, f := merged.At()
- actual = append(actual, fSample{t, f})
+ actual = append(actual, fSample{merged.AtST(), t, f})
case chunkenc.ValHistogram:
t, h := merged.AtHistogram(nil)
- actual = append(actual, hSample{t, h})
+ actual = append(actual, hSample{merged.AtST(), t, h})
case chunkenc.ValFloatHistogram:
t, fh := merged.AtFloatHistogram(nil)
- actual = append(actual, fhSample{t, fh})
+ actual = append(actual, fhSample{merged.AtST(), t, fh})
}
s, err := ExpandSamples(merged, nil)
require.NoError(t, err)
@@ -1716,6 +1716,10 @@ func (errIterator) AtT() int64 {
return 0
}
+func (errIterator) AtST() int64 {
+ return 0
+}
+
func (e errIterator) Err() error {
return e.err
}
diff --git a/storage/remote/codec.go b/storage/remote/codec.go
index 9f0fb7d92a..c689a51164 100644
--- a/storage/remote/codec.go
+++ b/storage/remote/codec.go
@@ -564,6 +564,12 @@ func (c *concreteSeriesIterator) AtT() int64 {
return c.series.floats[c.floatsCur].Timestamp
}
+// TODO(krajorama): implement AtST. Maybe. concreteSeriesIterator is used
+// for turning query results into an iterable, but query results do not have ST.
+func (*concreteSeriesIterator) AtST() int64 {
+ return 0
+}
+
const noTS = int64(math.MaxInt64)
// Next implements chunkenc.Iterator.
@@ -832,6 +838,11 @@ func (it *chunkedSeriesIterator) AtT() int64 {
return it.cur.AtT()
}
+// TODO(krajorama): test AtST once we have a chunk format that provides ST.
+func (it *chunkedSeriesIterator) AtST() int64 {
+ return it.cur.AtST()
+}
+
func (it *chunkedSeriesIterator) Err() error {
return it.err
}
diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go
index e6e7813c7b..5da8c8176c 100644
--- a/storage/remote/codec_test.go
+++ b/storage/remote/codec_test.go
@@ -1146,7 +1146,7 @@ func buildTestChunks(t *testing.T) []prompb.Chunk {
minTimeMs := time
for j := range numSamplesPerTestChunk {
- a.Append(time, float64(i+j))
+ a.Append(0, time, float64(i+j))
time += int64(1000)
}
diff --git a/storage/remote/googleiam/googleiam.go b/storage/remote/googleiam/googleiam.go
index 0ca7185ab7..2095ee9747 100644
--- a/storage/remote/googleiam/googleiam.go
+++ b/storage/remote/googleiam/googleiam.go
@@ -19,7 +19,6 @@ import (
"context"
"fmt"
"net/http"
- "os"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
@@ -42,15 +41,7 @@ func NewRoundTripper(cfg *Config, next http.RoundTripper) (http.RoundTripper, er
option.WithScopes(scopes),
}
if cfg.CredentialsFile != "" {
- credBytes, err := os.ReadFile(cfg.CredentialsFile)
- if err != nil {
- return nil, fmt.Errorf("error reading Google credentials file: %w", err)
- }
- creds, err := google.CredentialsFromJSON(ctx, credBytes, scopes)
- if err != nil {
- return nil, fmt.Errorf("error parsing Google credentials file: %w", err)
- }
- opts = append(opts, option.WithCredentials(creds))
+ opts = append(opts, option.WithAuthCredentialsFile(option.ServiceAccount, cfg.CredentialsFile))
} else {
creds, err := google.FindDefaultCredentials(ctx, scopes)
if err != nil {
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
deleted file mode 100644
index 883b8d3142..0000000000
--- a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// TODO(krajorama): rename this package to otlpappender or similar, as it is
-// not specific to Prometheus remote write anymore.
-// Note otlptranslator is already used by prometheus/otlptranslator repo.
-package prometheusremotewrite
-
-import (
- "errors"
- "fmt"
- "log/slog"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-
- "github.com/prometheus/prometheus/model/exemplar"
- "github.com/prometheus/prometheus/model/histogram"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/metadata"
- "github.com/prometheus/prometheus/storage"
-)
-
-// Metadata extends metadata.Metadata with the metric family name.
-// OTLP calculates the metric family name for all metrics and uses
-// it for generating summary, histogram series by adding the magic
-// suffixes. The metric family name is passed down to the appender
-// in case the storage needs it for metadata updates.
-// Known user is Mimir that implements /api/v1/metadata and uses
-// Remote-Write 1.0 for this. Might be removed later if no longer
-// needed by any downstream project.
-type Metadata struct {
- metadata.Metadata
- MetricFamilyName string
-}
-
-// CombinedAppender is similar to storage.Appender, but combines updates to
-// metadata, created timestamps, exemplars and samples into a single call.
-type CombinedAppender interface {
- // AppendSample appends a sample and related exemplars, metadata, and
- // created timestamp to the storage.
- AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) error
- // AppendHistogram appends a histogram and related exemplars, metadata, and
- // created timestamp to the storage.
- AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error
-}
-
-// CombinedAppenderMetrics is for the metrics observed by the
-// combinedAppender implementation.
-type CombinedAppenderMetrics struct {
- samplesAppendedWithoutMetadata prometheus.Counter
- outOfOrderExemplars prometheus.Counter
-}
-
-func NewCombinedAppenderMetrics(reg prometheus.Registerer) CombinedAppenderMetrics {
- return CombinedAppenderMetrics{
- samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
- Namespace: "prometheus",
- Subsystem: "api",
- Name: "otlp_appended_samples_without_metadata_total",
- Help: "The total number of samples ingested from OTLP without corresponding metadata.",
- }),
- outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{
- Namespace: "prometheus",
- Subsystem: "api",
- Name: "otlp_out_of_order_exemplars_total",
- Help: "The total number of received OTLP exemplars which were rejected because they were out of order.",
- }),
- }
-}
-
-// NewCombinedAppender creates a combined appender that sets start times and
-// updates metadata for each series only once, and appends samples and
-// exemplars for each call.
-func NewCombinedAppender(app storage.Appender, logger *slog.Logger, ingestSTZeroSample, appendMetadata bool, metrics CombinedAppenderMetrics) CombinedAppender {
- return &combinedAppender{
- app: app,
- logger: logger,
- ingestSTZeroSample: ingestSTZeroSample,
- appendMetadata: appendMetadata,
- refs: make(map[uint64]seriesRef),
- samplesAppendedWithoutMetadata: metrics.samplesAppendedWithoutMetadata,
- outOfOrderExemplars: metrics.outOfOrderExemplars,
- }
-}
-
-type seriesRef struct {
- ref storage.SeriesRef
- st int64
- ls labels.Labels
- meta metadata.Metadata
-}
-
-type combinedAppender struct {
- app storage.Appender
- logger *slog.Logger
- samplesAppendedWithoutMetadata prometheus.Counter
- outOfOrderExemplars prometheus.Counter
- ingestSTZeroSample bool
- appendMetadata bool
- // Used to ensure we only update metadata and created timestamps once, and to share storage.SeriesRefs.
- // To detect hash collision it also stores the labels.
- // There is no overflow/conflict list, the TSDB will handle that part.
- refs map[uint64]seriesRef
-}
-
-func (b *combinedAppender) AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) (err error) {
- return b.appendFloatOrHistogram(ls, meta.Metadata, st, t, v, nil, es)
-}
-
-func (b *combinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) {
- if h == nil {
- // Sanity check, we should never get here with a nil histogram.
- b.logger.Error("Received nil histogram in CombinedAppender.AppendHistogram", "series", ls.String())
- return errors.New("internal error, attempted to append nil histogram")
- }
- return b.appendFloatOrHistogram(ls, meta.Metadata, st, t, 0, h, es)
-}
-
-func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadata.Metadata, st, t int64, v float64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) {
- hash := ls.Hash()
- series, exists := b.refs[hash]
- ref := series.ref
- if exists && !labels.Equal(series.ls, ls) {
- // Hash collision. The series reference we stored is pointing to a
- // different series so we cannot use it, we need to reset the
- // reference and cache.
- // Note: we don't need to keep track of conflicts here,
- // the TSDB will handle that part when we pass 0 reference.
- exists = false
- ref = 0
- }
- updateRefs := !exists || series.st != st
- if updateRefs && st != 0 && st < t && b.ingestSTZeroSample {
- var newRef storage.SeriesRef
- if h != nil {
- newRef, err = b.app.AppendHistogramSTZeroSample(ref, ls, t, st, h, nil)
- } else {
- newRef, err = b.app.AppendSTZeroSample(ref, ls, t, st)
- }
- if err != nil {
- if !errors.Is(err, storage.ErrOutOfOrderST) && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
- // Even for the first sample OOO is a common scenario because
- // we can't tell if a ST was already ingested in a previous request.
- // We ignore the error.
- // ErrDuplicateSampleForTimestamp is also a common scenario because
- // unknown start times in Opentelemetry are indicated by setting
- // the start time to the same as the first sample time.
- // https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time
- b.logger.Warn("Error when appending ST from OTLP", "err", err, "series", ls.String(), "start_timestamp", st, "timestamp", t, "sample_type", sampleType(h))
- }
- } else {
- // We only use the returned reference on success as otherwise an
- // error of ST append could invalidate the series reference.
- ref = newRef
- }
- }
- {
- var newRef storage.SeriesRef
- if h != nil {
- newRef, err = b.app.AppendHistogram(ref, ls, t, h, nil)
- } else {
- newRef, err = b.app.Append(ref, ls, t, v)
- }
- if err != nil {
- // Although Append does not currently return ErrDuplicateSampleForTimestamp there is
- // a note indicating its inclusion in the future.
- if errors.Is(err, storage.ErrOutOfOrderSample) ||
- errors.Is(err, storage.ErrOutOfBounds) ||
- errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
- b.logger.Error("Error when appending sample from OTLP", "err", err.Error(), "series", ls.String(), "timestamp", t, "sample_type", sampleType(h))
- }
- } else {
- // If the append was successful, we can use the returned reference.
- ref = newRef
- }
- }
-
- if ref == 0 {
- // We cannot update metadata or add exemplars on non existent series.
- return err
- }
-
- metadataChanged := exists && (series.meta.Help != meta.Help || series.meta.Type != meta.Type || series.meta.Unit != meta.Unit)
-
- // Update cache if references changed or metadata changed.
- if updateRefs || metadataChanged {
- b.refs[hash] = seriesRef{
- ref: ref,
- st: st,
- ls: ls,
- meta: meta,
- }
- }
-
- // Update metadata in storage if enabled and needed.
- if b.appendMetadata && (!exists || metadataChanged) {
- // Only update metadata in WAL if the metadata-wal-records feature is enabled.
- // Without this feature, metadata is not persisted to WAL.
- _, err := b.app.UpdateMetadata(ref, ls, meta)
- if err != nil {
- b.samplesAppendedWithoutMetadata.Add(1)
- b.logger.Warn("Error while updating metadata from OTLP", "err", err)
- }
- }
-
- b.appendExemplars(ref, ls, es)
-
- return err
-}
-
-func sampleType(h *histogram.Histogram) string {
- if h == nil {
- return "float"
- }
- return "histogram"
-}
-
-func (b *combinedAppender) appendExemplars(ref storage.SeriesRef, ls labels.Labels, es []exemplar.Exemplar) storage.SeriesRef {
- var err error
- for _, e := range es {
- if ref, err = b.app.AppendExemplar(ref, ls, e); err != nil {
- switch {
- case errors.Is(err, storage.ErrOutOfOrderExemplar):
- b.outOfOrderExemplars.Add(1)
- b.logger.Debug("Out of order exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
- default:
- // Since exemplar storage is still experimental, we don't fail the request on ingestion errors
- b.logger.Debug("Error while adding exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
- }
- }
- }
- return ref
-}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
deleted file mode 100644
index a1a17fe82b..0000000000
--- a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
+++ /dev/null
@@ -1,937 +0,0 @@
-// Copyright The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheusremotewrite
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "math"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/common/model"
- "github.com/prometheus/common/promslog"
- "github.com/stretchr/testify/require"
-
- "github.com/prometheus/prometheus/model/exemplar"
- "github.com/prometheus/prometheus/model/histogram"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/metadata"
- "github.com/prometheus/prometheus/storage"
- "github.com/prometheus/prometheus/tsdb"
- "github.com/prometheus/prometheus/tsdb/chunkenc"
- "github.com/prometheus/prometheus/tsdb/tsdbutil"
- "github.com/prometheus/prometheus/util/testutil"
-)
-
-type mockCombinedAppender struct {
- pendingSamples []combinedSample
- pendingHistograms []combinedHistogram
-
- samples []combinedSample
- histograms []combinedHistogram
-}
-
-type combinedSample struct {
- metricFamilyName string
- ls labels.Labels
- meta metadata.Metadata
- t int64
- st int64
- v float64
- es []exemplar.Exemplar
-}
-
-type combinedHistogram struct {
- metricFamilyName string
- ls labels.Labels
- meta metadata.Metadata
- t int64
- st int64
- h *histogram.Histogram
- es []exemplar.Exemplar
-}
-
-func (m *mockCombinedAppender) AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) error {
- m.pendingSamples = append(m.pendingSamples, combinedSample{
- metricFamilyName: meta.MetricFamilyName,
- ls: ls,
- meta: meta.Metadata,
- t: t,
- st: st,
- v: v,
- es: es,
- })
- return nil
-}
-
-func (m *mockCombinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error {
- m.pendingHistograms = append(m.pendingHistograms, combinedHistogram{
- metricFamilyName: meta.MetricFamilyName,
- ls: ls,
- meta: meta.Metadata,
- t: t,
- st: st,
- h: h,
- es: es,
- })
- return nil
-}
-
-func (m *mockCombinedAppender) Commit() error {
- m.samples = append(m.samples, m.pendingSamples...)
- m.pendingSamples = m.pendingSamples[:0]
- m.histograms = append(m.histograms, m.pendingHistograms...)
- m.pendingHistograms = m.pendingHistograms[:0]
- return nil
-}
-
-func requireEqual(t testing.TB, expected, actual any, msgAndArgs ...any) {
- testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.AllowUnexported(combinedSample{}, combinedHistogram{})}, msgAndArgs...)
-}
-
-// TestCombinedAppenderOnTSDB runs some basic tests on a real TSDB to check
-// that the combinedAppender works on a real TSDB.
-func TestCombinedAppenderOnTSDB(t *testing.T) {
- t.Run("ingestSTZeroSample=false", func(t *testing.T) { testCombinedAppenderOnTSDB(t, false) })
-
- t.Run("ingestSTZeroSample=true", func(t *testing.T) { testCombinedAppenderOnTSDB(t, true) })
-}
-
-func testCombinedAppenderOnTSDB(t *testing.T, ingestSTZeroSample bool) {
- t.Helper()
-
- now := time.Now()
-
- testExemplars := []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- {
- Labels: labels.FromStrings("tracid", "132"),
- Value: 7777,
- },
- }
- expectedExemplars := []exemplar.QueryResult{
- {
- SeriesLabels: labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- ),
- Exemplars: testExemplars,
- },
- }
-
- seriesLabels := labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- )
- floatMetadata := Metadata{
- Metadata: metadata.Metadata{
- Type: model.MetricTypeCounter,
- Unit: "bytes",
- Help: "some help",
- },
- MetricFamilyName: "test_bytes_total",
- }
-
- histogramMetadata := Metadata{
- Metadata: metadata.Metadata{
- Type: model.MetricTypeHistogram,
- Unit: "bytes",
- Help: "some help",
- },
- MetricFamilyName: "test_bytes",
- }
-
- testCases := map[string]struct {
- appendFunc func(*testing.T, CombinedAppender)
- extraAppendFunc func(*testing.T, CombinedAppender)
- expectedSamples []sample
- expectedExemplars []exemplar.QueryResult
- expectedLogsForST []string
- }{
- "single float sample, zero ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, testExemplars))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- expectedExemplars: expectedExemplars,
- },
- "single float sample, very old ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 1, now.UnixMilli(), 42.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- expectedLogsForST: []string{
- "Error when appending ST from OTLP",
- "out of bound",
- },
- },
- "single float sample, normal ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil))
- },
- expectedSamples: []sample{
- {
- stZero: true,
- t: now.Add(-2 * time.Minute).UnixMilli(),
- },
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- },
- "single float sample, ST same time as sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- },
- "two float samples in different messages, ST same time as first sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil))
- },
- extraAppendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), 43.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- {
- t: now.Add(time.Second).UnixMilli(),
- f: 43.0,
- },
- },
- },
- "single float sample, ST in the future of the sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- },
- },
- "single histogram sample, zero ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), testExemplars))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- expectedExemplars: expectedExemplars,
- },
- "single histogram sample, very old ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 1, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- expectedLogsForST: []string{
- "Error when appending ST from OTLP",
- "out of bound",
- },
- },
- "single histogram sample, normal ST": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- expectedSamples: []sample{
- {
- stZero: true,
- t: now.Add(-2 * time.Minute).UnixMilli(),
- h: &histogram.Histogram{},
- },
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- },
- "single histogram sample, ST same time as sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- },
- "two histogram samples in different messages, ST same time as first sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- extraAppendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(43), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- {
- t: now.Add(time.Second).UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(43),
- },
- },
- },
- "single histogram sample, ST in the future of the sample": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- },
- },
- "multiple float samples": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, nil))
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.Add(15*time.Second).UnixMilli(), 62.0, nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- f: 42.0,
- },
- {
- t: now.Add(15 * time.Second).UnixMilli(),
- f: 62.0,
- },
- },
- },
- "multiple histogram samples": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
- require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.Add(15*time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(62), nil))
- },
- expectedSamples: []sample{
- {
- t: now.UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(42),
- },
- {
- t: now.Add(15 * time.Second).UnixMilli(),
- h: tsdbutil.GenerateTestHistogram(62),
- },
- },
- },
- "float samples with ST changing": {
- appendFunc: func(t *testing.T, app CombinedAppender) {
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-4*time.Second).UnixMilli(), now.Add(-3*time.Second).UnixMilli(), 42.0, nil))
- require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-1*time.Second).UnixMilli(), now.UnixMilli(), 62.0, nil))
- },
- expectedSamples: []sample{
- {
- stZero: true,
- t: now.Add(-4 * time.Second).UnixMilli(),
- },
- {
- t: now.Add(-3 * time.Second).UnixMilli(),
- f: 42.0,
- },
- {
- stZero: true,
- t: now.Add(-1 * time.Second).UnixMilli(),
- },
- {
- t: now.UnixMilli(),
- f: 62.0,
- },
- },
- },
- }
-
- for name, tc := range testCases {
- t.Run(name, func(t *testing.T) {
- var expectedLogs []string
- if ingestSTZeroSample {
- expectedLogs = append(expectedLogs, tc.expectedLogsForST...)
- }
-
- dir := t.TempDir()
- opts := tsdb.DefaultOptions()
- opts.EnableExemplarStorage = true
- opts.MaxExemplars = 100
- db, err := tsdb.Open(dir, promslog.NewNopLogger(), prometheus.NewRegistry(), opts, nil)
- require.NoError(t, err)
-
- t.Cleanup(func() { db.Close() })
-
- var output bytes.Buffer
- logger := promslog.New(&promslog.Config{Writer: &output})
-
- ctx := context.Background()
- reg := prometheus.NewRegistry()
- cappMetrics := NewCombinedAppenderMetrics(reg)
- app := db.Appender(ctx)
- capp := NewCombinedAppender(app, logger, ingestSTZeroSample, false, cappMetrics)
- tc.appendFunc(t, capp)
- require.NoError(t, app.Commit())
-
- if tc.extraAppendFunc != nil {
- app = db.Appender(ctx)
- capp = NewCombinedAppender(app, logger, ingestSTZeroSample, false, cappMetrics)
- tc.extraAppendFunc(t, capp)
- require.NoError(t, app.Commit())
- }
-
- if len(expectedLogs) > 0 {
- for _, expectedLog := range expectedLogs {
- require.Contains(t, output.String(), expectedLog)
- }
- } else {
- require.Empty(t, output.String(), "unexpected log output")
- }
-
- q, err := db.Querier(int64(math.MinInt64), int64(math.MaxInt64))
- require.NoError(t, err)
-
- ss := q.Select(ctx, false, &storage.SelectHints{
- Start: int64(math.MinInt64),
- End: int64(math.MaxInt64),
- }, labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total"))
-
- require.NoError(t, ss.Err())
-
- require.True(t, ss.Next())
- series := ss.At()
- it := series.Iterator(nil)
- for i, sample := range tc.expectedSamples {
- if !ingestSTZeroSample && sample.stZero {
- continue
- }
- if sample.h == nil {
- require.Equal(t, chunkenc.ValFloat, it.Next())
- ts, v := it.At()
- require.Equal(t, sample.t, ts, "sample ts %d", i)
- require.Equal(t, sample.f, v, "sample v %d", i)
- } else {
- require.Equal(t, chunkenc.ValHistogram, it.Next())
- ts, h := it.AtHistogram(nil)
- require.Equal(t, sample.t, ts, "sample ts %d", i)
- require.Equal(t, sample.h.Count, h.Count, "sample v %d", i)
- }
- }
- require.False(t, ss.Next())
-
- eq, err := db.ExemplarQuerier(ctx)
- require.NoError(t, err)
- exResult, err := eq.Select(int64(math.MinInt64), int64(math.MaxInt64), []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total")})
- require.NoError(t, err)
- if tc.expectedExemplars == nil {
- tc.expectedExemplars = []exemplar.QueryResult{}
- }
- require.Equal(t, tc.expectedExemplars, exResult)
- })
- }
-}
-
-type sample struct {
- stZero bool
-
- t int64
- f float64
- h *histogram.Histogram
-}
-
-// TestCombinedAppenderSeriesRefs checks that the combined appender
-// correctly uses and updates the series references in the internal map.
-func TestCombinedAppenderSeriesRefs(t *testing.T) {
- seriesLabels := labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- )
-
- floatMetadata := Metadata{
- Metadata: metadata.Metadata{
- Type: model.MetricTypeCounter,
- Unit: "bytes",
- Help: "some help",
- },
- MetricFamilyName: "test_bytes_total",
- }
-
- t.Run("happy case with ST zero, reference is passed and reused", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- }))
-
- require.Len(t, app.records, 5)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[2])
- requireEqualOpAndRef(t, "Append", ref, app.records[3])
- requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[4])
- })
-
- t.Run("error on second ST ingest doesn't update the reference", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
-
- app.appendSTZeroSampleError = errors.New("test error")
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, nil))
-
- require.Len(t, app.records, 4)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[2])
- require.Zero(t, app.records[2].outRef, "the second AppendSTZeroSample returned 0")
- requireEqualOpAndRef(t, "Append", ref, app.records[3])
- })
-
- t.Run("metadata, exemplars are not updated if append failed", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
- app.appendError = errors.New("test error")
- require.Error(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 0, 1, 42.0, []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- }))
-
- require.Len(t, app.records, 1)
- require.Equal(t, appenderRecord{
- op: "Append",
- ls: labels.FromStrings(model.MetricNameLabel, "test_bytes_total", "foo", "bar"),
- }, app.records[0])
- })
-
- t.Run("metadata, exemplars are updated if append failed but reference is valid", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, true, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- newMetadata := floatMetadata
- newMetadata.Help = "some other help"
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
- app.appendError = errors.New("test error")
- require.Error(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- }))
-
- require.Len(t, app.records, 7)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2])
- requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[3])
- requireEqualOpAndRef(t, "Append", ref, app.records[4])
- require.Zero(t, app.records[4].outRef, "the second Append returned 0")
- requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5])
- requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[6])
- })
-
- t.Run("simulate conflict with existing series", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- ls := labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- )
-
- require.NoError(t, capp.AppendSample(ls, floatMetadata, 1, 2, 42.0, nil))
-
- hash := ls.Hash()
- cappImpl := capp.(*combinedAppender)
- series := cappImpl.refs[hash]
- series.ls = labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "club",
- )
- // The hash and ref remain the same, but we altered the labels.
- // This simulates a conflict with an existing series.
- cappImpl.refs[hash] = series
-
- require.NoError(t, capp.AppendSample(ls, floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{
- {
- Labels: labels.FromStrings("tracid", "122"),
- Value: 1337,
- },
- }))
-
- require.Len(t, app.records, 5)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[2])
- newRef := app.records[2].outRef
- require.NotEqual(t, ref, newRef, "the second AppendSTZeroSample returned a different reference")
- requireEqualOpAndRef(t, "Append", newRef, app.records[3])
- requireEqualOpAndRef(t, "AppendExemplar", newRef, app.records[4])
- })
-
- t.Run("check that invoking AppendHistogram returns an error for nil histogram", func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- ls := labels.FromStrings(
- model.MetricNameLabel, "test_bytes_total",
- "foo", "bar",
- )
- err := capp.AppendHistogram(ls, Metadata{}, 4, 2, nil, nil)
- require.Error(t, err)
- })
-
- for _, appendMetadata := range []bool{false, true} {
- t.Run(fmt.Sprintf("appendMetadata=%t", appendMetadata), func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, appendMetadata, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
-
- if appendMetadata {
- require.Len(t, app.records, 3)
- requireEqualOp(t, "AppendSTZeroSample", app.records[0])
- requireEqualOp(t, "Append", app.records[1])
- requireEqualOp(t, "UpdateMetadata", app.records[2])
- } else {
- require.Len(t, app.records, 2)
- requireEqualOp(t, "AppendSTZeroSample", app.records[0])
- requireEqualOp(t, "Append", app.records[1])
- }
- })
- }
-}
-
-// TestCombinedAppenderMetadataChanges verifies that UpdateMetadata is called
-// when metadata fields change (help, unit, or type).
-func TestCombinedAppenderMetadataChanges(t *testing.T) {
- seriesLabels := labels.FromStrings(
- model.MetricNameLabel, "test_metric",
- "foo", "bar",
- )
-
- baseMetadata := Metadata{
- Metadata: metadata.Metadata{
- Type: model.MetricTypeCounter,
- Unit: "bytes",
- Help: "original help",
- },
- MetricFamilyName: "test_metric",
- }
-
- tests := []struct {
- name string
- modifyMetadata func(Metadata) Metadata
- }{
- {
- name: "help changes",
- modifyMetadata: func(m Metadata) Metadata {
- m.Help = "new help text"
- return m
- },
- },
- {
- name: "unit changes",
- modifyMetadata: func(m Metadata) Metadata {
- m.Unit = "seconds"
- return m
- },
- },
- {
- name: "type changes",
- modifyMetadata: func(m Metadata) Metadata {
- m.Type = model.MetricTypeGauge
- return m
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, true, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- newMetadata := tt.modifyMetadata(baseMetadata)
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), baseMetadata, 1, 2, 42.0, nil))
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, nil))
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 5, 162.0, nil))
-
- // Verify expected operations.
- require.Len(t, app.records, 7)
- requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
- ref := app.records[0].outRef
- require.NotZero(t, ref)
- requireEqualOpAndRef(t, "Append", ref, app.records[1])
- requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2])
- requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[3])
- requireEqualOpAndRef(t, "Append", ref, app.records[4])
- requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5])
- requireEqualOpAndRef(t, "Append", ref, app.records[6])
- })
- }
-}
-
-func requireEqualOp(t *testing.T, expectedOp string, actual appenderRecord) {
- t.Helper()
- require.Equal(t, expectedOp, actual.op)
-}
-
-func requireEqualOpAndRef(t *testing.T, expectedOp string, expectedRef storage.SeriesRef, actual appenderRecord) {
- t.Helper()
- require.Equal(t, expectedOp, actual.op)
- require.Equal(t, expectedRef, actual.ref)
-}
-
-type appenderRecord struct {
- op string
- ref storage.SeriesRef
- outRef storage.SeriesRef
- ls labels.Labels
-}
-
-type appenderRecorder struct {
- refcount uint64
- records []appenderRecord
-
- appendError error
- appendSTZeroSampleError error
- appendHistogramError error
- appendHistogramSTZeroSampleError error
- updateMetadataError error
- appendExemplarError error
-}
-
-var _ storage.Appender = &appenderRecorder{}
-
-func (a *appenderRecorder) setOutRef(ref storage.SeriesRef) {
- if len(a.records) == 0 {
- return
- }
- a.records[len(a.records)-1].outRef = ref
-}
-
-func (a *appenderRecorder) newRef() storage.SeriesRef {
- a.refcount++
- return storage.SeriesRef(a.refcount)
-}
-
-func (a *appenderRecorder) Append(ref storage.SeriesRef, ls labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "Append", ref: ref, ls: ls})
- if a.appendError != nil {
- return 0, a.appendError
- }
- if ref == 0 {
- ref = a.newRef()
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) AppendSTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "AppendSTZeroSample", ref: ref, ls: ls})
- if a.appendSTZeroSampleError != nil {
- return 0, a.appendSTZeroSampleError
- }
- if ref == 0 {
- ref = a.newRef()
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "AppendHistogram", ref: ref, ls: ls})
- if a.appendHistogramError != nil {
- return 0, a.appendHistogramError
- }
- if ref == 0 {
- ref = a.newRef()
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) AppendHistogramSTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "AppendHistogramSTZeroSample", ref: ref, ls: ls})
- if a.appendHistogramSTZeroSampleError != nil {
- return 0, a.appendHistogramSTZeroSampleError
- }
- if ref == 0 {
- ref = a.newRef()
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) UpdateMetadata(ref storage.SeriesRef, ls labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "UpdateMetadata", ref: ref, ls: ls})
- if a.updateMetadataError != nil {
- return 0, a.updateMetadataError
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) AppendExemplar(ref storage.SeriesRef, ls labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
- a.records = append(a.records, appenderRecord{op: "AppendExemplar", ref: ref, ls: ls})
- if a.appendExemplarError != nil {
- return 0, a.appendExemplarError
- }
- a.setOutRef(ref)
- return ref, nil
-}
-
-func (a *appenderRecorder) Commit() error {
- a.records = append(a.records, appenderRecord{op: "Commit"})
- return nil
-}
-
-func (a *appenderRecorder) Rollback() error {
- a.records = append(a.records, appenderRecord{op: "Rollback"})
- return nil
-}
-
-func (*appenderRecorder) SetOptions(_ *storage.AppendOptions) {
- panic("not implemented")
-}
-
-func TestMetadataChangedLogic(t *testing.T) {
- seriesLabels := labels.FromStrings(model.MetricNameLabel, "test_metric", "foo", "bar")
- baseMetadata := Metadata{
- Metadata: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "original"},
- MetricFamilyName: "test_metric",
- }
-
- tests := []struct {
- name string
- appendMetadata bool
- modifyMetadata func(Metadata) Metadata
- expectWALCall bool
- verifyCached func(*testing.T, metadata.Metadata)
- }{
- {
- name: "appendMetadata=false, no change",
- appendMetadata: false,
- modifyMetadata: func(m Metadata) Metadata { return m },
- expectWALCall: false,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "original", m.Help) },
- },
- {
- name: "appendMetadata=false, help changes - cache updated, no WAL",
- appendMetadata: false,
- modifyMetadata: func(m Metadata) Metadata { m.Help = "changed"; return m },
- expectWALCall: false,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "changed", m.Help) },
- },
- {
- name: "appendMetadata=true, help changes - cache and WAL updated",
- appendMetadata: true,
- modifyMetadata: func(m Metadata) Metadata { m.Help = "changed"; return m },
- expectWALCall: true,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "changed", m.Help) },
- },
- {
- name: "appendMetadata=true, unit changes",
- appendMetadata: true,
- modifyMetadata: func(m Metadata) Metadata { m.Unit = "seconds"; return m },
- expectWALCall: true,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "seconds", m.Unit) },
- },
- {
- name: "appendMetadata=true, type changes",
- appendMetadata: true,
- modifyMetadata: func(m Metadata) Metadata { m.Type = model.MetricTypeGauge; return m },
- expectWALCall: true,
- verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, model.MetricTypeGauge, m.Type) },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- app := &appenderRecorder{}
- capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, tt.appendMetadata, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
-
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), baseMetadata, 1, 2, 42.0, nil))
-
- modifiedMetadata := tt.modifyMetadata(baseMetadata)
- app.records = nil
- require.NoError(t, capp.AppendSample(seriesLabels.Copy(), modifiedMetadata, 1, 3, 43.0, nil))
-
- hash := seriesLabels.Hash()
- cached, exists := capp.(*combinedAppender).refs[hash]
- require.True(t, exists)
- tt.verifyCached(t, cached.meta)
-
- updateMetadataCalled := false
- for _, record := range app.records {
- if record.op == "UpdateMetadata" {
- updateMetadataCalled = true
- break
- }
- }
- require.Equal(t, tt.expectWALCall, updateMetadataCalled)
- })
- }
-}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
index 7e3c9d5021..1d321218e7 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
@@ -19,6 +19,7 @@ package prometheusremotewrite
import (
"context"
"encoding/hex"
+ "errors"
"fmt"
"log"
"math"
@@ -32,13 +33,14 @@ import (
"github.com/prometheus/otlptranslator"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
- conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
)
const (
@@ -60,18 +62,29 @@ const (
defaultLookbackDelta = 5 * time.Minute
)
+// reservedLabelNames contains label names that should be filtered from
+// OTLP attributes because they are set separately (via extras parameter).
+// Allowing these through could create duplicate labels.
+var reservedLabelNames = []string{
+ model.MetricNameLabel, // "__name__" - set from metric name
+}
+
// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
-// If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels.
-func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings,
- ignoreAttrs []string, logOnOverwrite bool, meta Metadata, extras ...string,
+//
+// This function requires for cached resource and scope labels to be set up first.
+func (c *PrometheusConverter) createAttributes(
+ attributes pcommon.Map,
+ settings Settings,
+ ignoreAttrs []string,
+ logOnOverwrite bool,
+ meta metadata.Metadata,
+ extras ...string,
) (labels.Labels, error) {
- resourceAttrs := resource.Attributes()
- serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
- instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)
-
- promoteScope := settings.PromoteScopeMetadata && scope.name != ""
+ if c.resourceLabels == nil {
+ return labels.EmptyLabels(), errors.New("createAttributes called without initializing resource context")
+ }
// Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized.
@@ -88,12 +101,6 @@ func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attrib
c.scratchBuilder.Sort()
sortedLabels := c.scratchBuilder.Labels()
- labelNamer := otlptranslator.LabelNamer{
- UTF8Allowed: settings.AllowUTF8,
- UnderscoreLabelSanitization: settings.LabelNameUnderscoreSanitization,
- PreserveMultipleUnderscores: settings.LabelNamePreserveMultipleUnderscores,
- }
-
if settings.AllowUTF8 {
// UTF8 is allowed, so conflicts aren't possible.
c.builder.Reset(sortedLabels)
@@ -106,7 +113,7 @@ func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attrib
if sortErr != nil {
return
}
- finalKey, err := labelNamer.Build(l.Name)
+ finalKey, err := c.buildLabelName(l.Name)
if err != nil {
sortErr = err
return
@@ -122,28 +129,36 @@ func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attrib
}
}
- err := settings.PromoteResourceAttributes.addPromotedAttributes(c.builder, resourceAttrs, labelNamer)
- if err != nil {
- return labels.EmptyLabels(), err
- }
- if promoteScope {
- var rangeErr error
- scope.attributes.Range(func(k string, v pcommon.Value) bool {
- name, err := labelNamer.Build("otel_scope_" + k)
- if err != nil {
- rangeErr = err
- return false
+ if settings.PromoteResourceAttributes != nil {
+ // Merge cached promoted resource labels.
+ c.resourceLabels.promotedLabels.Range(func(l labels.Label) {
+ if c.builder.Get(l.Name) == "" {
+ c.builder.Set(l.Name, l.Value)
}
- c.builder.Set(name, v.AsString())
- return true
})
- if rangeErr != nil {
- return labels.EmptyLabels(), rangeErr
+ }
+ // Merge cached job/instance labels.
+ if c.resourceLabels.jobLabel != "" {
+ c.builder.Set(model.JobLabel, c.resourceLabels.jobLabel)
+ }
+ if c.resourceLabels.instanceLabel != "" {
+ c.builder.Set(model.InstanceLabel, c.resourceLabels.instanceLabel)
+ }
+ // Merge cached external labels.
+ for key, value := range c.resourceLabels.externalLabels {
+ if c.builder.Get(key) == "" {
+ c.builder.Set(key, value)
}
- // Scope Name, Version and Schema URL are added after attributes to ensure they are not overwritten by attributes.
- c.builder.Set("otel_scope_name", scope.name)
- c.builder.Set("otel_scope_version", scope.version)
- c.builder.Set("otel_scope_schema_url", scope.schemaURL)
+ }
+
+ if c.scopeLabels != nil {
+ // Merge cached scope labels if scope promotion is enabled.
+ c.scopeLabels.scopeAttrs.Range(func(l labels.Label) {
+ c.builder.Set(l.Name, l.Value)
+ })
+ c.builder.Set("otel_scope_name", c.scopeLabels.scopeName)
+ c.builder.Set("otel_scope_version", c.scopeLabels.scopeVersion)
+ c.builder.Set("otel_scope_schema_url", c.scopeLabels.scopeSchemaURL)
}
if settings.EnableTypeAndUnitLabels {
@@ -156,27 +171,6 @@ func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attrib
}
}
- // Map service.name + service.namespace to job.
- if haveServiceName {
- val := serviceName.AsString()
- if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok {
- val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val)
- }
- c.builder.Set(model.JobLabel, val)
- }
- // Map service.instance.id to instance.
- if haveInstanceID {
- c.builder.Set(model.InstanceLabel, instance.AsString())
- }
- for key, value := range settings.ExternalLabels {
- // External labels have already been sanitized.
- if existingValue := c.builder.Get(key); existingValue != "" {
- // Skip external labels if they are overridden by metric attributes.
- continue
- }
- c.builder.Set(key, value)
- }
-
for i := 0; i < len(extras); i += 2 {
if i+1 >= len(extras) {
break
@@ -189,7 +183,7 @@ func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attrib
// internal labels should be maintained.
if len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__" {
var err error
- name, err = labelNamer.Build(name)
+ name, err = c.buildLabelName(name)
if err != nil {
return labels.EmptyLabels(), err
}
@@ -222,8 +216,11 @@ func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporali
// with the user defined bucket boundaries of non-exponential OTel histograms.
// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
// https://github.com/prometheus/prometheus/issues/13485.
-func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
- resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
+func (c *PrometheusConverter) addHistogramDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.HistogramDataPointSlice,
+ settings Settings,
+ appOpts storage.AOptions,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -231,38 +228,37 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
}
pt := dataPoints.At(x)
+ // Clear stale exemplars from the previous data point to prevent
+ // them from leaking into _sum and _count of this data point.
+ appOpts.Exemplars = nil
timestamp := convertTimeStamp(pt.Timestamp())
startTimestamp := convertTimeStamp(pt.StartTimestamp())
- baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta)
+ baseLabels, err := c.createAttributes(pt.Attributes(), settings, reservedLabelNames, false, appOpts.Metadata)
if err != nil {
return err
}
- baseName := meta.MetricFamilyName
-
// If the sum is unset, it indicates the _sum metric point should be
// omitted
if pt.HasSum() {
- // treat sum as a sample in an individual TimeSeries
+ // Treat sum as a sample in an individual TimeSeries.
val := pt.Sum()
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
-
- sumlabels := c.addLabels(baseName+sumStr, baseLabels)
- if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ sumLabels := c.addLabels(appOpts.MetricFamilyName+sumStr, baseLabels)
+ if _, err := c.appender.Append(0, sumLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
}
- // treat count as a sample in an individual TimeSeries
+ // Treat count as a sample in an individual TimeSeries.
val := float64(pt.Count())
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
-
- countlabels := c.addLabels(baseName+countStr, baseLabels)
- if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ countLabels := c.addLabels(appOpts.MetricFamilyName+countStr, baseLabels)
+ if _, err := c.appender.Append(0, countLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
@@ -271,10 +267,10 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
}
nextExemplarIdx := 0
- // cumulative count for conversion to cumulative histogram
+ // Cumulative count for conversion to cumulative histogram.
var cumulativeCount uint64
- // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
+ // Process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1.
for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
if err := c.everyN.checkContext(ctx); err != nil {
return err
@@ -285,32 +281,34 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
// Find exemplars that belong to this bucket. Both exemplars and
// buckets are sorted in ascending order.
- var currentBucketExemplars []exemplar.Exemplar
+ appOpts.Exemplars = appOpts.Exemplars[:0]
for ; nextExemplarIdx < len(exemplars); nextExemplarIdx++ {
ex := exemplars[nextExemplarIdx]
if ex.Value > bound {
// This exemplar belongs in a higher bucket.
break
}
- currentBucketExemplars = append(currentBucketExemplars, ex)
+ appOpts.Exemplars = append(appOpts.Exemplars, ex)
}
val := float64(cumulativeCount)
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
- labels := c.addLabels(baseName+bucketStr, baseLabels, leStr, boundStr)
- if err := c.appender.AppendSample(labels, meta, startTimestamp, timestamp, val, currentBucketExemplars); err != nil {
+ bucketLabels := c.addLabels(appOpts.MetricFamilyName+bucketStr, baseLabels, leStr, boundStr)
+ if _, err := c.appender.Append(0, bucketLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
}
- // add le=+Inf bucket
+
+ appOpts.Exemplars = exemplars[nextExemplarIdx:]
+ // Add le=+Inf bucket.
val = float64(pt.Count())
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
- infLabels := c.addLabels(baseName+bucketStr, baseLabels, leStr, pInfStr)
- if err := c.appender.AppendSample(infLabels, meta, startTimestamp, timestamp, val, exemplars[nextExemplarIdx:]); err != nil {
+ infLabels := c.addLabels(appOpts.MetricFamilyName+bucketStr, baseLabels, leStr, pInfStr)
+ if _, err := c.appender.Append(0, infLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
}
@@ -424,8 +422,11 @@ func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp p
return minTimestamp, maxTimestamp
}
-func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,
- settings Settings, scope scope, meta Metadata,
+func (c *PrometheusConverter) addSummaryDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.SummaryDataPointSlice,
+ settings Settings,
+ appOpts storage.AOptions,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -435,21 +436,18 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin
pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp())
startTimestamp := convertTimeStamp(pt.StartTimestamp())
- baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta)
+ baseLabels, err := c.createAttributes(pt.Attributes(), settings, reservedLabelNames, false, appOpts.Metadata)
if err != nil {
return err
}
- baseName := meta.MetricFamilyName
-
// treat sum as a sample in an individual TimeSeries
val := pt.Sum()
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
- // sum and count of the summary should append suffix to baseName
- sumlabels := c.addLabels(baseName+sumStr, baseLabels)
- if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ sumLabels := c.addLabels(appOpts.MetricFamilyName+sumStr, baseLabels)
+ if _, err := c.appender.Append(0, sumLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
@@ -458,8 +456,8 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin
if pt.Flags().NoRecordedValue() {
val = math.Float64frombits(value.StaleNaN)
}
- countlabels := c.addLabels(baseName+countStr, baseLabels)
- if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ countLabels := c.addLabels(appOpts.MetricFamilyName+countStr, baseLabels)
+ if _, err := c.appender.Append(0, countLabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
@@ -471,8 +469,8 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin
val = math.Float64frombits(value.StaleNaN)
}
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
- qtlabels := c.addLabels(baseName, baseLabels, quantileStr, percentileStr)
- if err := c.appender.AppendSample(qtlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ qtlabels := c.addLabels(appOpts.MetricFamilyName, baseLabels, quantileStr, percentileStr)
+ if _, err := c.appender.Append(0, qtlabels, startTimestamp, timestamp, val, nil, nil, appOpts); err != nil {
return err
}
}
@@ -504,9 +502,9 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
attributes := resource.Attributes()
identifyingAttrs := []string{
- conventions.AttributeServiceNamespace,
- conventions.AttributeServiceName,
- conventions.AttributeServiceInstanceID,
+ string(semconv.ServiceNamespaceKey),
+ string(semconv.ServiceNameKey),
+ string(semconv.ServiceInstanceIDKey),
}
nonIdentifyingAttrsCount := attributes.Len()
for _, a := range identifyingAttrs {
@@ -530,7 +528,7 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
// Do not pass identifying attributes as ignoreAttrs below.
identifyingAttrs = nil
}
- meta := Metadata{
+ appOpts := storage.AOptions{
Metadata: metadata.Metadata{
Type: model.MetricTypeGauge,
Help: "Target metadata",
@@ -538,7 +536,12 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
MetricFamilyName: name,
}
// TODO: should target info have the __type__ metadata label?
- lbls, err := c.createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, Metadata{}, model.MetricNameLabel, name)
+ // target_info is a resource-level metric and should not include scope labels.
+ // Temporarily clear scope labels for this call.
+ savedScopeLabels := c.scopeLabels
+ c.scopeLabels = nil
+ lbls, err := c.createAttributes(attributes, settings, identifyingAttrs, false, metadata.Metadata{}, model.MetricNameLabel, name)
+ c.scopeLabels = savedScopeLabels
if err != nil {
return err
}
@@ -580,7 +583,8 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
}
c.seenTargetInfo[key] = struct{}{}
- if err := c.appender.AppendSample(lbls, meta, 0, timestampMs, float64(1), nil); err != nil {
+ _, err = c.appender.Append(0, lbls, 0, timestampMs, 1.0, nil, nil, appOpts)
+ if err != nil {
return err
}
}
@@ -596,7 +600,8 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
}
c.seenTargetInfo[key] = struct{}{}
- return c.appender.AppendSample(lbls, meta, 0, finalTimestampMs, float64(1), nil)
+ _, err = c.appender.Append(0, lbls, 0, finalTimestampMs, 1.0, nil, nil, appOpts)
+ return err
}
// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms.
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
index b06bf3d416..f4f5283164 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
@@ -30,12 +30,18 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric"
"github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
-func TestCreateAttributes(t *testing.T) {
+type sample = teststorage.Sample
+
+func TestPrometheusConverter_createAttributes(t *testing.T) {
resourceAttrs := map[string]string{
"service.name": "service name",
"service.instance.id": "service ID",
@@ -386,10 +392,22 @@ func TestCreateAttributes(t *testing.T) {
"metric_multi", "multi metric",
),
},
+ {
+ name: "__name__ attribute is filtered when passed in ignoreAttrs",
+ promoteResourceAttributes: nil,
+ ignoreAttrs: []string{model.MetricNameLabel},
+ expectedLabels: labels.FromStrings(
+ "__name__", "test_metric",
+ "instance", "service ID",
+ "job", "service name",
+ "metric_attr", "metric value",
+ "metric_attr_other", "metric value other",
+ ),
+ },
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- c := NewPrometheusConverter(&mockCombinedAppender{})
+ c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
settings := Settings{
PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{
PromoteAllResourceAttributes: tc.promoteAllResourceAttributes,
@@ -413,12 +431,116 @@ func TestCreateAttributes(t *testing.T) {
if tc.attrs != (pcommon.Map{}) {
testAttrs = tc.attrs
}
- lbls, err := c.createAttributes(testResource, testAttrs, tc.scope, settings, tc.ignoreAttrs, false, Metadata{}, model.MetricNameLabel, "test_metric")
+ // Initialize resource and scope context as FromMetrics would.
+ require.NoError(t, c.setResourceContext(testResource, settings))
+ require.NoError(t, c.setScopeContext(tc.scope, settings))
+
+ lbls, err := c.createAttributes(testAttrs, settings, tc.ignoreAttrs, false, metadata.Metadata{}, model.MetricNameLabel, "test_metric")
require.NoError(t, err)
testutil.RequireEqual(t, tc.expectedLabels, lbls)
})
}
+
+ // Test that __name__ attributes in OTLP data are filtered out to prevent
+ // duplicate labels.
+ t.Run("__name__ attribute in OTLP data is filtered", func(t *testing.T) {
+ resource := pcommon.NewResource()
+ resource.Attributes().PutStr("service.name", "test-service")
+ resource.Attributes().PutStr("service.instance.id", "test-instance")
+
+ // Create attributes with __name__ to simulate problematic OTLP data.
+ attrsWithNameLabel := pcommon.NewMap()
+ attrsWithNameLabel.PutStr("__name__", "wrong_metric_name")
+ attrsWithNameLabel.PutStr("other_attr", "value")
+
+ c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
+ settings := Settings{}
+
+ require.NoError(t, c.setResourceContext(resource, settings))
+ require.NoError(t, c.setScopeContext(scope{}, settings))
+
+ // Call createAttributes with reservedLabelNames to filter __name__.
+ lbls, err := c.createAttributes(
+ attrsWithNameLabel,
+ settings,
+ reservedLabelNames,
+ true,
+ metadata.Metadata{},
+ model.MetricNameLabel, "correct_metric_name",
+ )
+ require.NoError(t, err)
+
+ // Verify there's exactly one __name__ label with the correct value.
+ nameCount := 0
+ var nameValue string
+ lbls.Range(func(l labels.Label) {
+ if l.Name == model.MetricNameLabel {
+ nameCount++
+ nameValue = l.Value
+ }
+ })
+
+ require.Equal(t, 1, nameCount)
+ require.Equal(t, "correct_metric_name", nameValue)
+ require.Equal(t, "value", lbls.Get("other_attr"))
+ })
+
+ // Test that __type__ and __unit__ attributes in OTLP data are overwritten
+ // by auto-generated labels from metadata when EnableTypeAndUnitLabels is true.
+ t.Run("__type__ and __unit__ attributes are overwritten by metadata", func(t *testing.T) {
+ resource := pcommon.NewResource()
+ resource.Attributes().PutStr("service.name", "test-service")
+ resource.Attributes().PutStr("service.instance.id", "test-instance")
+
+ // Create attributes with __type__ and __unit__ to simulate problematic OTLP data.
+ attrsWithTypeAndUnit := pcommon.NewMap()
+ attrsWithTypeAndUnit.PutStr(model.MetricTypeLabel, "wrong_type")
+ attrsWithTypeAndUnit.PutStr(model.MetricUnitLabel, "wrong_unit")
+ attrsWithTypeAndUnit.PutStr("other_attr", "value")
+
+ c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
+ settings := Settings{EnableTypeAndUnitLabels: true}
+
+ require.NoError(t, c.setResourceContext(resource, settings))
+ require.NoError(t, c.setScopeContext(scope{}, settings))
+
+ // Call createAttributes with Metadata containing correct Type and Unit.
+ lbls, err := c.createAttributes(
+ attrsWithTypeAndUnit,
+ settings,
+ reservedLabelNames,
+ true,
+ metadata.Metadata{Type: model.MetricTypeGauge, Unit: "seconds"},
+ model.MetricNameLabel, "test_metric",
+ )
+ require.NoError(t, err)
+
+ // Verify there's exactly one __type__ label with the correct value (from metadata).
+ typeCount := 0
+ var typeValue string
+ lbls.Range(func(l labels.Label) {
+ if l.Name == model.MetricTypeLabel {
+ typeCount++
+ typeValue = l.Value
+ }
+ })
+ require.Equal(t, 1, typeCount)
+ require.Equal(t, "gauge", typeValue)
+
+ // Verify there's exactly one __unit__ label with the correct value (from metadata).
+ unitCount := 0
+ var unitValue string
+ lbls.Range(func(l labels.Label) {
+ if l.Name == model.MetricUnitLabel {
+ unitCount++
+ unitValue = l.Value
+ }
+ })
+ require.Equal(t, 1, unitCount)
+ require.Equal(t, "seconds", unitValue)
+ require.Equal(t, "value", lbls.Get("other_attr"))
+ })
}
func Test_convertTimeStamp(t *testing.T) {
@@ -457,7 +579,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- want func() []combinedSample
+ want func() []sample
}{
{
name: "summary with start time and without scope promotion",
@@ -474,25 +596,25 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+sumStr,
),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+countStr,
),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -512,7 +634,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- want: func() []combinedSample {
+ want: func() []sample {
scopeLabels := []string{
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
@@ -520,22 +642,22 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
"otel_scope_schema_url", defaultScope.schemaURL,
"otel_scope_version", defaultScope.version,
}
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(append(scopeLabels,
+ MF: "test_summary",
+ L: labels.FromStrings(append(scopeLabels,
model.MetricNameLabel, "test_summary"+sumStr)...),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(append(scopeLabels,
+ MF: "test_summary",
+ L: labels.FromStrings(append(scopeLabels,
model.MetricNameLabel, "test_summary"+countStr)...),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -554,23 +676,23 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+sumStr,
),
- t: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+countStr,
),
- t: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -598,41 +720,41 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+sumStr,
),
- t: convertTimeStamp(ts),
- v: 100,
+ T: convertTimeStamp(ts),
+ V: 100,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary"+countStr,
),
- t: convertTimeStamp(ts),
- v: 50,
+ T: convertTimeStamp(ts),
+ V: 50,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary",
quantileStr, "0.5",
),
- t: convertTimeStamp(ts),
- v: 30,
+ T: convertTimeStamp(ts),
+ V: 30,
},
{
- metricFamilyName: "test_summary",
- ls: labels.FromStrings(
+ MF: "test_summary",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_summary",
quantileStr, "0.9",
),
- t: convertTimeStamp(ts),
- v: 40,
+ T: convertTimeStamp(ts),
+ V: 40,
},
}
},
@@ -641,24 +763,28 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ settings := Settings{
+ PromoteScopeMetadata: tt.promoteScope,
+ }
+ resource := pcommon.NewResource()
- converter.addSummaryDataPoints(
+ // Initialize resource and scope context as FromMetrics would.
+ require.NoError(t, converter.setResourceContext(resource, settings))
+ require.NoError(t, converter.setScopeContext(tt.scope, settings))
+
+ require.NoError(t, converter.addSummaryDataPoints(
context.Background(),
metric.Summary().DataPoints(),
- pcommon.NewResource(),
- Settings{
- PromoteScopeMetadata: tt.promoteScope,
- },
- tt.scope,
- Metadata{
+ settings,
+ storage.AOptions{
MetricFamilyName: metric.Name(),
},
- )
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.want(), mockAppender.samples)
+ ))
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())
})
}
}
@@ -681,7 +807,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- want func() []combinedSample
+ want func() []sample
}{
{
name: "histogram with start time and without scope promotion",
@@ -698,26 +824,26 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(
+ MF: "test_hist",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_hist"+countStr,
),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(
+ MF: "test_hist",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_hist_bucket",
model.BucketLabel, "+Inf",
),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -737,7 +863,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- want: func() []combinedSample {
+ want: func() []sample {
scopeLabels := []string{
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
@@ -745,23 +871,23 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
"otel_scope_schema_url", defaultScope.schemaURL,
"otel_scope_version", defaultScope.version,
}
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(append(scopeLabels,
+ MF: "test_hist",
+ L: labels.FromStrings(append(scopeLabels,
model.MetricNameLabel, "test_hist"+countStr)...),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(append(scopeLabels,
+ MF: "test_hist",
+ L: labels.FromStrings(append(scopeLabels,
model.MetricNameLabel, "test_hist_bucket",
model.BucketLabel, "+Inf")...),
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -778,24 +904,24 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
return metric
},
- want: func() []combinedSample {
- return []combinedSample{
+ want: func() []sample {
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(
+ MF: "test_hist",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_hist"+countStr,
),
- t: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ V: 0,
},
{
- metricFamilyName: "test_hist",
- ls: labels.FromStrings(
+ MF: "test_hist",
+ L: labels.FromStrings(
model.MetricNameLabel, "test_hist_bucket",
model.BucketLabel, "+Inf",
),
- t: convertTimeStamp(ts),
- v: 0,
+ T: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -804,31 +930,150 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ settings := Settings{
+ PromoteScopeMetadata: tt.promoteScope,
+ }
+ resource := pcommon.NewResource()
- converter.addHistogramDataPoints(
+ // Initialize resource and scope context as FromMetrics would.
+ require.NoError(t, converter.setResourceContext(resource, settings))
+ require.NoError(t, converter.setScopeContext(tt.scope, settings))
+
+ require.NoError(t, converter.addHistogramDataPoints(
context.Background(),
metric.Histogram().DataPoints(),
- pcommon.NewResource(),
- Settings{
- PromoteScopeMetadata: tt.promoteScope,
- },
- tt.scope,
- Metadata{
+ settings,
+ storage.AOptions{
MetricFamilyName: metric.Name(),
},
- )
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.want(), mockAppender.samples)
+ ))
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())
})
}
}
+// TestAddHistogramDataPoints_ExemplarLeakAcrossDataPoints verifies that
+// exemplars from a previous data point don't leak into _sum/_count of the
+// next data point. Regression test for stale exemplar leak.
+func TestAddHistogramDataPoints_ExemplarLeakAcrossDataPoints(t *testing.T) {
+ ts := pcommon.Timestamp(time.Now().UnixNano())
+ exTs := pcommon.Timestamp(time.Now().Add(time.Second).UnixNano())
+
+ metric := pmetric.NewMetric()
+ metric.SetName("test_hist")
+ metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+
+ // First data point: has buckets and an exemplar with value 200 (> bound 100, so falls into +Inf).
+ pt1 := metric.Histogram().DataPoints().AppendEmpty()
+ pt1.SetTimestamp(ts)
+ pt1.SetStartTimestamp(ts)
+ pt1.SetSum(42)
+ pt1.SetCount(10)
+ pt1.ExplicitBounds().FromRaw([]float64{100})
+ pt1.BucketCounts().FromRaw([]uint64{7, 3})
+
+ ex := pt1.Exemplars().AppendEmpty()
+ ex.SetTimestamp(exTs)
+ ex.SetDoubleValue(200) // > 100, so falls into the +Inf bucket.
+
+ // Second data point: no exemplars.
+ pt2 := metric.Histogram().DataPoints().AppendEmpty()
+ pt2.SetTimestamp(ts)
+ pt2.SetStartTimestamp(ts)
+ pt2.SetSum(84)
+ pt2.SetCount(20)
+ pt2.ExplicitBounds().FromRaw([]float64{100})
+ pt2.BucketCounts().FromRaw([]uint64{14, 6})
+
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ settings := Settings{}
+ resource := pcommon.NewResource()
+
+ require.NoError(t, converter.setResourceContext(resource, settings))
+ require.NoError(t, converter.setScopeContext(scope{}, settings))
+ require.NoError(t, converter.addHistogramDataPoints(
+ context.Background(),
+ metric.Histogram().DataPoints(),
+ settings,
+ storage.AOptions{
+ MetricFamilyName: metric.Name(),
+ },
+ ))
+ require.NoError(t, app.Commit())
+
+ exConverted := exemplar.Exemplar{
+ Value: 200,
+ Ts: convertTimeStamp(exTs),
+ HasTs: true,
+ }
+ tsMs := convertTimeStamp(ts)
+
+ want := []sample{
+ // -- First data point --
+ // _sum: no exemplars.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_sum"),
+ T: tsMs, ST: tsMs, V: 42,
+ },
+ // _count: no exemplars.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_count"),
+ T: tsMs, ST: tsMs, V: 10,
+ },
+ // le=100 bucket: no exemplars (exemplar value 200 > 100).
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "100"),
+ T: tsMs, ST: tsMs, V: 7,
+ },
+ // le=+Inf bucket: gets the exemplar.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "+Inf"),
+ T: tsMs, ST: tsMs, V: 10,
+ ES: []exemplar.Exemplar{exConverted},
+ },
+ // -- Second data point --
+ // _sum: NO exemplars (this is the regression check).
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_sum"),
+ T: tsMs, ST: tsMs, V: 84,
+ },
+ // _count: NO exemplars (this is the regression check).
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_count"),
+ T: tsMs, ST: tsMs, V: 20,
+ },
+ // le=100 bucket: no exemplars.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "100"),
+ T: tsMs, ST: tsMs, V: 14,
+ },
+ // le=+Inf bucket: no exemplars.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "+Inf"),
+ T: tsMs, ST: tsMs, V: 20,
+ },
+ }
+
+ teststorage.RequireEqual(t, want, appTest.ResultSamples())
+}
+
func TestGetPromExemplars(t *testing.T) {
ctx := context.Background()
- c := NewPrometheusConverter(&mockCombinedAppender{})
+ c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
t.Run("Exemplars with int value", func(t *testing.T) {
es := pmetric.NewExemplarSlice()
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
index db7c0e1275..31c16b1c10 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
@@ -22,11 +22,11 @@ import (
"math"
"github.com/prometheus/common/model"
- "go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -34,9 +34,12 @@ const defaultZeroThreshold = 1e-128
// addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series
// as native histogram samples.
-func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice,
- resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality,
- scope scope, meta Metadata,
+func (c *PrometheusConverter) addExponentialHistogramDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.ExponentialHistogramDataPointSlice,
+ settings Settings,
+ temporality pmetric.AggregationTemporality,
+ appOpts storage.AOptions,
) (annotations.Annotations, error) {
var annots annotations.Annotations
for x := 0; x < dataPoints.Len(); x++ {
@@ -53,15 +56,13 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
}
lbls, err := c.createAttributes(
- resource,
pt.Attributes(),
- scope,
settings,
- nil,
+ reservedLabelNames,
true,
- meta,
+ appOpts.Metadata,
model.MetricNameLabel,
- meta.MetricFamilyName,
+ appOpts.MetricFamilyName,
)
if err != nil {
return annots, err
@@ -72,8 +73,10 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
if err != nil {
return annots, err
}
- // OTel exponential histograms are always Int Histograms.
- if err = c.appender.AppendHistogram(lbls, meta, st, ts, hp, exemplars); err != nil {
+
+ appOpts.Exemplars = exemplars
+ // OTel exponential histograms are always integer histograms.
+ if _, err = c.appender.Append(0, lbls, st, ts, 0, hp, nil, appOpts); err != nil {
return annots, err
}
}
@@ -252,9 +255,12 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust
return spans, deltas
}
-func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
- resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality,
- scope scope, meta Metadata,
+func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.HistogramDataPointSlice,
+ settings Settings,
+ temporality pmetric.AggregationTemporality,
+ appOpts storage.AOptions,
) (annotations.Annotations, error) {
var annots annotations.Annotations
@@ -272,15 +278,13 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co
}
lbls, err := c.createAttributes(
- resource,
pt.Attributes(),
- scope,
settings,
- nil,
+ reservedLabelNames,
true,
- meta,
+ appOpts.Metadata,
model.MetricNameLabel,
- meta.MetricFamilyName,
+ appOpts.MetricFamilyName,
)
if err != nil {
return annots, err
@@ -291,7 +295,9 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co
if err != nil {
return annots, err
}
- if err = c.appender.AppendHistogram(lbls, meta, st, ts, hp, exemplars); err != nil {
+
+ appOpts.Exemplars = exemplars
+ if _, err = c.appender.Append(0, lbls, st, ts, 0, hp, nil, appOpts); err != nil {
return annots, err
}
}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
index 644ec2e01b..5422796002 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
@@ -32,6 +32,8 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
)
type expectedBucketLayout struct {
@@ -382,8 +384,8 @@ func TestConvertBucketsLayout(t *testing.T) {
for scaleDown, wantLayout := range tt.wantLayout {
t.Run(fmt.Sprintf("%s-scaleby-%d", tt.name, scaleDown), func(t *testing.T) {
gotSpans, gotDeltas := convertBucketsLayout(tt.buckets().BucketCounts().AsRaw(), tt.buckets().Offset(), scaleDown, true)
- requireEqual(t, wantLayout.wantSpans, gotSpans)
- requireEqual(t, wantLayout.wantDeltas, gotDeltas)
+ require.Equal(t, wantLayout.wantSpans, gotSpans)
+ require.Equal(t, wantLayout.wantDeltas, gotDeltas)
})
}
}
@@ -633,7 +635,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- wantSeries func() []combinedHistogram
+ wantSeries func() []sample
}{
{
name: "histogram data points with same labels and without scope promotion",
@@ -662,19 +664,19 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist",
"attr", "test_attr",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 7,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -682,15 +684,15 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{4, -2},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 4,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -698,7 +700,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
PositiveBuckets: []int64{4, -2, -1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -730,7 +732,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist",
"attr", "test_attr",
@@ -740,14 +742,14 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 7,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -755,15 +757,15 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{4, -2},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 4,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -771,7 +773,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
PositiveBuckets: []int64{4, -2, -1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -803,7 +805,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist",
"attr", "test_attr",
@@ -813,14 +815,14 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
"attr", "test_attr_two",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 7,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -828,15 +830,15 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{4, -2},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist",
- ls: labelsAnother,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist",
+ L: labelsAnother,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 4,
Schema: 1,
ZeroThreshold: defaultZeroThreshold,
@@ -844,7 +846,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
NegativeSpans: []histogram.Span{{Offset: 0, Length: 3}},
NegativeBuckets: []int64{4, -2, -1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -854,32 +856,37 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
namer := otlptranslator.MetricNamer{
WithMetricSuffixes: true,
}
name, err := namer.Build(TranslatorMetricFromOtelMetric(metric))
require.NoError(t, err)
+ settings := Settings{
+ PromoteScopeMetadata: tt.promoteScope,
+ }
+ resource := pcommon.NewResource()
+
+ // Initialize resource and scope context as FromMetrics would.
+ require.NoError(t, converter.setResourceContext(resource, settings))
+ require.NoError(t, converter.setScopeContext(tt.scope, settings))
+
annots, err := converter.addExponentialHistogramDataPoints(
context.Background(),
metric.ExponentialHistogram().DataPoints(),
- pcommon.NewResource(),
- Settings{
- PromoteScopeMetadata: tt.promoteScope,
- },
+ settings,
pmetric.AggregationTemporalityCumulative,
- tt.scope,
- Metadata{
+ storage.AOptions{
MetricFamilyName: name,
},
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.wantSeries(), mockAppender.histograms)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.wantSeries(), appTest.ResultSamples())
})
}
}
@@ -1106,7 +1113,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- wantSeries func() []combinedHistogram
+ wantSeries func() []sample
}{
{
name: "histogram data points with same labels and without scope promotion",
@@ -1135,19 +1142,19 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist_to_nhcb",
"attr", "test_attr",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 3,
Sum: 3,
Schema: -53,
@@ -1155,15 +1162,15 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{2, -2, 1},
CustomValues: []float64{5, 10},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 11,
Sum: 5,
Schema: -53,
@@ -1171,7 +1178,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{3, 5, -8},
CustomValues: []float64{0, 1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -1203,7 +1210,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist_to_nhcb",
"attr", "test_attr",
@@ -1213,14 +1220,14 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 3,
Sum: 3,
Schema: -53,
@@ -1228,15 +1235,15 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{2, -2, 1},
CustomValues: []float64{5, 10},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 11,
Sum: 5,
Schema: -53,
@@ -1244,7 +1251,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{3, 5, -8},
CustomValues: []float64{0, 1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -1276,7 +1283,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- wantSeries: func() []combinedHistogram {
+ wantSeries: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_hist_to_nhcb",
"attr", "test_attr",
@@ -1286,14 +1293,14 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
"attr", "test_attr_two",
)
- return []combinedHistogram{
+ return []sample{
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: lbls,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 6,
Sum: 3,
Schema: -53,
@@ -1301,15 +1308,15 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{4, -2},
CustomValues: []float64{0, 1},
},
- es: []exemplar.Exemplar{{Value: 1}},
+ ES: []exemplar.Exemplar{{Value: 1}},
},
{
- metricFamilyName: "test_hist_to_nhcb",
- ls: labelsAnother,
- meta: metadata.Metadata{},
- t: 0,
- st: 0,
- h: &histogram.Histogram{
+ MF: "test_hist_to_nhcb",
+ L: labelsAnother,
+ M: metadata.Metadata{},
+ T: 0,
+ ST: 0,
+ H: &histogram.Histogram{
Count: 11,
Sum: 5,
Schema: -53,
@@ -1317,7 +1324,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
PositiveBuckets: []int64{3, 5},
CustomValues: []float64{0, 1},
},
- es: []exemplar.Exemplar{{Value: 2}},
+ ES: []exemplar.Exemplar{{Value: 2}},
},
}
},
@@ -1327,24 +1334,30 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
namer := otlptranslator.MetricNamer{
WithMetricSuffixes: true,
}
name, err := namer.Build(TranslatorMetricFromOtelMetric(metric))
require.NoError(t, err)
+ settings := Settings{
+ ConvertHistogramsToNHCB: true,
+ PromoteScopeMetadata: tt.promoteScope,
+ }
+ resource := pcommon.NewResource()
+
+ // Initialize resource and scope context as FromMetrics would.
+ require.NoError(t, converter.setResourceContext(resource, settings))
+ require.NoError(t, converter.setScopeContext(tt.scope, settings))
+
annots, err := converter.addCustomBucketsHistogramDataPoints(
context.Background(),
metric.Histogram().DataPoints(),
- pcommon.NewResource(),
- Settings{
- ConvertHistogramsToNHCB: true,
- PromoteScopeMetadata: tt.promoteScope,
- },
+ settings,
pmetric.AggregationTemporalityCumulative,
- tt.scope,
- Metadata{
+ storage.AOptions{
MetricFamilyName: name,
},
)
@@ -1352,9 +1365,8 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.wantSeries(), mockAppender.histograms)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.wantSeries(), appTest.ResultSamples())
})
}
}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
index 41de42548a..600282af6f 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
@@ -26,11 +26,12 @@ import (
"github.com/prometheus/otlptranslator"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
- "go.uber.org/multierr"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -62,14 +63,41 @@ type Settings struct {
LabelNamePreserveMultipleUnderscores bool
}
+// cachedResourceLabels holds precomputed labels constant for all datapoints in a ResourceMetrics.
+// These are computed once per ResourceMetrics boundary and reused for all datapoints.
+type cachedResourceLabels struct {
+ jobLabel string // from service.name + service.namespace.
+ instanceLabel string // from service.instance.id.
+ promotedLabels labels.Labels // promoted resource attributes.
+ externalLabels map[string]string
+}
+
+// cachedScopeLabels holds precomputed scope metadata labels.
+// These are computed once per ScopeMetrics boundary and reused for all datapoints.
+type cachedScopeLabels struct {
+ scopeName string
+ scopeVersion string
+ scopeSchemaURL string
+ scopeAttrs labels.Labels // otel_scope_* labels.
+}
+
// PrometheusConverter converts from OTel write format to Prometheus remote write format.
type PrometheusConverter struct {
everyN everyNTimes
scratchBuilder labels.ScratchBuilder
builder *labels.Builder
- appender CombinedAppender
+ appender storage.AppenderV2
// seenTargetInfo tracks target_info samples within a batch to prevent duplicates.
seenTargetInfo map[targetInfoKey]struct{}
+
+ // Label caching for optimization - computed once per resource/scope boundary.
+ resourceLabels *cachedResourceLabels
+ scopeLabels *cachedScopeLabels
+ labelNamer otlptranslator.LabelNamer
+
+ // sanitizedLabels caches the results of label name sanitization within a request.
+ // This avoids repeated string allocations for the same label names.
+ sanitizedLabels map[string]string
}
// targetInfoKey uniquely identifies a target_info sample by its labelset and timestamp.
@@ -78,14 +106,29 @@ type targetInfoKey struct {
timestamp int64
}
-func NewPrometheusConverter(appender CombinedAppender) *PrometheusConverter {
+func NewPrometheusConverter(appender storage.AppenderV2) *PrometheusConverter {
return &PrometheusConverter{
- scratchBuilder: labels.NewScratchBuilder(0),
- builder: labels.NewBuilder(labels.EmptyLabels()),
- appender: appender,
+ scratchBuilder: labels.NewScratchBuilder(0),
+ builder: labels.NewBuilder(labels.EmptyLabels()),
+ appender: appender,
+ sanitizedLabels: make(map[string]string, 64), // Pre-size for typical label count.
}
}
+// buildLabelName returns a sanitized label name, using the cache to avoid repeated allocations.
+func (c *PrometheusConverter) buildLabelName(label string) (string, error) {
+ if sanitized, ok := c.sanitizedLabels[label]; ok {
+ return sanitized, nil
+ }
+
+ sanitized, err := c.labelNamer.Build(label)
+ if err != nil {
+ return "", err
+ }
+ c.sanitizedLabels[label] = sanitized
+ return sanitized, nil
+}
+
func TranslatorMetricFromOtelMetric(metric pmetric.Metric) otlptranslator.Metric {
m := otlptranslator.Metric{
Name: metric.Name(),
@@ -128,7 +171,7 @@ func newScopeFromScopeMetrics(scopeMetrics pmetric.ScopeMetrics) scope {
}
}
-// FromMetrics converts pmetric.Metrics to Prometheus remote write format.
+// FromMetrics appends pmetric.Metrics to storage.AppenderV2.
func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) {
namer := otlptranslator.MetricNamer{
Namespace: settings.Namespace,
@@ -140,23 +183,33 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
c.seenTargetInfo = make(map[targetInfoKey]struct{})
resourceMetricsSlice := md.ResourceMetrics()
- for i := 0; i < resourceMetricsSlice.Len(); i++ {
+ for i := range resourceMetricsSlice.Len() {
resourceMetrics := resourceMetricsSlice.At(i)
resource := resourceMetrics.Resource()
scopeMetricsSlice := resourceMetrics.ScopeMetrics()
+ if err := c.setResourceContext(resource, settings); err != nil {
+ errs = errors.Join(errs, err)
+ continue
+ }
+
// keep track of the earliest and latest timestamp in the ResourceMetrics for
// use with the "target" info metric
earliestTimestamp := pcommon.Timestamp(math.MaxUint64)
latestTimestamp := pcommon.Timestamp(0)
- for j := 0; j < scopeMetricsSlice.Len(); j++ {
+ for j := range scopeMetricsSlice.Len() {
scopeMetrics := scopeMetricsSlice.At(j)
scope := newScopeFromScopeMetrics(scopeMetrics)
+ if err := c.setScopeContext(scope, settings); err != nil {
+ errs = errors.Join(errs, err)
+ continue
+ }
+
metricSlice := scopeMetrics.Metrics()
// TODO: decide if instrumentation library information should be exported as labels
for k := 0; k < metricSlice.Len(); k++ {
if err := c.everyN.checkContext(ctx); err != nil {
- errs = multierr.Append(errs, err)
+ errs = errors.Join(errs, err)
return annots, errs
}
@@ -164,7 +217,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
earliestTimestamp, latestTimestamp = findMinAndMaxTimestamps(metric, earliestTimestamp, latestTimestamp)
temporality, hasTemporality, err := aggregationTemporality(metric)
if err != nil {
- errs = multierr.Append(errs, err)
+ errs = errors.Join(errs, err)
continue
}
@@ -175,16 +228,17 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
//nolint:staticcheck // QF1001 Applying De Morgan’s law would make the conditions harder to read.
!(temporality == pmetric.AggregationTemporalityCumulative ||
(settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) {
- errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
+ errs = errors.Join(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
continue
}
promName, err := namer.Build(TranslatorMetricFromOtelMetric(metric))
if err != nil {
- errs = multierr.Append(errs, err)
+ errs = errors.Join(errs, err)
continue
}
- meta := Metadata{
+
+ appOpts := storage.AOptions{
Metadata: metadata.Metadata{
Type: otelMetricTypeToPromMetricType(metric),
Unit: unitNamer.Build(metric.Unit()),
@@ -199,11 +253,11 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
case pmetric.MetricTypeGauge:
dataPoints := metric.Gauge().DataPoints()
if dataPoints.Len() == 0 {
- errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
+ errs = errors.Join(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
- errs = multierr.Append(errs, err)
+ if err := c.addGaugeNumberDataPoints(ctx, dataPoints, settings, appOpts); err != nil {
+ errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
}
@@ -211,11 +265,11 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
case pmetric.MetricTypeSum:
dataPoints := metric.Sum().DataPoints()
if dataPoints.Len() == 0 {
- errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
+ errs = errors.Join(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
- errs = multierr.Append(errs, err)
+ if err := c.addSumNumberDataPoints(ctx, dataPoints, settings, appOpts); err != nil {
+ errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
}
@@ -223,23 +277,23 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
case pmetric.MetricTypeHistogram:
dataPoints := metric.Histogram().DataPoints()
if dataPoints.Len() == 0 {
- errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
+ errs = errors.Join(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
if settings.ConvertHistogramsToNHCB {
ws, err := c.addCustomBucketsHistogramDataPoints(
- ctx, dataPoints, resource, settings, temporality, scope, meta,
+ ctx, dataPoints, settings, temporality, appOpts,
)
annots.Merge(ws)
if err != nil {
- errs = multierr.Append(errs, err)
+ errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
}
}
} else {
- if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
- errs = multierr.Append(errs, err)
+ if err := c.addHistogramDataPoints(ctx, dataPoints, settings, appOpts); err != nil {
+ errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
}
@@ -248,21 +302,19 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
case pmetric.MetricTypeExponentialHistogram:
dataPoints := metric.ExponentialHistogram().DataPoints()
if dataPoints.Len() == 0 {
- errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
+ errs = errors.Join(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
ws, err := c.addExponentialHistogramDataPoints(
ctx,
dataPoints,
- resource,
settings,
temporality,
- scope,
- meta,
+ appOpts,
)
annots.Merge(ws)
if err != nil {
- errs = multierr.Append(errs, err)
+ errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
}
@@ -270,17 +322,17 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
case pmetric.MetricTypeSummary:
dataPoints := metric.Summary().DataPoints()
if dataPoints.Len() == 0 {
- errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
+ errs = errors.Join(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
- errs = multierr.Append(errs, err)
+ if err := c.addSummaryDataPoints(ctx, dataPoints, settings, appOpts); err != nil {
+ errs = errors.Join(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return annots, errs
}
}
default:
- errs = multierr.Append(errs, errors.New("unsupported metric type"))
+ errs = errors.Join(errs, errors.New("unsupported metric type"))
}
}
}
@@ -288,7 +340,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
// We have at least one metric sample for this resource.
// Generate a corresponding target_info series.
if err := c.addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime()); err != nil {
- errs = multierr.Append(errs, err)
+ errs = errors.Join(errs, err)
}
}
}
@@ -311,8 +363,11 @@ func NewPromoteResourceAttributes(otlpCfg config.OTLPConfig) *PromoteResourceAtt
}
}
+// LabelNameBuilder is a function that builds/sanitizes label names.
+type LabelNameBuilder func(string) (string, error)
+
// addPromotedAttributes adds labels for promoted resourceAttributes to the builder.
-func (s *PromoteResourceAttributes) addPromotedAttributes(builder *labels.Builder, resourceAttributes pcommon.Map, labelNamer otlptranslator.LabelNamer) error {
+func (s *PromoteResourceAttributes) addPromotedAttributes(builder *labels.Builder, resourceAttributes pcommon.Map, buildLabelName LabelNameBuilder) error {
if s == nil {
return nil
}
@@ -322,13 +377,11 @@ func (s *PromoteResourceAttributes) addPromotedAttributes(builder *labels.Builde
resourceAttributes.Range(func(name string, value pcommon.Value) bool {
if _, exists := s.attrs[name]; !exists {
var normalized string
- normalized, err = labelNamer.Build(name)
+ normalized, err = buildLabelName(name)
if err != nil {
return false
}
- if builder.Get(normalized) == "" {
- builder.Set(normalized, value.AsString())
- }
+ builder.Set(normalized, value.AsString())
}
return true
})
@@ -338,15 +391,91 @@ func (s *PromoteResourceAttributes) addPromotedAttributes(builder *labels.Builde
resourceAttributes.Range(func(name string, value pcommon.Value) bool {
if _, exists := s.attrs[name]; exists {
var normalized string
- normalized, err = labelNamer.Build(name)
+ normalized, err = buildLabelName(name)
if err != nil {
return false
}
- if builder.Get(normalized) == "" {
- builder.Set(normalized, value.AsString())
- }
+ builder.Set(normalized, value.AsString())
}
return true
})
return err
}
+
+// setResourceContext precomputes and caches resource-level labels.
+// Called once per ResourceMetrics boundary, before processing any datapoints.
+// If an error is returned, resource level cache is reset.
+func (c *PrometheusConverter) setResourceContext(resource pcommon.Resource, settings Settings) error {
+ resourceAttrs := resource.Attributes()
+ c.resourceLabels = &cachedResourceLabels{
+ externalLabels: settings.ExternalLabels,
+ }
+
+ c.labelNamer = otlptranslator.LabelNamer{
+ UTF8Allowed: settings.AllowUTF8,
+ UnderscoreLabelSanitization: settings.LabelNameUnderscoreSanitization,
+ PreserveMultipleUnderscores: settings.LabelNamePreserveMultipleUnderscores,
+ }
+
+ if serviceName, ok := resourceAttrs.Get(string(semconv.ServiceNameKey)); ok {
+ val := serviceName.AsString()
+ if serviceNamespace, ok := resourceAttrs.Get(string(semconv.ServiceNamespaceKey)); ok {
+ val = serviceNamespace.AsString() + "/" + val
+ }
+ c.resourceLabels.jobLabel = val
+ }
+
+ if instance, ok := resourceAttrs.Get(string(semconv.ServiceInstanceIDKey)); ok {
+ c.resourceLabels.instanceLabel = instance.AsString()
+ }
+
+ if settings.PromoteResourceAttributes != nil {
+ c.builder.Reset(labels.EmptyLabels())
+ if err := settings.PromoteResourceAttributes.addPromotedAttributes(c.builder, resourceAttrs, c.buildLabelName); err != nil {
+ c.clearResourceContext()
+ return err
+ }
+ c.resourceLabels.promotedLabels = c.builder.Labels()
+ }
+ return nil
+}
+
+// setScopeContext precomputes and caches scope-level labels.
+// Called once per ScopeMetrics boundary, before processing any metrics.
+// If an error is returned, scope level cache is reset.
+func (c *PrometheusConverter) setScopeContext(scope scope, settings Settings) error {
+ if !settings.PromoteScopeMetadata || scope.name == "" {
+ c.scopeLabels = nil
+ return nil
+ }
+
+ c.scopeLabels = &cachedScopeLabels{
+ scopeName: scope.name,
+ scopeVersion: scope.version,
+ scopeSchemaURL: scope.schemaURL,
+ }
+ c.builder.Reset(labels.EmptyLabels())
+ var err error
+ scope.attributes.Range(func(k string, v pcommon.Value) bool {
+ var name string
+ name, err = c.buildLabelName("otel_scope_" + k)
+ if err != nil {
+ return false
+ }
+ c.builder.Set(name, v.AsString())
+ return true
+ })
+ if err != nil {
+ c.scopeLabels = nil
+ return err
+ }
+
+ c.scopeLabels.scopeAttrs = c.builder.Labels()
+ return nil
+}
+
+// clearResourceContext clears cached labels between ResourceMetrics.
+func (c *PrometheusConverter) clearResourceContext() {
+ c.resourceLabels = nil
+ c.scopeLabels = nil
+}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
index 8eb0029dd7..647105e640 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
@@ -22,20 +22,19 @@ import (
"testing"
"time"
- "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
- "github.com/prometheus/common/promslog"
"github.com/prometheus/otlptranslator"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
- "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
)
func TestFromMetrics(t *testing.T) {
@@ -81,8 +80,9 @@ func TestFromMetrics(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
payload, wantPromMetrics := createExportRequest(5, 128, 128, 2, 0, tc.settings, tc.temporality)
seenFamilyNames := map[string]struct{}{}
for _, wantMetric := range wantPromMetrics {
@@ -104,14 +104,14 @@ func TestFromMetrics(t *testing.T) {
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
- ts := mockAppender.samples
- require.Len(t, ts, 1536+1) // +1 for the target_info.
+ got := appTest.ResultSamples()
+ require.Len(t, got, 1536+1) // +1 for the target_info.
tgtInfoCount := 0
- for _, s := range ts {
- lbls := s.ls
+ for _, s := range got {
+ lbls := s.L
if lbls.Get(labels.MetricName) == "target_info" {
tgtInfoCount++
require.Equal(t, "test-namespace/test-service", lbls.Get("job"))
@@ -150,11 +150,14 @@ func TestFromMetrics(t *testing.T) {
h.SetCount(15)
h.SetSum(155)
+ h.BucketCounts().FromRaw([]uint64{3, 11, 0})
+ h.ExplicitBounds().FromRaw([]float64{0.124, 1.123})
generateAttributes(h.Attributes(), "series", 1)
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -162,21 +165,56 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
- if convertHistogramsToNHCB {
- require.Len(t, mockAppender.histograms, 1)
- require.Empty(t, mockAppender.samples)
- } else {
- require.Empty(t, mockAppender.histograms)
- require.Len(t, mockAppender.samples, 3)
+ expectedSamples := []sample{
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_sum", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 155,
+ },
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_count", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 15,
+ },
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_bucket", "le", "0.124", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 3,
+ },
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_bucket", "le", "1.123", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 14,
+ },
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1_bucket", "le", "+Inf", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), V: 15,
+ },
}
+ if convertHistogramsToNHCB {
+ expectedSamples = []sample{
+ {
+ MF: "histogram_1", M: metadata.Metadata{Type: model.MetricTypeHistogram},
+ L: labels.FromStrings("__name__", "histogram_1", "series_name_1", "value-1"),
+ T: ts.AsTime().UnixMilli(), H: &histogram.Histogram{
+ Schema: -53, Count: 15, Sum: 155,
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
+ PositiveBuckets: []int64{3, 8, -11},
+ CustomValues: []float64{0.124, 1.123},
+ },
+ },
+ }
+ }
+ teststorage.RequireEqual(t, expectedSamples, appTest.ResultSamples())
})
}
t.Run("context cancellation", func(t *testing.T) {
settings := Settings{}
- converter := NewPrometheusConverter(&mockCombinedAppender{})
+ converter := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
ctx, cancel := context.WithCancel(context.Background())
// Verify that converter.FromMetrics respects cancellation.
cancel()
@@ -189,7 +227,7 @@ func TestFromMetrics(t *testing.T) {
t.Run("context timeout", func(t *testing.T) {
settings := Settings{}
- converter := NewPrometheusConverter(&mockCombinedAppender{})
+ converter := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
// Verify that converter.FromMetrics respects timeout.
ctx, cancel := context.WithTimeout(context.Background(), 0)
t.Cleanup(cancel)
@@ -222,7 +260,7 @@ func TestFromMetrics(t *testing.T) {
generateAttributes(h.Attributes(), "series", 10)
}
- converter := NewPrometheusConverter(&mockCombinedAppender{})
+ converter := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{})
require.NoError(t, err)
require.NotEmpty(t, annots)
@@ -255,7 +293,7 @@ func TestFromMetrics(t *testing.T) {
generateAttributes(h.Attributes(), "series", 10)
}
- converter := NewPrometheusConverter(&mockCombinedAppender{})
+ converter := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -303,8 +341,9 @@ func TestFromMetrics(t *testing.T) {
}
}
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -314,8 +353,11 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
- require.Len(t, mockAppender.samples, 22)
+ require.NoError(t, app.Commit())
+
+ got := appTest.ResultSamples()
+ require.Len(t, got, 22)
+
// There should be a target_info sample at the earliest metric timestamp, then two spaced lookback delta/2 apart,
// then one at the latest metric timestamp.
targetInfoLabels := labels.FromStrings(
@@ -332,36 +374,36 @@ func TestFromMetrics(t *testing.T) {
Type: model.MetricTypeGauge,
Help: "Target metadata",
}
- requireEqual(t, []combinedSample{
+ teststorage.RequireEqual(t, []sample{
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
- }, mockAppender.samples[len(mockAppender.samples)-4:])
+ }, got[len(got)-4:])
})
t.Run("target_info deduplication across multiple resources with same labels", func(t *testing.T) {
@@ -403,8 +445,9 @@ func TestFromMetrics(t *testing.T) {
generateAttributes(point2.Attributes(), "series", 1)
}
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
annots, err := converter.FromMetrics(
context.Background(),
request.Metrics(),
@@ -414,11 +457,11 @@ func TestFromMetrics(t *testing.T) {
)
require.NoError(t, err)
require.Empty(t, annots)
- require.NoError(t, mockAppender.Commit())
+ require.NoError(t, app.Commit())
- var targetInfoSamples []combinedSample
- for _, s := range mockAppender.samples {
- if s.ls.Get(labels.MetricName) == "target_info" {
+ var targetInfoSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "target_info" {
targetInfoSamples = append(targetInfoSamples, s)
}
}
@@ -439,36 +482,244 @@ func TestFromMetrics(t *testing.T) {
Type: model.MetricTypeGauge,
Help: "Target metadata",
}
- requireEqual(t, []combinedSample{
+ teststorage.RequireEqual(t, []sample{
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
{
- metricFamilyName: "target_info",
- v: 1,
- t: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
- ls: targetInfoLabels,
- meta: targetInfoMeta,
+ MF: "target_info",
+ V: 1,
+ T: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
+ L: targetInfoLabels,
+ M: targetInfoMeta,
},
}, targetInfoSamples)
})
+
+ t.Run("target_info should not include scope labels when PromoteScopeMetadata is enabled", func(t *testing.T) {
+ // Regression test: When PromoteScopeMetadata is enabled and a scope has a non-empty name,
+ // the cached scopeLabels should NOT be merged into target_info.
+ request := pmetricotlp.NewExportRequest()
+ rm := request.Metrics().ResourceMetrics().AppendEmpty()
+
+ // Set up resource attributes for job/instance labels.
+ rm.Resource().Attributes().PutStr("service.name", "test-service")
+ rm.Resource().Attributes().PutStr("service.instance.id", "instance-1")
+ generateAttributes(rm.Resource().Attributes(), "resource", 2)
+
+ // Create a scope with a non-empty name (this triggers scope label caching).
+ scopeMetrics := rm.ScopeMetrics().AppendEmpty()
+ scope := scopeMetrics.Scope()
+ scope.SetName("my-scope")
+ scope.SetVersion("1.0.0")
+ scope.Attributes().PutStr("scope-attr", "scope-value")
+
+ // Add a metric.
+ ts := pcommon.NewTimestampFromTime(time.Now())
+ m := scopeMetrics.Metrics().AppendEmpty()
+ m.SetEmptyGauge()
+ m.SetName("test_gauge")
+ m.SetDescription("test gauge")
+ point := m.Gauge().DataPoints().AppendEmpty()
+ point.SetTimestamp(ts)
+ point.SetDoubleValue(1.0)
+
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ annots, err := converter.FromMetrics(
+ context.Background(),
+ request.Metrics(),
+ Settings{
+ PromoteScopeMetadata: true,
+ LookbackDelta: defaultLookbackDelta,
+ },
+ )
+ require.NoError(t, err)
+ require.Empty(t, annots)
+ require.NoError(t, app.Commit())
+
+ // Find target_info samples.
+ var targetInfoSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "target_info" {
+ targetInfoSamples = append(targetInfoSamples, s)
+ }
+ }
+ require.NotEmpty(t, targetInfoSamples, "expected target_info samples")
+
+ // Verify target_info does NOT have scope labels.
+ for _, s := range targetInfoSamples {
+ require.Empty(t, s.L.Get("otel_scope_name"), "target_info should not have otel_scope_name")
+ require.Empty(t, s.L.Get("otel_scope_version"), "target_info should not have otel_scope_version")
+ require.Empty(t, s.L.Get("otel_scope_schema_url"), "target_info should not have otel_scope_schema_url")
+ require.Empty(t, s.L.Get("otel_scope_scope_attr"), "target_info should not have scope attributes")
+ }
+
+ // Verify the metric itself DOES have scope labels.
+ var metricSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "test_gauge" {
+ metricSamples = append(metricSamples, s)
+ }
+ }
+
+ require.NotEmpty(t, metricSamples, "expected metric samples")
+ require.Equal(t, "my-scope", metricSamples[0].L.Get("otel_scope_name"), "metric should have otel_scope_name")
+ require.Equal(t, "1.0.0", metricSamples[0].L.Get("otel_scope_version"), "metric should have otel_scope_version")
+ })
+
+ t.Run("target_info should include promoted resource attributes", func(t *testing.T) {
+ // Promoted resource attributes should appear on both metrics and target_info.
+ request := pmetricotlp.NewExportRequest()
+ rm := request.Metrics().ResourceMetrics().AppendEmpty()
+
+ // Set up resource attributes.
+ rm.Resource().Attributes().PutStr("service.name", "test-service")
+ rm.Resource().Attributes().PutStr("service.instance.id", "instance-1")
+ rm.Resource().Attributes().PutStr("custom.promoted.attr", "promoted-value")
+ rm.Resource().Attributes().PutStr("another.resource.attr", "another-value")
+
+ // Add a metric.
+ ts := pcommon.NewTimestampFromTime(time.Now())
+ scopeMetrics := rm.ScopeMetrics().AppendEmpty()
+ m := scopeMetrics.Metrics().AppendEmpty()
+ m.SetEmptyGauge()
+ m.SetName("test_gauge")
+ m.SetDescription("test gauge")
+ point := m.Gauge().DataPoints().AppendEmpty()
+ point.SetTimestamp(ts)
+ point.SetDoubleValue(1.0)
+
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ annots, err := converter.FromMetrics(
+ context.Background(),
+ request.Metrics(),
+ Settings{
+ PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{
+ PromoteResourceAttributes: []string{"custom.promoted.attr"},
+ }),
+ LookbackDelta: defaultLookbackDelta,
+ },
+ )
+ require.NoError(t, err)
+ require.Empty(t, annots)
+ require.NoError(t, app.Commit())
+
+ // Find target_info samples.
+ var targetInfoSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "target_info" {
+ targetInfoSamples = append(targetInfoSamples, s)
+ }
+ }
+ require.NotEmpty(t, targetInfoSamples, "expected target_info samples")
+
+ // Verify target_info has the promoted resource attribute.
+ for _, s := range targetInfoSamples {
+ require.Equal(t, "promoted-value", s.L.Get("custom_promoted_attr"), "target_info should have promoted resource attributes")
+ require.Equal(t, "another-value", s.L.Get("another_resource_attr"), "target_info should have non-promoted resource attributes")
+ }
+
+ // Verify the metric also has the promoted resource attribute.
+ var metricSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "test_gauge" {
+ metricSamples = append(metricSamples, s)
+ }
+ }
+ require.NotEmpty(t, metricSamples, "expected metric samples")
+ require.Equal(t, "promoted-value", metricSamples[0].L.Get("custom_promoted_attr"), "metric should have promoted resource attribute")
+ })
+
+ t.Run("target_info should include promoted attributes when KeepIdentifyingResourceAttributes is enabled", func(t *testing.T) {
+ // When both PromoteResourceAttributes and KeepIdentifyingResourceAttributes are configured,
+ // target_info should include both the promoted attributes and the identifying attributes.
+ request := pmetricotlp.NewExportRequest()
+ rm := request.Metrics().ResourceMetrics().AppendEmpty()
+
+ rm.Resource().Attributes().PutStr("service.name", "test-service")
+ rm.Resource().Attributes().PutStr("service.namespace", "test-namespace")
+ rm.Resource().Attributes().PutStr("service.instance.id", "instance-1")
+ rm.Resource().Attributes().PutStr("custom.promoted.attr", "promoted-value")
+ rm.Resource().Attributes().PutStr("another.resource.attr", "another-value")
+
+ // Add a metric.
+ ts := pcommon.NewTimestampFromTime(time.Now())
+ scopeMetrics := rm.ScopeMetrics().AppendEmpty()
+ m := scopeMetrics.Metrics().AppendEmpty()
+ m.SetEmptyGauge()
+ m.SetName("test_gauge")
+ m.SetDescription("test gauge")
+ point := m.Gauge().DataPoints().AppendEmpty()
+ point.SetTimestamp(ts)
+ point.SetDoubleValue(1.0)
+
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ annots, err := converter.FromMetrics(
+ context.Background(),
+ request.Metrics(),
+ Settings{
+ PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{
+ PromoteResourceAttributes: []string{"custom.promoted.attr"},
+ }),
+ KeepIdentifyingResourceAttributes: true,
+ LookbackDelta: defaultLookbackDelta,
+ },
+ )
+ require.NoError(t, err)
+ require.Empty(t, annots)
+ require.NoError(t, app.Commit())
+
+ var targetInfoSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "target_info" {
+ targetInfoSamples = append(targetInfoSamples, s)
+ }
+ }
+ require.NotEmpty(t, targetInfoSamples, "expected target_info samples")
+
+ // Verify target_info has the promoted resource attribute.
+ for _, s := range targetInfoSamples {
+ require.Equal(t, "promoted-value", s.L.Get("custom_promoted_attr"), "target_info should have promoted resource attributes")
+ // And it should have the identifying attributes (since KeepIdentifyingResourceAttributes is true).
+ require.Equal(t, "test-service", s.L.Get("service_name"), "target_info should have service.name when KeepIdentifyingResourceAttributes is true")
+ require.Equal(t, "test-namespace", s.L.Get("service_namespace"), "target_info should have service.namespace when KeepIdentifyingResourceAttributes is true")
+ require.Equal(t, "instance-1", s.L.Get("service_instance_id"), "target_info should have service.instance.id when KeepIdentifyingResourceAttributes is true")
+ // And the non-promoted resource attribute.
+ require.Equal(t, "another-value", s.L.Get("another_resource_attr"), "target_info should have non-promoted resource attributes")
+ }
+
+ // Verify the metric also has the promoted resource attribute.
+ var metricSamples []sample
+ for _, s := range appTest.ResultSamples() {
+ if s.L.Get(labels.MetricName) == "test_gauge" {
+ metricSamples = append(metricSamples, s)
+ }
+ }
+ require.NotEmpty(t, metricSamples, "expected metric samples")
+ require.Equal(t, "promoted-value", metricSamples[0].L.Get("custom_promoted_attr"), "metric should have promoted resource attribute")
+ })
}
func TestTemporality(t *testing.T) {
ts := time.Unix(100, 0)
tests := []struct {
- name string
- allowDelta bool
- convertToNHCB bool
- inputSeries []pmetric.Metric
- expectedSamples []combinedSample
- expectedHistograms []combinedHistogram
- expectedError string
+ name string
+ allowDelta bool
+ convertToNHCB bool
+ inputSeries []pmetric.Metric
+ expectedSamples []sample
+ expectedError string
}{
{
name: "all cumulative when delta not allowed",
@@ -477,7 +728,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter),
createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter),
},
@@ -489,7 +740,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown),
createPromFloatSeries("test_metric_2", ts, model.MetricTypeUnknown),
},
@@ -501,7 +752,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown),
createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter),
},
@@ -513,7 +764,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter),
},
expectedError: `invalid temporality and type combination for metric "test_metric_2"`,
@@ -525,7 +776,7 @@ func TestTemporality(t *testing.T) {
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityUnspecified, ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter),
},
expectedError: `invalid temporality and type combination for metric "test_metric_2"`,
@@ -536,7 +787,7 @@ func TestTemporality(t *testing.T) {
inputSeries: []pmetric.Metric{
createOtelExponentialHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNativeHistogramSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
},
@@ -547,7 +798,7 @@ func TestTemporality(t *testing.T) {
createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNativeHistogramSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown),
createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
@@ -559,7 +810,7 @@ func TestTemporality(t *testing.T) {
createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
expectedError: `invalid temporality and type combination for metric "test_histogram_1"`,
@@ -571,7 +822,7 @@ func TestTemporality(t *testing.T) {
inputSeries: []pmetric.Metric{
createOtelExplicitHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNHCBSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
},
@@ -583,7 +834,7 @@ func TestTemporality(t *testing.T) {
createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNHCBSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown),
createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
@@ -596,7 +847,7 @@ func TestTemporality(t *testing.T) {
createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
- expectedHistograms: []combinedHistogram{
+ expectedSamples: []sample{
createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram),
},
expectedError: `invalid temporality and type combination for metric "test_histogram_1"`,
@@ -637,7 +888,7 @@ func TestTemporality(t *testing.T) {
inputSeries: []pmetric.Metric{
createOtelGauge("test_gauge_1", ts),
},
- expectedSamples: []combinedSample{
+ expectedSamples: []sample{
createPromFloatSeries("test_gauge_1", ts, model.MetricTypeGauge),
},
},
@@ -660,25 +911,22 @@ func TestTemporality(t *testing.T) {
s.CopyTo(sm.Metrics().AppendEmpty())
}
- mockAppender := &mockCombinedAppender{}
- c := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ c := NewPrometheusConverter(app)
settings := Settings{
AllowDeltaTemporality: tc.allowDelta,
ConvertHistogramsToNHCB: tc.convertToNHCB,
}
_, err := c.FromMetrics(context.Background(), metrics, settings)
-
if tc.expectedError != "" {
require.EqualError(t, err, tc.expectedError)
} else {
require.NoError(t, err)
}
- require.NoError(t, mockAppender.Commit())
-
- // Sort series to make the test deterministic.
- requireEqual(t, tc.expectedSamples, mockAppender.samples)
- requireEqual(t, tc.expectedHistograms, mockAppender.histograms)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tc.expectedSamples, appTest.ResultSamples())
})
}
}
@@ -697,13 +945,13 @@ func createOtelSum(name string, temporality pmetric.AggregationTemporality, ts t
return m
}
-func createPromFloatSeries(name string, ts time.Time, typ model.MetricType) combinedSample {
- return combinedSample{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name, "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 5,
- meta: metadata.Metadata{
+func createPromFloatSeries(name string, ts time.Time, typ model.MetricType) sample {
+ return sample{
+ MF: name,
+ L: labels.FromStrings("__name__", name, "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 5,
+ M: metadata.Metadata{
Type: typ,
},
}
@@ -735,15 +983,15 @@ func createOtelExponentialHistogram(name string, temporality pmetric.Aggregation
return m
}
-func createPromNativeHistogramSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram {
- return combinedHistogram{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name, "test_label", "test_value"),
- t: ts.UnixMilli(),
- meta: metadata.Metadata{
+func createPromNativeHistogramSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) sample {
+ return sample{
+ MF: name,
+ L: labels.FromStrings("__name__", name, "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ M: metadata.Metadata{
Type: typ,
},
- h: &histogram.Histogram{
+ H: &histogram.Histogram{
Count: 1,
Sum: 5,
Schema: 0,
@@ -770,15 +1018,15 @@ func createOtelExplicitHistogram(name string, temporality pmetric.AggregationTem
return m
}
-func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram {
- return combinedHistogram{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name, "test_label", "test_value"),
- meta: metadata.Metadata{
+func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) sample {
+ return sample{
+ MF: name,
+ L: labels.FromStrings("__name__", name, "test_label", "test_value"),
+ M: metadata.Metadata{
Type: typ,
},
- t: ts.UnixMilli(),
- h: &histogram.Histogram{
+ T: ts.UnixMilli(),
+ H: &histogram.Histogram{
Count: 20,
Sum: 30,
Schema: -53,
@@ -795,50 +1043,50 @@ func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.
}
}
-func createPromClassicHistogramSeries(name string, ts time.Time, typ model.MetricType) []combinedSample {
- return []combinedSample{
+func createPromClassicHistogramSeries(name string, ts time.Time, typ model.MetricType) []sample {
+ return []sample{
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 30,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 30,
+ M: metadata.Metadata{
Type: typ,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 20,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 20,
+ M: metadata.Metadata{
Type: typ,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_bucket", "le", "1", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 10,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_bucket", "le", "1", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 10,
+ M: metadata.Metadata{
Type: typ,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_bucket", "le", "2", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 20,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_bucket", "le", "2", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 20,
+ M: metadata.Metadata{
Type: typ,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_bucket", "le", "+Inf", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 20,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_bucket", "le", "+Inf", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 20,
+ M: metadata.Metadata{
Type: typ,
},
},
@@ -861,32 +1109,32 @@ func createOtelSummary(name string, ts time.Time) pmetric.Metric {
return m
}
-func createPromSummarySeries(name string, ts time.Time) []combinedSample {
- return []combinedSample{
+func createPromSummarySeries(name string, ts time.Time) []sample {
+ return []sample{
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 18,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 18,
+ M: metadata.Metadata{
Type: model.MetricTypeSummary,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 9,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 9,
+ M: metadata.Metadata{
Type: model.MetricTypeSummary,
},
},
{
- metricFamilyName: name,
- ls: labels.FromStrings("__name__", name, "quantile", "0.5", "test_label", "test_value"),
- t: ts.UnixMilli(),
- v: 2,
- meta: metadata.Metadata{
+ MF: name,
+ L: labels.FromStrings("__name__", name, "quantile", "0.5", "test_label", "test_value"),
+ T: ts.UnixMilli(),
+ V: 2,
+ M: metadata.Metadata{
Type: model.MetricTypeSummary,
},
},
@@ -1033,54 +1281,57 @@ func createOTelEmptyMetricForTranslator(name string) pmetric.Metric {
return m
}
+// Recommended CLI invocation(s):
+/*
+ export bench=fromMetrics && go test ./storage/remote/otlptranslator/prometheusremotewrite/... \
+ -run '^$' -bench '^BenchmarkPrometheusConverter_FromMetrics' \
+ -benchtime 1s -count 6 -cpu 2 -timeout 999m -benchmem \
+ | tee ${bench}.txt
+*/
func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
for _, resourceAttributeCount := range []int{0, 5, 50} {
b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) {
- for _, histogramCount := range []int{0, 1000} {
- b.Run(fmt.Sprintf("histogram count: %v", histogramCount), func(b *testing.B) {
- nonHistogramCounts := []int{0, 1000}
+ for _, metricCount := range []struct {
+ histogramCount int
+ nonHistogramCount int
+ }{
+ {histogramCount: 0, nonHistogramCount: 1000},
+ {histogramCount: 1000, nonHistogramCount: 0},
+ {histogramCount: 1000, nonHistogramCount: 1000},
+ } {
+ b.Run(fmt.Sprintf("histogram count: %v/non-histogram count: %v", metricCount.histogramCount, metricCount.nonHistogramCount), func(b *testing.B) {
+ for _, labelsPerMetric := range []int{2, 20} {
+ b.Run(fmt.Sprintf("labels per metric: %v", labelsPerMetric), func(b *testing.B) {
+ for _, exemplarsPerSeries := range []int{0, 5, 10} {
+ b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) {
+ settings := Settings{}
+ payload, _ := createExportRequest(
+ resourceAttributeCount,
+ metricCount.histogramCount,
+ metricCount.nonHistogramCount,
+ labelsPerMetric,
+ exemplarsPerSeries,
+ settings,
+ pmetric.AggregationTemporalityCumulative,
+ )
- if resourceAttributeCount == 0 && histogramCount == 0 {
- // Don't bother running a scenario where we'll generate no series.
- nonHistogramCounts = []int{1000}
- }
+ b.ResetTimer()
+ for b.Loop() {
+ app := &noOpAppender{}
+ converter := NewPrometheusConverter(app)
+ annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
+ require.NoError(b, err)
+ require.Empty(b, annots)
- for _, nonHistogramCount := range nonHistogramCounts {
- b.Run(fmt.Sprintf("non-histogram count: %v", nonHistogramCount), func(b *testing.B) {
- for _, labelsPerMetric := range []int{2, 20} {
- b.Run(fmt.Sprintf("labels per metric: %v", labelsPerMetric), func(b *testing.B) {
- for _, exemplarsPerSeries := range []int{0, 5, 10} {
- b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) {
- settings := Settings{}
- payload, _ := createExportRequest(
- resourceAttributeCount,
- histogramCount,
- nonHistogramCount,
- labelsPerMetric,
- exemplarsPerSeries,
- settings,
- pmetric.AggregationTemporalityCumulative,
- )
- appMetrics := NewCombinedAppenderMetrics(prometheus.NewRegistry())
- noOpLogger := promslog.NewNopLogger()
- b.ResetTimer()
-
- for b.Loop() {
- app := &noOpAppender{}
- mockAppender := NewCombinedAppender(app, noOpLogger, false, false, appMetrics)
- converter := NewPrometheusConverter(mockAppender)
- annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
- require.NoError(b, err)
- require.Empty(b, annots)
- if histogramCount+nonHistogramCount > 0 {
- require.Positive(b, app.samples+app.histograms)
- require.Positive(b, app.metadata)
- } else {
- require.Zero(b, app.samples+app.histograms)
- require.Zero(b, app.metadata)
- }
- }
- })
+ // TODO(bwplotka): This should be tested somewhere else, otherwise we benchmark
+ // mock too.
+ if metricCount.histogramCount+metricCount.nonHistogramCount > 0 {
+ require.Positive(b, app.samples+app.histograms)
+ require.Positive(b, app.metadata)
+ } else {
+ require.Zero(b, app.samples+app.histograms)
+ require.Zero(b, app.metadata)
+ }
}
})
}
@@ -1098,35 +1349,20 @@ type noOpAppender struct {
metadata int
}
-var _ storage.Appender = &noOpAppender{}
+var _ storage.AppenderV2 = &noOpAppender{}
-func (a *noOpAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) {
+func (a *noOpAppender) Append(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ float64, h *histogram.Histogram, _ *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
+ if !opts.Metadata.IsEmpty() {
+ a.metadata++
+ }
+ if h != nil {
+ a.histograms++
+ return 1, nil
+ }
a.samples++
return 1, nil
}
-func (*noOpAppender) AppendSTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
- return 1, nil
-}
-
-func (a *noOpAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- a.histograms++
- return 1, nil
-}
-
-func (*noOpAppender) AppendHistogramSTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- return 1, nil
-}
-
-func (a *noOpAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
- a.metadata++
- return 1, nil
-}
-
-func (*noOpAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
- return 1, nil
-}
-
func (*noOpAppender) Commit() error {
return nil
}
@@ -1135,10 +1371,6 @@ func (*noOpAppender) Rollback() error {
return nil
}
-func (*noOpAppender) SetOptions(_ *storage.AppendOptions) {
- panic("not implemented")
-}
-
type wantPrometheusMetric struct {
name string
familyName string
@@ -1323,3 +1555,264 @@ func generateExemplars(exemplars pmetric.ExemplarSlice, count int, ts pcommon.Ti
e.SetTraceID(pcommon.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f})
}
}
+
+// createMultiScopeExportRequest creates an export request with multiple scopes per resource.
+// This is useful for benchmarking resource-level label caching, where cached resource labels
+// (job, instance, promoted attributes) should be computed once and reused across all scopes.
+func createMultiScopeExportRequest(
+ resourceAttributeCount int,
+ scopeCount int,
+ metricsPerScope int,
+ labelsPerMetric int,
+ scopeAttributeCount int,
+) pmetricotlp.ExportRequest {
+ request := pmetricotlp.NewExportRequest()
+ ts := pcommon.NewTimestampFromTime(time.Now())
+
+ rm := request.Metrics().ResourceMetrics().AppendEmpty()
+ generateAttributes(rm.Resource().Attributes(), "resource", resourceAttributeCount)
+
+ // Set service attributes for job/instance label generation
+ rm.Resource().Attributes().PutStr("service.name", "test-service")
+ rm.Resource().Attributes().PutStr("service.namespace", "test-namespace")
+ rm.Resource().Attributes().PutStr("service.instance.id", "instance-1")
+
+ for s := range scopeCount {
+ scopeMetrics := rm.ScopeMetrics().AppendEmpty()
+ scope := scopeMetrics.Scope()
+ scope.SetName(fmt.Sprintf("scope-%d", s))
+ scope.SetVersion("1.0.0")
+ generateAttributes(scope.Attributes(), "scope", scopeAttributeCount)
+
+ metrics := scopeMetrics.Metrics()
+ for m := range metricsPerScope {
+ metric := metrics.AppendEmpty()
+ metric.SetName(fmt.Sprintf("gauge_s%d_m%d", s, m))
+ metric.SetDescription("gauge metric")
+ metric.SetUnit("unit")
+ point := metric.SetEmptyGauge().DataPoints().AppendEmpty()
+ point.SetTimestamp(ts)
+ point.SetDoubleValue(float64(m))
+ generateAttributes(point.Attributes(), "series", labelsPerMetric)
+ }
+ }
+
+ return request
+}
+
+// createRepeatedLabelsExportRequest creates an export request where the same label names
+// appear repeatedly across many datapoints. This is useful for benchmarking the label
+// sanitization cache, which should reduce allocations when the same label names are seen multiple times.
+func createRepeatedLabelsExportRequest(
+ uniqueLabelNames int,
+ datapointCount int,
+ labelsPerDatapoint int,
+) pmetricotlp.ExportRequest {
+ request := pmetricotlp.NewExportRequest()
+ ts := pcommon.NewTimestampFromTime(time.Now())
+
+ rm := request.Metrics().ResourceMetrics().AppendEmpty()
+ rm.Resource().Attributes().PutStr("service.name", "test-service")
+ rm.Resource().Attributes().PutStr("service.instance.id", "instance-1")
+
+ metrics := rm.ScopeMetrics().AppendEmpty().Metrics()
+
+ // Pre-generate label names that will be reused.
+ labelNames := make([]string, uniqueLabelNames)
+ for i := range uniqueLabelNames {
+ labelNames[i] = fmt.Sprintf("label.name.%d", i)
+ }
+
+ for d := range datapointCount {
+ metric := metrics.AppendEmpty()
+ metric.SetName(fmt.Sprintf("gauge_%d", d))
+ metric.SetDescription("gauge metric")
+ metric.SetUnit("unit")
+ point := metric.SetEmptyGauge().DataPoints().AppendEmpty()
+ point.SetTimestamp(ts)
+ point.SetDoubleValue(float64(d))
+
+ // Add labels using the same label names (cycling through them).
+ for l := range labelsPerDatapoint {
+ labelName := labelNames[l%uniqueLabelNames]
+ point.Attributes().PutStr(labelName, fmt.Sprintf("value-%d-%d", d, l))
+ }
+ }
+
+ return request
+}
+
+// createMultiResourceExportRequest creates an export request with multiple ResourceMetrics.
+// This is useful for benchmarking the overhead of cache clearing between resources and
+// verifying that caching still helps within each resource.
+func createMultiResourceExportRequest(
+ resourceCount int,
+ resourceAttributeCount int,
+ metricsPerResource int,
+ labelsPerMetric int,
+) pmetricotlp.ExportRequest {
+ request := pmetricotlp.NewExportRequest()
+ ts := pcommon.NewTimestampFromTime(time.Now())
+
+ for r := range resourceCount {
+ rm := request.Metrics().ResourceMetrics().AppendEmpty()
+ generateAttributes(rm.Resource().Attributes(), "resource", resourceAttributeCount)
+
+ // Set unique service attributes per resource for job/instance label generation.
+ rm.Resource().Attributes().PutStr("service.name", fmt.Sprintf("service-%d", r))
+ rm.Resource().Attributes().PutStr("service.namespace", "test-namespace")
+ rm.Resource().Attributes().PutStr("service.instance.id", fmt.Sprintf("instance-%d", r))
+
+ metrics := rm.ScopeMetrics().AppendEmpty().Metrics()
+ for m := range metricsPerResource {
+ metric := metrics.AppendEmpty()
+ metric.SetName(fmt.Sprintf("gauge_r%d_m%d", r, m))
+ metric.SetDescription("gauge metric")
+ metric.SetUnit("unit")
+ point := metric.SetEmptyGauge().DataPoints().AppendEmpty()
+ point.SetTimestamp(ts)
+ point.SetDoubleValue(float64(m))
+ generateAttributes(point.Attributes(), "series", labelsPerMetric)
+ }
+ }
+
+ return request
+}
+
+// BenchmarkFromMetrics_LabelCaching_MultipleDatapointsPerResource benchmarks the resource-level
+// label caching optimization. With caching, resource labels (job, instance, promoted
+// attributes) should be computed once per ResourceMetrics and reused for all datapoints.
+func BenchmarkFromMetrics_LabelCaching_MultipleDatapointsPerResource(b *testing.B) {
+ const (
+ labelsPerMetric = 5
+ scopeAttributeCount = 3
+ )
+ for _, resourceAttrs := range []int{5, 50} {
+ for _, scopeCount := range []int{1, 10} {
+ for _, metricsPerScope := range []int{10, 100} {
+ b.Run(fmt.Sprintf("res_attrs=%d/scopes=%d/metrics=%d", resourceAttrs, scopeCount, metricsPerScope), func(b *testing.B) {
+ settings := Settings{
+ PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{
+ PromoteAllResourceAttributes: true,
+ }),
+ }
+ payload := createMultiScopeExportRequest(
+ resourceAttrs,
+ scopeCount,
+ metricsPerScope,
+ labelsPerMetric,
+ scopeAttributeCount,
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for b.Loop() {
+ app := &noOpAppender{}
+ converter := NewPrometheusConverter(app)
+ _, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
+ require.NoError(b, err)
+ }
+ })
+ }
+ }
+ }
+}
+
+// BenchmarkFromMetrics_LabelCaching_RepeatedLabelNames benchmarks the label sanitization cache.
+// When the same label names appear across many datapoints, the sanitization should
+// only happen once per unique label name within a ResourceMetrics.
+func BenchmarkFromMetrics_LabelCaching_RepeatedLabelNames(b *testing.B) {
+ const labelsPerDatapoint = 20
+ for _, uniqueLabels := range []int{5, 50} {
+ for _, datapoints := range []int{100, 1000} {
+ b.Run(fmt.Sprintf("unique_labels=%d/datapoints=%d", uniqueLabels, datapoints), func(b *testing.B) {
+ settings := Settings{}
+ payload := createRepeatedLabelsExportRequest(
+ uniqueLabels,
+ datapoints,
+ labelsPerDatapoint,
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for b.Loop() {
+ app := &noOpAppender{}
+ converter := NewPrometheusConverter(app)
+ _, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
+ require.NoError(b, err)
+ }
+ })
+ }
+ }
+}
+
+// BenchmarkFromMetrics_LabelCaching_ScopeMetadata benchmarks scope-level label caching when
+// PromoteScopeMetadata is enabled. Scope metadata labels (otel_scope_name, version, etc.)
+// should be computed once per ScopeMetrics and reused for all metrics within that scope.
+func BenchmarkFromMetrics_LabelCaching_ScopeMetadata(b *testing.B) {
+ const (
+ resourceAttributeCount = 5
+ labelsPerMetric = 5
+ )
+ for _, scopeAttrs := range []int{0, 10} {
+ for _, metricsPerScope := range []int{10, 100} {
+ b.Run(fmt.Sprintf("scope_attrs=%d/metrics=%d", scopeAttrs, metricsPerScope), func(b *testing.B) {
+ settings := Settings{
+ PromoteScopeMetadata: true,
+ }
+ payload := createMultiScopeExportRequest(
+ resourceAttributeCount,
+ 1, // single scope to isolate scope caching benefit
+ metricsPerScope,
+ labelsPerMetric,
+ scopeAttrs,
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for b.Loop() {
+ app := &noOpAppender{}
+ converter := NewPrometheusConverter(app)
+ _, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
+ require.NoError(b, err)
+ }
+ })
+ }
+ }
+}
+
+// BenchmarkFromMetrics_LabelCaching_MultipleResources benchmarks requests with multiple
+// ResourceMetrics. The label sanitization cache is cleared between resources, so this
+// measures the overhead of cache clearing and verifies caching helps within each resource.
+func BenchmarkFromMetrics_LabelCaching_MultipleResources(b *testing.B) {
+ const (
+ resourceAttributeCount = 10
+ labelsPerMetric = 10
+ )
+ for _, resourceCount := range []int{1, 10, 50} {
+ for _, metricsPerResource := range []int{10, 100} {
+ b.Run(fmt.Sprintf("resources=%d/metrics=%d", resourceCount, metricsPerResource), func(b *testing.B) {
+ settings := Settings{
+ PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{
+ PromoteAllResourceAttributes: true,
+ }),
+ }
+ payload := createMultiResourceExportRequest(
+ resourceCount,
+ resourceAttributeCount,
+ metricsPerResource,
+ labelsPerMetric,
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for b.Loop() {
+ app := &noOpAppender{}
+ converter := NewPrometheusConverter(app)
+ _, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
+ require.NoError(b, err)
+ }
+ })
+ }
+ }
+}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
index e3814ce095..3c74ec9382 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
@@ -21,14 +21,17 @@ import (
"math"
"github.com/prometheus/common/model"
- "go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
)
-func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice,
- resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
+func (c *PrometheusConverter) addGaugeNumberDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.NumberDataPointSlice,
+ settings Settings,
+ appOpts storage.AOptions,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -37,15 +40,13 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
pt := dataPoints.At(x)
labels, err := c.createAttributes(
- resource,
pt.Attributes(),
- scope,
settings,
- nil,
+ reservedLabelNames,
true,
- meta,
+ appOpts.Metadata,
model.MetricNameLabel,
- meta.MetricFamilyName,
+ appOpts.MetricFamilyName,
)
if err != nil {
return err
@@ -62,7 +63,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
}
ts := convertTimeStamp(pt.Timestamp())
st := convertTimeStamp(pt.StartTimestamp())
- if err := c.appender.AppendSample(labels, meta, st, ts, val, nil); err != nil {
+ if _, err = c.appender.Append(0, labels, st, ts, val, nil, nil, appOpts); err != nil {
return err
}
}
@@ -70,8 +71,11 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
return nil
}
-func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice,
- resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
+func (c *PrometheusConverter) addSumNumberDataPoints(
+ ctx context.Context,
+ dataPoints pmetric.NumberDataPointSlice,
+ settings Settings,
+ appOpts storage.AOptions,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -80,18 +84,16 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
pt := dataPoints.At(x)
lbls, err := c.createAttributes(
- resource,
pt.Attributes(),
- scope,
settings,
- nil,
+ reservedLabelNames,
true,
- meta,
+ appOpts.Metadata,
model.MetricNameLabel,
- meta.MetricFamilyName,
+ appOpts.MetricFamilyName,
)
if err != nil {
- return nil
+ return err
}
var val float64
switch pt.ValueType() {
@@ -109,7 +111,9 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
if err != nil {
return err
}
- if err := c.appender.AppendSample(lbls, meta, st, ts, val, exemplars); err != nil {
+
+ appOpts.Exemplars = exemplars
+ if _, err = c.appender.Append(0, lbls, st, ts, val, nil, nil, appOpts); err != nil {
return err
}
}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
index 77bc212c76..66e7e4c3bb 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
@@ -29,6 +29,8 @@ import (
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
)
func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
@@ -49,7 +51,7 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- want func() []combinedSample
+ want func() []sample
}{
{
name: "gauge without scope promotion",
@@ -62,17 +64,17 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(pcommon.Timestamp(ts)),
- v: 1,
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(pcommon.Timestamp(ts)),
+ V: 1,
},
}
},
@@ -88,7 +90,7 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
"otel_scope_name", defaultScope.name,
@@ -97,13 +99,13 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(pcommon.Timestamp(ts)),
- v: 1,
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(pcommon.Timestamp(ts)),
+ V: 1,
},
}
},
@@ -112,24 +114,28 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ settings := Settings{
+ PromoteScopeMetadata: tt.promoteScope,
+ }
+ resource := pcommon.NewResource()
+
+ // Initialize resource and scope context as FromMetrics would.
+ require.NoError(t, converter.setResourceContext(resource, settings))
+ require.NoError(t, converter.setScopeContext(tt.scope, settings))
converter.addGaugeNumberDataPoints(
context.Background(),
metric.Gauge().DataPoints(),
- pcommon.NewResource(),
- Settings{
- PromoteScopeMetadata: tt.promoteScope,
- },
- tt.scope,
- Metadata{
+ settings,
+ storage.AOptions{
MetricFamilyName: metric.Name(),
},
)
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.want(), mockAppender.samples)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())
})
}
}
@@ -152,7 +158,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
metric func() pmetric.Metric
scope scope
promoteScope bool
- want func() []combinedSample
+ want func() []sample
}{
{
name: "sum without scope promotion",
@@ -166,17 +172,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 1,
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 1,
},
}
},
@@ -193,7 +199,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: true,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
"otel_scope_name", defaultScope.name,
@@ -202,13 +208,13 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
"otel_scope_attr1", "value1",
"otel_scope_attr2", "value2",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 1,
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 1,
},
}
},
@@ -227,18 +233,18 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 1,
- es: []exemplar.Exemplar{
+ MF: "test",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 1,
+ ES: []exemplar.Exemplar{
{Value: 2},
},
},
@@ -262,18 +268,18 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_sum",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_sum",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- st: convertTimeStamp(ts),
- v: 1,
+ MF: "test_sum",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ ST: convertTimeStamp(ts),
+ V: 1,
},
}
},
@@ -293,17 +299,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_sum",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_sum",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 0,
+ MF: "test_sum",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -323,17 +329,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
},
scope: defaultScope,
promoteScope: false,
- want: func() []combinedSample {
+ want: func() []sample {
lbls := labels.FromStrings(
model.MetricNameLabel, "test_sum",
)
- return []combinedSample{
+ return []sample{
{
- metricFamilyName: "test_sum",
- ls: lbls,
- meta: metadata.Metadata{},
- t: convertTimeStamp(ts),
- v: 0,
+ MF: "test_sum",
+ L: lbls,
+ M: metadata.Metadata{},
+ T: convertTimeStamp(ts),
+ V: 0,
},
}
},
@@ -342,24 +348,28 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metric := tt.metric()
- mockAppender := &mockCombinedAppender{}
- converter := NewPrometheusConverter(mockAppender)
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ settings := Settings{
+ PromoteScopeMetadata: tt.promoteScope,
+ }
+ resource := pcommon.NewResource()
+
+ // Initialize resource and scope context as FromMetrics would.
+ require.NoError(t, converter.setResourceContext(resource, settings))
+ require.NoError(t, converter.setScopeContext(tt.scope, settings))
converter.addSumNumberDataPoints(
context.Background(),
metric.Sum().DataPoints(),
- pcommon.NewResource(),
- Settings{
- PromoteScopeMetadata: tt.promoteScope,
- },
- tt.scope,
- Metadata{
+ settings,
+ storage.AOptions{
MetricFamilyName: metric.Name(),
},
)
- require.NoError(t, mockAppender.Commit())
-
- requireEqual(t, tt.want(), mockAppender.samples)
+ require.NoError(t, app.Commit())
+ teststorage.RequireEqual(t, tt.want(), appTest.ResultSamples())
})
}
}
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index 2b26179e58..63cdfb36f4 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -19,6 +19,7 @@ import (
"fmt"
"log/slog"
"math"
+ "slices"
"strconv"
"sync"
"time"
@@ -2105,12 +2106,11 @@ func setAtomicToNewer(value *atomic.Int64, newValue int64) (previous int64, upda
func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeries) bool) ([]prompb.TimeSeries, *timeSeriesStats) {
stats := newTimeSeriesStats()
- keepIdx := 0
- for i, ts := range timeSeries {
+ timeSeries = slices.DeleteFunc(timeSeries, func(ts prompb.TimeSeries) bool {
if filter != nil && filter(ts) {
stats.recordDropped(len(ts.Samples) > 0, len(ts.Exemplars) > 0, len(ts.Histograms) > 0)
- continue
+ return true
}
// At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
@@ -2123,16 +2123,10 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
if len(ts.Histograms) > 0 {
stats.updateTimestamp(ts.Histograms[0].Timestamp)
}
+ return false
+ })
- if i != keepIdx {
- // We have to swap the kept timeseries with the one which should be dropped.
- // Copying any elements within timeSeries could cause data corruptions when reusing the slice in a next batch (shards.populateTimeSeries).
- timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
- }
- keepIdx++
- }
-
- return timeSeries[:keepIdx], stats
+ return timeSeries, stats
}
func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, filter func(prompb.TimeSeries) bool, buf compression.EncodeBuffer, compr compression.Type) (_ []byte, highest, lowest int64, _ error) {
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index f1462b4406..a4b05d387a 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -871,7 +871,7 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([
return samples, series
}
-func createProtoTimeseriesWithOld(numSamples, baseTs int64, _ ...labels.Label) []prompb.TimeSeries {
+func createProtoTimeseriesWithOld(numSamples, baseTs int64) []prompb.TimeSeries {
samples := make([]prompb.TimeSeries, numSamples)
// use a fixed rand source so tests are consistent
r := rand.New(rand.NewSource(99))
@@ -2365,8 +2365,14 @@ func BenchmarkBuildTimeSeries(b *testing.B) {
// Send one sample per series, which is the typical remote_write case
const numSamples = 10000
filter := func(ts prompb.TimeSeries) bool { return filterTsLimit(99, ts) }
+ originalSamples := createProtoTimeseriesWithOld(numSamples, 100)
+
+ b.ReportAllocs()
for b.Loop() {
- samples := createProtoTimeseriesWithOld(numSamples, 100, extraLabels...)
+ b.StopTimer()
+ samples := make([]prompb.TimeSeries, len(originalSamples))
+ copy(samples, originalSamples)
+ b.StartTimer()
result, _ := buildTimeSeries(samples, filter)
require.NotNil(b, result)
}
diff --git a/storage/remote/storage.go b/storage/remote/storage.go
index f482597249..be75d23383 100644
--- a/storage/remote/storage.go
+++ b/storage/remote/storage.go
@@ -63,6 +63,8 @@ type Storage struct {
localStartTimeCallback startTimeCallback
}
+var _ storage.Storage = &Storage{}
+
// NewStorage returns a remote.Storage.
func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, enableTypeAndUnitLabels bool) *Storage {
if l == nil {
@@ -193,6 +195,11 @@ func (s *Storage) Appender(ctx context.Context) storage.Appender {
return s.rws.Appender(ctx)
}
+// AppenderV2 implements storage.Storage.
+func (s *Storage) AppenderV2(ctx context.Context) storage.AppenderV2 {
+ return s.rws.AppenderV2(ctx)
+}
+
// LowestSentTimestamp returns the lowest sent timestamp across all queues.
func (s *Storage) LowestSentTimestamp() int64 {
return s.rws.LowestSentTimestamp()
diff --git a/storage/remote/write.go b/storage/remote/write.go
index 92f447d624..6a336dc06b 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -238,8 +238,20 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
// Appender implements storage.Storage.
func (rws *WriteStorage) Appender(context.Context) storage.Appender {
return ×tampTracker{
- writeStorage: rws,
- highestRecvTimestamp: rws.highestTimestamp,
+ baseTimestampTracker: baseTimestampTracker{
+ writeStorage: rws,
+ highestRecvTimestamp: rws.highestTimestamp,
+ },
+ }
+}
+
+// AppenderV2 implements storage.Storage.
+func (rws *WriteStorage) AppenderV2(context.Context) storage.AppenderV2 {
+ return ×tampTrackerV2{
+ baseTimestampTracker: baseTimestampTracker{
+ writeStorage: rws,
+ highestRecvTimestamp: rws.highestTimestamp,
+ },
}
}
@@ -282,9 +294,9 @@ func (rws *WriteStorage) Close() error {
return nil
}
-type timestampTracker struct {
- writeStorage *WriteStorage
- appendOptions *storage.AppendOptions
+type baseTimestampTracker struct {
+ writeStorage *WriteStorage
+
samples int64
exemplars int64
histograms int64
@@ -292,6 +304,12 @@ type timestampTracker struct {
highestRecvTimestamp *maxTimestamp
}
+type timestampTracker struct {
+ baseTimestampTracker
+
+ appendOptions *storage.AppendOptions
+}
+
func (t *timestampTracker) SetOptions(opts *storage.AppendOptions) {
t.appendOptions = opts
}
@@ -345,7 +363,7 @@ func (*timestampTracker) UpdateMetadata(storage.SeriesRef, labels.Labels, metada
}
// Commit implements storage.Appender.
-func (t *timestampTracker) Commit() error {
+func (t *baseTimestampTracker) Commit() error {
t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms)
samplesIn.Add(float64(t.samples))
@@ -356,6 +374,25 @@ func (t *timestampTracker) Commit() error {
}
// Rollback implements storage.Appender.
-func (*timestampTracker) Rollback() error {
+func (*baseTimestampTracker) Rollback() error {
return nil
}
+
+type timestampTrackerV2 struct {
+ baseTimestampTracker
+}
+
+// Append implements storage.AppenderV2.
+func (t *timestampTrackerV2) Append(ref storage.SeriesRef, _ labels.Labels, _, ts int64, _ float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ switch {
+ case fh != nil, h != nil:
+ t.histograms++
+ default:
+ t.samples++
+ }
+ if ts > t.highestTimestamp {
+ t.highestTimestamp = ts
+ }
+ t.exemplars += int64(len(opts.Exemplars))
+ return ref, nil
+}
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index c29896b843..9fdd750692 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -23,18 +23,11 @@ import (
"time"
"github.com/gogo/protobuf/proto"
- deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
- "go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/consumer"
- "go.opentelemetry.io/collector/pdata/pmetric"
- "go.opentelemetry.io/collector/processor"
- "go.opentelemetry.io/otel/metric/noop"
- "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
@@ -43,7 +36,6 @@ import (
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/storage"
- otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)
type writeHandler struct {
@@ -233,7 +225,8 @@ func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample,
if err != nil {
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
- errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
+ errors.Is(err, storage.ErrTooOldSample) {
h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
@@ -255,7 +248,8 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
- errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
+ errors.Is(err, storage.ErrTooOldSample) {
h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
@@ -417,7 +411,8 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
- errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
+ errors.Is(err, storage.ErrTooOldSample) {
// TODO(bwplotka): Not too spammy log?
h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
@@ -491,197 +486,8 @@ func (*writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage
return ref, err
}
-type OTLPOptions struct {
- // Convert delta samples to their cumulative equivalent by aggregating in-memory
- ConvertDelta bool
- // Store the raw delta samples as metrics with unknown type (we don't have a proper type for delta yet, therefore
- // marking the metric type as unknown for now).
- // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
- NativeDelta bool
- // LookbackDelta is the query lookback delta.
- // Used to calculate the target_info sample timestamp interval.
- LookbackDelta time.Duration
- // Add type and unit labels to the metrics.
- EnableTypeAndUnitLabels bool
- // IngestSTZeroSample enables writing zero samples based on the start time
- // of metrics.
- IngestSTZeroSample bool
- // AppendMetadata enables writing metadata to WAL when metadata-wal-records feature is enabled.
- AppendMetadata bool
-}
-
-// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
-// writes them to the provided appendable.
-func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
- if opts.NativeDelta && opts.ConvertDelta {
- // This should be validated when iterating through feature flags, so not expected to fail here.
- panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time")
- }
-
- ex := &rwExporter{
- logger: logger,
- appendable: appendable,
- config: configFunc,
- allowDeltaTemporality: opts.NativeDelta,
- lookbackDelta: opts.LookbackDelta,
- ingestSTZeroSample: opts.IngestSTZeroSample,
- enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels,
- appendMetadata: opts.AppendMetadata,
- // Register metrics.
- metrics: otlptranslator.NewCombinedAppenderMetrics(reg),
- }
-
- wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}
-
- if opts.ConvertDelta {
- fac := deltatocumulative.NewFactory()
- set := processor.Settings{
- ID: component.NewID(fac.Type()),
- TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()},
- }
- d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.defaultConsumer)
- if err != nil {
- // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor],
- // which only errors if:
- // - cfg.(type) != *Config
- // - telemetry.New fails due to bad set.TelemetrySettings
- //
- // both cannot be the case, as we pass a valid *Config and valid TelemetrySettings.
- // as such, we assume this error to never occur.
- // if it is, our assumptions are broken in which case a panic seems acceptable.
- panic(fmt.Errorf("failed to create metrics processor: %w", err))
- }
- if err := d2c.Start(context.Background(), nil); err != nil {
- // deltatocumulative does not error on start. see above for panic reasoning
- panic(err)
- }
- wh.d2cConsumer = d2c
- }
-
- return wh
-}
-
-type rwExporter struct {
- logger *slog.Logger
- appendable storage.Appendable
- config func() config.Config
- allowDeltaTemporality bool
- lookbackDelta time.Duration
- ingestSTZeroSample bool
- enableTypeAndUnitLabels bool
- appendMetadata bool
-
- // Metrics.
- metrics otlptranslator.CombinedAppenderMetrics
-}
-
-func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
- otlpCfg := rw.config().OTLPConfig
- app := &remoteWriteAppender{
- Appender: rw.appendable.Appender(ctx),
- maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
- }
- combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.ingestSTZeroSample, rw.appendMetadata, rw.metrics)
- converter := otlptranslator.NewPrometheusConverter(combinedAppender)
- annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
- AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
- AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(),
- PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg),
- KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
- ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB,
- PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata,
- AllowDeltaTemporality: rw.allowDeltaTemporality,
- LookbackDelta: rw.lookbackDelta,
- EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels,
- LabelNameUnderscoreSanitization: otlpCfg.LabelNameUnderscoreSanitization,
- LabelNamePreserveMultipleUnderscores: otlpCfg.LabelNamePreserveMultipleUnderscores,
- })
-
- defer func() {
- if err != nil {
- _ = app.Rollback()
- return
- }
- err = app.Commit()
- }()
- ws, _ := annots.AsStrings("", 0, 0)
- if len(ws) > 0 {
- rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws)
- }
- return err
-}
-
-func (*rwExporter) Capabilities() consumer.Capabilities {
- return consumer.Capabilities{MutatesData: false}
-}
-
-type otlpWriteHandler struct {
- logger *slog.Logger
-
- defaultConsumer consumer.Metrics // stores deltas as-is
- d2cConsumer consumer.Metrics // converts deltas to cumulative
-}
-
-func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- req, err := DecodeOTLPWriteRequest(r)
- if err != nil {
- h.logger.Error("Error decoding OTLP write request", "err", err.Error())
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- md := req.Metrics()
- // If deltatocumulative conversion enabled AND delta samples exist, use slower conversion path.
- // While deltatocumulative can also accept cumulative metrics (and then just forwards them as-is), it currently
- // holds a sync.Mutex when entering ConsumeMetrics. This is slow and not necessary when ingesting cumulative metrics.
- if h.d2cConsumer != nil && hasDelta(md) {
- err = h.d2cConsumer.ConsumeMetrics(r.Context(), md)
- } else {
- // Otherwise use default consumer (alongside cumulative samples, this will accept delta samples and write as-is
- // if native-delta-support is enabled).
- err = h.defaultConsumer.ConsumeMetrics(r.Context(), md)
- }
-
- switch {
- case err == nil:
- case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
- // Indicated an out of order sample is a bad request to prevent retries.
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- default:
- h.logger.Error("Error appending remote write", "err", err.Error())
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- w.WriteHeader(http.StatusOK)
-}
-
-func hasDelta(md pmetric.Metrics) bool {
- for i := range md.ResourceMetrics().Len() {
- sms := md.ResourceMetrics().At(i).ScopeMetrics()
- for i := range sms.Len() {
- ms := sms.At(i).Metrics()
- for i := range ms.Len() {
- temporality := pmetric.AggregationTemporalityUnspecified
- m := ms.At(i)
- switch ms.At(i).Type() {
- case pmetric.MetricTypeSum:
- temporality = m.Sum().AggregationTemporality()
- case pmetric.MetricTypeExponentialHistogram:
- temporality = m.ExponentialHistogram().AggregationTemporality()
- case pmetric.MetricTypeHistogram:
- temporality = m.Histogram().AggregationTemporality()
- }
- if temporality == pmetric.AggregationTemporalityDelta {
- return true
- }
- }
- }
- }
- return false
-}
-
+// TODO(bwplotka): Consider exposing timeLimitAppender and bucketLimitAppender appenders from scrape/target.go
+// to DRY, they do the same.
type remoteWriteAppender struct {
storage.Appender
@@ -734,3 +540,27 @@ func (app *remoteWriteAppender) AppendExemplar(ref storage.SeriesRef, l labels.L
}
return ref, nil
}
+
+type remoteWriteAppenderV2 struct {
+ storage.AppenderV2
+
+ maxTime int64
+}
+
+func (app *remoteWriteAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ if t > app.maxTime {
+ return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
+ }
+
+ if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax {
+ if err := h.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
+ return 0, err
+ }
+ }
+ if fh != nil && histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > histogram.ExponentialSchemaMax {
+ if err := fh.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
+ return 0, err
+ }
+ }
+ return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
+}
diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go
index ac75d56095..2cf1217933 100644
--- a/storage/remote/write_handler_test.go
+++ b/storage/remote/write_handler_test.go
@@ -1267,6 +1267,7 @@ func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries {
return series
}
+// TODO(bwplotka): Delete and switch all to teststorage.Appendable.
type mockAppendable struct {
latestSample map[uint64]int64
samples []mockSample
diff --git a/storage/remote/write_otlp_handler.go b/storage/remote/write_otlp_handler.go
new file mode 100644
index 0000000000..6cb4a0fff0
--- /dev/null
+++ b/storage/remote/write_otlp_handler.go
@@ -0,0 +1,276 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "time"
+
+ deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ "go.opentelemetry.io/collector/processor"
+ "go.opentelemetry.io/otel/metric/noop"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/timestamp"
+ "github.com/prometheus/prometheus/storage"
+ otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
+)
+
+type OTLPOptions struct {
+ // Convert delta samples to their cumulative equivalent by aggregating in-memory
+ ConvertDelta bool
+ // Store the raw delta samples as metrics with unknown type (we don't have a proper type for delta yet, therefore
+ // marking the metric type as unknown for now).
+ // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
+ NativeDelta bool
+ // LookbackDelta is the query lookback delta.
+ // Used to calculate the target_info sample timestamp interval.
+ LookbackDelta time.Duration
+ // Add type and unit labels to the metrics.
+ EnableTypeAndUnitLabels bool
+}
+
+// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
+// writes them to the provided appendable.
+func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.AppendableV2, configFunc func() config.Config, opts OTLPOptions) http.Handler {
+ if opts.NativeDelta && opts.ConvertDelta {
+ // This should be validated when iterating through feature flags, so not expected to fail here.
+ panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time")
+ }
+
+ ex := &rwExporter{
+ logger: logger,
+ appendable: newOTLPInstrumentedAppendable(reg, appendable),
+ config: configFunc,
+ allowDeltaTemporality: opts.NativeDelta,
+ lookbackDelta: opts.LookbackDelta,
+ enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels,
+ }
+
+ wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}
+
+ if opts.ConvertDelta {
+ fac := deltatocumulative.NewFactory()
+ set := processor.Settings{
+ ID: component.NewID(fac.Type()),
+ TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()},
+ }
+ d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.defaultConsumer)
+ if err != nil {
+ // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor],
+ // which only errors if:
+ // - cfg.(type) != *Config
+ // - telemetry.New fails due to bad set.TelemetrySettings
+ //
+ // both cannot be the case, as we pass a valid *Config and valid TelemetrySettings.
+ // as such, we assume this error to never occur.
+ // if it is, our assumptions are broken in which case a panic seems acceptable.
+ panic(fmt.Errorf("failed to create metrics processor: %w", err))
+ }
+ if err := d2c.Start(context.Background(), nil); err != nil {
+ // deltatocumulative does not error on start. see above for panic reasoning
+ panic(err)
+ }
+ wh.d2cConsumer = d2c
+ }
+
+ return wh
+}
+
+type rwExporter struct {
+ logger *slog.Logger
+ appendable storage.AppendableV2
+ config func() config.Config
+ allowDeltaTemporality bool
+ lookbackDelta time.Duration
+ enableTypeAndUnitLabels bool
+}
+
+func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
+ otlpCfg := rw.config().OTLPConfig
+ app := &remoteWriteAppenderV2{
+ AppenderV2: rw.appendable.AppenderV2(ctx),
+ maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
+ }
+ converter := otlptranslator.NewPrometheusConverter(app)
+ annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
+ AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
+ AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(),
+ PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg),
+ KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
+ ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB,
+ PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata,
+ AllowDeltaTemporality: rw.allowDeltaTemporality,
+ LookbackDelta: rw.lookbackDelta,
+ EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels,
+ LabelNameUnderscoreSanitization: otlpCfg.LabelNameUnderscoreSanitization,
+ LabelNamePreserveMultipleUnderscores: otlpCfg.LabelNamePreserveMultipleUnderscores,
+ })
+
+ defer func() {
+ if err != nil {
+ _ = app.Rollback()
+ return
+ }
+ err = app.Commit()
+ }()
+ ws, _ := annots.AsStrings("", 0, 0)
+ if len(ws) > 0 {
+ rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws)
+ }
+ return err
+}
+
+func (*rwExporter) Capabilities() consumer.Capabilities {
+ return consumer.Capabilities{MutatesData: false}
+}
+
+type otlpWriteHandler struct {
+ logger *slog.Logger
+
+ defaultConsumer consumer.Metrics // stores deltas as-is
+ d2cConsumer consumer.Metrics // converts deltas to cumulative
+}
+
+func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ req, err := DecodeOTLPWriteRequest(r)
+ if err != nil {
+ h.logger.Error("Error decoding OTLP write request", "err", err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ md := req.Metrics()
+ // If deltatocumulative conversion enabled AND delta samples exist, use slower conversion path.
+ // While deltatocumulative can also accept cumulative metrics (and then just forwards them as-is), it currently
+ // holds a sync.Mutex when entering ConsumeMetrics. This is slow and not necessary when ingesting cumulative metrics.
+ if h.d2cConsumer != nil && hasDelta(md) {
+ err = h.d2cConsumer.ConsumeMetrics(r.Context(), md)
+ } else {
+ // Otherwise use default consumer (alongside cumulative samples, this will accept delta samples and write as-is
+ // if native-delta-support is enabled).
+ err = h.defaultConsumer.ConsumeMetrics(r.Context(), md)
+ }
+
+ switch {
+ case err == nil:
+ case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
+ // Indicated an out of order sample is a bad request to prevent retries.
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ default:
+ h.logger.Error("Error appending remote write", "err", err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+}
+
+func hasDelta(md pmetric.Metrics) bool {
+ for i := range md.ResourceMetrics().Len() {
+ sms := md.ResourceMetrics().At(i).ScopeMetrics()
+ for i := range sms.Len() {
+ ms := sms.At(i).Metrics()
+ for i := range ms.Len() {
+ temporality := pmetric.AggregationTemporalityUnspecified
+ m := ms.At(i)
+ switch ms.At(i).Type() {
+ case pmetric.MetricTypeSum:
+ temporality = m.Sum().AggregationTemporality()
+ case pmetric.MetricTypeExponentialHistogram:
+ temporality = m.ExponentialHistogram().AggregationTemporality()
+ case pmetric.MetricTypeHistogram:
+ temporality = m.Histogram().AggregationTemporality()
+ }
+ if temporality == pmetric.AggregationTemporalityDelta {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+type otlpInstrumentedAppendable struct {
+ storage.AppendableV2
+
+ samplesAppendedWithoutMetadata prometheus.Counter
+ outOfOrderExemplars prometheus.Counter
+}
+
+// newOTLPInstrumentedAppendable instruments some OTLP metrics per append and
+// handles partial errors, so the caller does not need to.
+func newOTLPInstrumentedAppendable(reg prometheus.Registerer, app storage.AppendableV2) *otlpInstrumentedAppendable {
+ return &otlpInstrumentedAppendable{
+ AppendableV2: app,
+ samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "prometheus",
+ Subsystem: "api",
+ Name: "otlp_appended_samples_without_metadata_total",
+ Help: "The total number of samples ingested from OTLP without corresponding metadata.",
+ }),
+ outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "prometheus",
+ Subsystem: "api",
+ Name: "otlp_out_of_order_exemplars_total",
+ Help: "The total number of received OTLP exemplars which were rejected because they were out of order.",
+ }),
+ }
+}
+
+func (a *otlpInstrumentedAppendable) AppenderV2(ctx context.Context) storage.AppenderV2 {
+ return &otlpInstrumentedAppender{
+ AppenderV2: a.AppendableV2.AppenderV2(ctx),
+
+ samplesAppendedWithoutMetadata: a.samplesAppendedWithoutMetadata,
+ outOfOrderExemplars: a.outOfOrderExemplars,
+ }
+}
+
+type otlpInstrumentedAppender struct {
+ storage.AppenderV2
+
+ samplesAppendedWithoutMetadata prometheus.Counter
+ outOfOrderExemplars prometheus.Counter
+}
+
+func (app *otlpInstrumentedAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ ref, err := app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
+ if err != nil {
+ var partialErr *storage.AppendPartialError
+ partialErr, hErr := partialErr.Handle(err)
+ if hErr != nil {
+ // Not a partial error, return err.
+ return 0, err
+ }
+ app.outOfOrderExemplars.Add(float64(len(partialErr.ExemplarErrors)))
+ // Hide the partial error as otlp converter does not handle it.
+ }
+ if opts.Metadata.IsEmpty() {
+ app.samplesAppendedWithoutMetadata.Inc()
+ }
+ return ref, nil
+}
diff --git a/storage/remote/write_otlp_handler_test.go b/storage/remote/write_otlp_handler_test.go
new file mode 100644
index 0000000000..be3482f440
--- /dev/null
+++ b/storage/remote/write_otlp_handler_test.go
@@ -0,0 +1,759 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "log/slog"
+ "math/rand/v2"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/otlptranslator"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/model/timestamp"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
+)
+
+type sample = teststorage.Sample
+
+func TestOTLPWriteHandler(t *testing.T) {
+ ts := time.Now()
+ st := ts.Add(-1 * time.Millisecond)
+
+ // Expected samples passed via OTLP request without details (labels for now) that
+ // depend on translation or type and unit labels options.
+ expectedSamplesWithoutLabelsFn := func() []sample {
+ return []sample{
+ {
+ M: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
+ V: 10.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts), ES: []exemplar.Exemplar{
+ {
+ Labels: labels.FromStrings("span_id", "0001020304050607", "trace_id", "000102030405060708090a0b0c0d0e0f"),
+ Value: 10, Ts: timestamp.FromTime(ts), HasTs: true,
+ },
+ },
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
+ V: 10.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 30.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 12.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 2.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 4.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 6.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 8.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 10.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 12.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
+ V: 12.0, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
+ H: &histogram.Histogram{
+ Count: 10,
+ Sum: 30.0,
+ Schema: 2,
+ ZeroThreshold: 1e-128,
+ ZeroCount: 2,
+ PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}},
+ PositiveBuckets: []int64{2, 0, 0, 0, 0},
+ }, ST: timestamp.FromTime(st), T: timestamp.FromTime(ts),
+ },
+ {
+ M: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, V: 1, T: timestamp.FromTime(ts),
+ },
+ }
+ }
+
+ exportRequest := generateOTLPWriteRequest(ts, st)
+ for _, testCase := range []struct {
+ name string
+ otlpCfg config.OTLPConfig
+ typeAndUnitLabels bool
+ expectedLabelsAndMFs []sample
+ }{
+ {
+ name: "NoTranslation/NoTypeAndUnitLabels",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoTranslation,
+ },
+ expectedLabelsAndMFs: []sample{
+ {MF: "test.counter", L: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.gauge", L: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test.exponential.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
+ },
+ },
+ {
+ name: "NoTranslation/WithTypeAndUnitLabels",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoTranslation,
+ },
+ typeAndUnitLabels: true,
+ expectedLabelsAndMFs: []sample{
+ {MF: "test.counter", L: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.gauge", L: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test.exponential.histogram", L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
+ },
+ },
+ // For the following cases, skip type and unit cases, it has nothing todo with translation.
+ {
+ name: "UnderscoreEscapingWithSuffixes",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
+ },
+ expectedLabelsAndMFs: []sample{
+ {MF: "test_counter_bytes_total", L: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_gauge_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test_exponential_histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service")},
+ },
+ },
+ {
+ name: "UnderscoreEscapingWithoutSuffixes",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.UnderscoreEscapingWithoutSuffixes,
+ },
+ expectedLabelsAndMFs: []sample{
+ {MF: "test_counter", L: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_gauge", L: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test_exponential_histogram", L: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service")},
+ },
+ },
+ {
+ name: "NoUTF8EscapingWithSuffixes",
+ otlpCfg: config.OTLPConfig{
+ TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
+ },
+ expectedLabelsAndMFs: []sample{
+ // TODO: Counter MF name looks likea bug. Uncovered in unrelated refactor. fix it.
+ {MF: "test.counter_bytes_total", L: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.gauge_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5")},
+ {MF: "test.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf")},
+ {MF: "test.exponential.histogram_bytes", L: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service")},
+ {MF: "target_info", L: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service")},
+ },
+ },
+ } {
+ t.Run(testCase.name, func(t *testing.T) {
+ otlpOpts := OTLPOptions{
+ EnableTypeAndUnitLabels: testCase.typeAndUnitLabels,
+ }
+ appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, otlpOpts)
+
+ // Compile final expected samples.
+ expectedSamples := expectedSamplesWithoutLabelsFn()
+ for i, s := range testCase.expectedLabelsAndMFs {
+ expectedSamples[i].L = s.L
+ expectedSamples[i].MF = s.MF
+ }
+ teststorage.RequireEqual(t, expectedSamples, appendable.ResultSamples())
+ })
+ }
+}
+
+func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *teststorage.Appendable {
+ t.Helper()
+
+ buf, err := exportRequest.MarshalProto()
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(buf))
+ require.NoError(t, err)
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
+ appendable := teststorage.NewAppendable()
+ handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config {
+ return config.Config{
+ OTLPConfig: otlpCfg,
+ }
+ }, otlpOpts)
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+
+ return appendable
+}
+
+func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.ExportRequest {
+ d := pmetric.NewMetrics()
+
+ // Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
+ // with resource attributes: service.name="test-service", service.instance.id="test-instance", host.name="test-host"
+ // with metric attribute: foo.bar="baz"
+
+ resourceMetric := d.ResourceMetrics().AppendEmpty()
+ resourceMetric.Resource().Attributes().PutStr("service.name", "test-service")
+ resourceMetric.Resource().Attributes().PutStr("service.instance.id", "test-instance")
+ resourceMetric.Resource().Attributes().PutStr("host.name", "test-host")
+
+ scopeMetric := resourceMetric.ScopeMetrics().AppendEmpty()
+
+ // Generate One Counter
+ counterMetric := scopeMetric.Metrics().AppendEmpty()
+ counterMetric.SetName("test.counter")
+ counterMetric.SetDescription("test-counter-description")
+ counterMetric.SetUnit("By")
+ counterMetric.SetEmptySum()
+ counterMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+ counterMetric.Sum().SetIsMonotonic(true)
+
+ counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty()
+ counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ counterDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
+ counterDataPoint.SetDoubleValue(10.0)
+ counterDataPoint.Attributes().PutStr("foo.bar", "baz")
+
+ counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
+
+ counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ counterExemplar.SetDoubleValue(10.0)
+ counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})
+ counterExemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
+
+ // Generate One Gauge
+ gaugeMetric := scopeMetric.Metrics().AppendEmpty()
+ gaugeMetric.SetName("test.gauge")
+ gaugeMetric.SetDescription("test-gauge-description")
+ gaugeMetric.SetUnit("By")
+ gaugeMetric.SetEmptyGauge()
+
+ gaugeDataPoint := gaugeMetric.Gauge().DataPoints().AppendEmpty()
+ gaugeDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ gaugeDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
+ gaugeDataPoint.SetDoubleValue(10.0)
+ gaugeDataPoint.Attributes().PutStr("foo.bar", "baz")
+
+ // Generate One Histogram
+ histogramMetric := scopeMetric.Metrics().AppendEmpty()
+ histogramMetric.SetName("test.histogram")
+ histogramMetric.SetDescription("test-histogram-description")
+ histogramMetric.SetUnit("By")
+ histogramMetric.SetEmptyHistogram()
+ histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+
+ histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty()
+ histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ histogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
+ histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0})
+ histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2})
+ histogramDataPoint.SetCount(12)
+ histogramDataPoint.SetSum(30.0)
+ histogramDataPoint.Attributes().PutStr("foo.bar", "baz")
+
+ // Generate One Exponential-Histogram
+ exponentialHistogramMetric := scopeMetric.Metrics().AppendEmpty()
+ exponentialHistogramMetric.SetName("test.exponential.histogram")
+ exponentialHistogramMetric.SetDescription("test-exponential-histogram-description")
+ exponentialHistogramMetric.SetUnit("By")
+ exponentialHistogramMetric.SetEmptyExponentialHistogram()
+ exponentialHistogramMetric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+
+ exponentialHistogramDataPoint := exponentialHistogramMetric.ExponentialHistogram().DataPoints().AppendEmpty()
+ exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+ exponentialHistogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
+ exponentialHistogramDataPoint.SetScale(2.0)
+ exponentialHistogramDataPoint.Positive().BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2})
+ exponentialHistogramDataPoint.SetZeroCount(2)
+ exponentialHistogramDataPoint.SetCount(10)
+ exponentialHistogramDataPoint.SetSum(30.0)
+ exponentialHistogramDataPoint.Attributes().PutStr("foo.bar", "baz")
+
+ return pmetricotlp.NewExportRequestFromMetrics(d)
+}
+
+func TestOTLPDelta(t *testing.T) {
+ log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
+ appendable := teststorage.NewAppendable()
+ cfg := func() config.Config {
+ return config.Config{OTLPConfig: config.DefaultOTLPConfig}
+ }
+ handler := NewOTLPWriteHandler(log, nil, appendable, cfg, OTLPOptions{ConvertDelta: true})
+
+ md := pmetric.NewMetrics()
+ ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
+
+ m := ms.AppendEmpty()
+ m.SetName("some.delta.total")
+
+ sum := m.SetEmptySum()
+ sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+
+ ts := time.Date(2000, 1, 2, 3, 4, 0, 0, time.UTC)
+ for i := range 3 {
+ dp := sum.DataPoints().AppendEmpty()
+ dp.SetIntValue(int64(i))
+ dp.SetTimestamp(pcommon.NewTimestampFromTime(ts.Add(time.Duration(i) * time.Second)))
+ }
+
+ proto, err := pmetricotlp.NewExportRequestFromMetrics(md).MarshalProto()
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(proto))
+ require.NoError(t, err)
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ rec := httptest.NewRecorder()
+ handler.ServeHTTP(rec, req)
+ require.Equal(t, http.StatusOK, rec.Result().StatusCode)
+
+ ls := labels.FromStrings("__name__", "some_delta_total")
+ milli := func(sec int) int64 {
+ return time.Date(2000, 1, 2, 3, 4, sec, 0, time.UTC).UnixMilli()
+ }
+
+ want := []sample{
+ {MF: "some_delta_total", M: metadata.Metadata{Type: model.MetricTypeGauge}, T: milli(0), L: ls, V: 0}, // +0
+ {MF: "some_delta_total", M: metadata.Metadata{Type: model.MetricTypeGauge}, T: milli(1), L: ls, V: 1}, // +1
+ {MF: "some_delta_total", M: metadata.Metadata{Type: model.MetricTypeGauge}, T: milli(2), L: ls, V: 3}, // +2
+ }
+ if diff := cmp.Diff(want, appendable.ResultSamples(), cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" {
+ t.Fatal(diff)
+ }
+}
+
+func BenchmarkOTLP(b *testing.B) {
+ start := time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
+
+ type Type struct {
+ name string
+ data func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric
+ }
+ types := []Type{{
+ name: "sum",
+ data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ cumul := make(map[int]float64)
+ return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ m := pmetric.NewMetric()
+ sum := m.SetEmptySum()
+ sum.SetAggregationTemporality(mode)
+ dps := sum.DataPoints()
+ for id := range dpc {
+ dp := dps.AppendEmpty()
+ dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
+ dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
+ dp.Attributes().PutStr("id", strconv.Itoa(id))
+ v := float64(rand.IntN(100)) / 10
+ switch mode {
+ case pmetric.AggregationTemporalityDelta:
+ dp.SetDoubleValue(v)
+ case pmetric.AggregationTemporalityCumulative:
+ cumul[id] += v
+ dp.SetDoubleValue(cumul[id])
+ }
+ }
+ return []pmetric.Metric{m}
+ }
+ }(),
+ }, {
+ name: "histogram",
+ data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ bounds := [4]float64{1, 10, 100, 1000}
+ type state struct {
+ counts [4]uint64
+ count uint64
+ sum float64
+ }
+ var cumul []state
+ return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ if cumul == nil {
+ cumul = make([]state, dpc)
+ }
+ m := pmetric.NewMetric()
+ hist := m.SetEmptyHistogram()
+ hist.SetAggregationTemporality(mode)
+ dps := hist.DataPoints()
+ for id := range dpc {
+ dp := dps.AppendEmpty()
+ dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
+ dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
+ dp.Attributes().PutStr("id", strconv.Itoa(id))
+ dp.ExplicitBounds().FromRaw(bounds[:])
+
+ var obs *state
+ switch mode {
+ case pmetric.AggregationTemporalityDelta:
+ obs = new(state)
+ case pmetric.AggregationTemporalityCumulative:
+ obs = &cumul[id]
+ }
+
+ for i := range obs.counts {
+ v := uint64(rand.IntN(10))
+ obs.counts[i] += v
+ obs.count++
+ obs.sum += float64(v)
+ }
+
+ dp.SetCount(obs.count)
+ dp.SetSum(obs.sum)
+ dp.BucketCounts().FromRaw(obs.counts[:])
+ }
+ return []pmetric.Metric{m}
+ }
+ }(),
+ }, {
+ name: "exponential",
+ data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ type state struct {
+ counts [4]uint64
+ count uint64
+ sum float64
+ }
+ var cumul []state
+ return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
+ if cumul == nil {
+ cumul = make([]state, dpc)
+ }
+ m := pmetric.NewMetric()
+ ex := m.SetEmptyExponentialHistogram()
+ ex.SetAggregationTemporality(mode)
+ dps := ex.DataPoints()
+ for id := range dpc {
+ dp := dps.AppendEmpty()
+ dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
+ dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
+ dp.Attributes().PutStr("id", strconv.Itoa(id))
+ dp.SetScale(2)
+
+ var obs *state
+ switch mode {
+ case pmetric.AggregationTemporalityDelta:
+ obs = new(state)
+ case pmetric.AggregationTemporalityCumulative:
+ obs = &cumul[id]
+ }
+
+ for i := range obs.counts {
+ v := uint64(rand.IntN(10))
+ obs.counts[i] += v
+ obs.count++
+ obs.sum += float64(v)
+ }
+
+ dp.Positive().BucketCounts().FromRaw(obs.counts[:])
+ dp.SetCount(obs.count)
+ dp.SetSum(obs.sum)
+ }
+
+ return []pmetric.Metric{m}
+ }
+ }(),
+ }}
+
+ modes := []struct {
+ name string
+ data func(func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, int) []pmetric.Metric
+ }{{
+ name: "cumulative",
+ data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
+ return data(pmetric.AggregationTemporalityCumulative, 10, epoch)
+ },
+ }, {
+ name: "delta",
+ data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
+ return data(pmetric.AggregationTemporalityDelta, 10, epoch)
+ },
+ }, {
+ name: "mixed",
+ data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
+ cumul := data(pmetric.AggregationTemporalityCumulative, 5, epoch)
+ delta := data(pmetric.AggregationTemporalityDelta, 5, epoch)
+ out := append(cumul, delta...)
+ rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
+ return out
+ },
+ }}
+
+ configs := []struct {
+ name string
+ opts OTLPOptions
+ }{
+ {name: "default"},
+ {name: "convert", opts: OTLPOptions{ConvertDelta: true}},
+ }
+
+ Workers := runtime.GOMAXPROCS(0)
+ for _, cs := range types {
+ for _, mode := range modes {
+ for _, cfg := range configs {
+ b.Run(fmt.Sprintf("type=%s/temporality=%s/cfg=%s", cs.name, mode.name, cfg.name), func(b *testing.B) {
+ if !cfg.opts.ConvertDelta && (mode.name == "delta" || mode.name == "mixed") {
+ b.Skip("not possible")
+ }
+
+ var total int
+
+ // reqs is a [b.N]*http.Request, divided across the workers.
+ // deltatocumulative requires timestamps to be strictly in
+ // order on a per-series basis. to ensure this, each reqs[k]
+ // contains samples of differently named series, sorted
+ // strictly in time order
+ reqs := make([][]*http.Request, Workers)
+ for n := range b.N {
+ k := n % Workers
+
+ md := pmetric.NewMetrics()
+ ms := md.ResourceMetrics().AppendEmpty().
+ ScopeMetrics().AppendEmpty().
+ Metrics()
+
+ for i, m := range mode.data(cs.data, n) {
+ m.SetName(fmt.Sprintf("benchmark_%d_%d", k, i))
+ m.MoveTo(ms.AppendEmpty())
+ }
+
+ total += sampleCount(md)
+
+ ex := pmetricotlp.NewExportRequestFromMetrics(md)
+ data, err := ex.MarshalProto()
+ require.NoError(b, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(data))
+ require.NoError(b, err)
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ reqs[k] = append(reqs[k], req)
+ }
+
+ log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
+
+ appendable := teststorage.NewAppendable()
+ cfgfn := func() config.Config {
+ return config.Config{OTLPConfig: config.DefaultOTLPConfig}
+ }
+ handler := NewOTLPWriteHandler(log, nil, appendable, cfgfn, cfg.opts)
+
+ fail := make(chan struct{})
+ done := make(chan struct{})
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ // we use multiple workers to mimic a real-world scenario
+ // where multiple OTel collectors are sending their
+ // time-series in parallel.
+ // this is necessary to exercise potential lock-contention
+ // in this benchmark
+ for k := range Workers {
+ go func() {
+ rec := httptest.NewRecorder()
+ for _, req := range reqs[k] {
+ handler.ServeHTTP(rec, req)
+ if rec.Result().StatusCode != http.StatusOK {
+ fail <- struct{}{}
+ return
+ }
+ }
+ done <- struct{}{}
+ }()
+ }
+
+ for range Workers {
+ select {
+ case <-fail:
+ b.FailNow()
+ case <-done:
+ }
+ }
+
+ require.Len(b, appendable.ResultSamples(), total)
+ })
+ }
+ }
+ }
+}
+
+func sampleCount(md pmetric.Metrics) int {
+ var total int
+ rms := md.ResourceMetrics()
+ for i := range rms.Len() {
+ sms := rms.At(i).ScopeMetrics()
+ for i := range sms.Len() {
+ ms := sms.At(i).Metrics()
+ for i := range ms.Len() {
+ m := ms.At(i)
+ switch m.Type() {
+ case pmetric.MetricTypeSum:
+ total += m.Sum().DataPoints().Len()
+ case pmetric.MetricTypeGauge:
+ total += m.Gauge().DataPoints().Len()
+ case pmetric.MetricTypeHistogram:
+ dps := m.Histogram().DataPoints()
+ for i := range dps.Len() {
+ total += dps.At(i).BucketCounts().Len()
+ total++ // le=+Inf series
+ total++ // _sum series
+ total++ // _count series
+ }
+ case pmetric.MetricTypeExponentialHistogram:
+ total += m.ExponentialHistogram().DataPoints().Len()
+ case pmetric.MetricTypeSummary:
+ total += m.Summary().DataPoints().Len()
+ }
+ }
+ }
+ }
+ return total
+}
+
+func TestOTLPInstrumentedAppendable(t *testing.T) {
+ t.Run("no problems", func(t *testing.T) {
+ appTest := teststorage.NewAppendable()
+ oa := newOTLPInstrumentedAppendable(prometheus.NewRegistry(), appTest)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+
+ app := oa.AppenderV2(t.Context())
+ _, err := app.Append(0, labels.EmptyLabels(), -1, 1, 2, nil, nil, storage.AOptions{Metadata: metadata.Metadata{Help: "yo"}})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Len(t, appTest.ResultSamples(), 1)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+ })
+ t.Run("without metadata", func(t *testing.T) {
+ appTest := teststorage.NewAppendable()
+ oa := newOTLPInstrumentedAppendable(prometheus.NewRegistry(), appTest)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+
+ app := oa.AppenderV2(t.Context())
+ _, err := app.Append(0, labels.EmptyLabels(), -1, 1, 2, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Len(t, appTest.ResultSamples(), 1)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 1.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+ })
+ t.Run("without metadata; 2 exemplar OOO errors", func(t *testing.T) {
+ appTest := teststorage.NewAppendable().WithErrs(nil, errors.New("exemplar error"), nil)
+ oa := newOTLPInstrumentedAppendable(prometheus.NewRegistry(), appTest)
+
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 0.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+
+ app := oa.AppenderV2(t.Context())
+ _, err := app.Append(0, labels.EmptyLabels(), -1, 1, 2, nil, nil, storage.AOptions{Exemplars: []exemplar.Exemplar{{}, {}}})
+ // Partial errors should be handled in the middleware, OTLP converter does not handle it.
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Len(t, appTest.ResultSamples(), 1)
+
+ require.Equal(t, 2.0, testutil.ToFloat64(oa.outOfOrderExemplars))
+ require.Equal(t, 1.0, testutil.ToFloat64(oa.samplesAppendedWithoutMetadata))
+ })
+}
diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go
index 099a2f1cab..1b1b86ff1e 100644
--- a/storage/remote/write_test.go
+++ b/storage/remote/write_test.go
@@ -14,40 +14,20 @@
package remote
import (
- "bytes"
- "context"
"errors"
- "fmt"
- "log/slog"
- "math/rand/v2"
- "net/http"
- "net/http/httptest"
"net/url"
- "os"
- "reflect"
- "runtime"
- "strconv"
- "sync"
"testing"
"time"
- "github.com/google/go-cmp/cmp"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
- "github.com/prometheus/otlptranslator"
"github.com/stretchr/testify/require"
- "go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
- "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"github.com/prometheus/prometheus/config"
- "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/relabel"
- "github.com/prometheus/prometheus/storage"
)
func testRemoteWriteConfig() *config.RemoteWriteConfig {
@@ -385,1233 +365,6 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
require.NoError(t, s.Close())
}
-func TestOTLPWriteHandler(t *testing.T) {
- timestamp := time.Now()
- var zeroTime time.Time
- exportRequest := generateOTLPWriteRequest(timestamp, zeroTime)
- for _, testCase := range []struct {
- name string
- otlpCfg config.OTLPConfig
- typeAndUnitLabels bool
- expectedSamples []mockSample
- expectedMetadata []mockMetadata
- }{
- {
- name: "NoTranslation/NoTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoTranslation,
- },
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "NoTranslation/WithTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoTranslation,
- },
- typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- // Metadata labels follow series labels.
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "UnderscoreEscapingWithSuffixes/NoTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
- },
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "UnderscoreEscapingWithoutSuffixes",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.UnderscoreEscapingWithoutSuffixes,
- },
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "UnderscoreEscapingWithSuffixes/WithTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
- },
- typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.New(labels.Label{Name: "__name__", Value: "test_counter_bytes_total"},
- labels.Label{Name: "__type__", Value: "counter"},
- labels.Label{Name: "__unit__", Value: "bytes"},
- labels.Label{Name: "foo_bar", Value: "baz"},
- labels.Label{Name: "instance", Value: "test-instance"},
- labels.Label{Name: "job", Value: "test-service"}),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.New(
- labels.Label{Name: "__name__", Value: "target_info"},
- labels.Label{Name: "host_name", Value: "test-host"},
- labels.Label{Name: "instance", Value: "test-instance"},
- labels.Label{Name: "job", Value: "test-service"},
- ),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "NoUTF8EscapingWithSuffixes/NoTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
- },
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- {
- name: "NoUTF8EscapingWithSuffixes/WithTypeAndUnitLabels",
- otlpCfg: config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
- },
- typeAndUnitLabels: true,
- expectedSamples: []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1,
- },
- },
- expectedMetadata: []mockMetadata{
- // All get _bytes unit suffix and counter also gets _total.
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
- },
- },
- },
- } {
- t.Run(testCase.name, func(t *testing.T) {
- otlpOpts := OTLPOptions{
- EnableTypeAndUnitLabels: testCase.typeAndUnitLabels,
- AppendMetadata: true,
- }
- appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, otlpOpts)
- for _, sample := range testCase.expectedSamples {
- requireContainsSample(t, appendable.samples, sample)
- }
- for _, meta := range testCase.expectedMetadata {
- requireContainsMetadata(t, appendable.metadata, meta)
- }
- require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
- require.Len(t, appendable.histograms, 1) // 1 (exponential histogram)
- require.Len(t, appendable.metadata, 13) // for each float and histogram sample
- require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
- })
- }
-}
-
-// Check that start time is ingested if ingestSTZeroSample is enabled
-// and the start time is actually set (non-zero).
-func TestOTLPWriteHandler_StartTime(t *testing.T) {
- timestamp := time.Now()
- startTime := timestamp.Add(-1 * time.Millisecond)
- var zeroTime time.Time
-
- expectedSamples := []mockSample{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 30.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
- t: timestamp.UnixMilli(),
- v: 2.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
- t: timestamp.UnixMilli(),
- v: 4.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
- t: timestamp.UnixMilli(),
- v: 6.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
- t: timestamp.UnixMilli(),
- v: 8.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
- t: timestamp.UnixMilli(),
- v: 10.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
- t: timestamp.UnixMilli(),
- v: 12.0,
- },
- {
- l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- v: 1.0,
- },
- }
- expectedHistograms := []mockHistogram{
- {
- l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
- t: timestamp.UnixMilli(),
- h: &histogram.Histogram{
- Schema: 2,
- ZeroThreshold: 1e-128,
- ZeroCount: 2,
- Count: 10,
- Sum: 30,
- PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}},
- PositiveBuckets: []int64{2, 0, 0, 0, 0},
- },
- },
- }
-
- expectedSamplesWithSTZero := make([]mockSample, 0, len(expectedSamples)*2-1) // All samples will get ST zero, except target_info.
- for _, s := range expectedSamples {
- if s.l.Get(model.MetricNameLabel) != "target_info" {
- expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, mockSample{
- l: s.l.Copy(),
- t: startTime.UnixMilli(),
- v: 0,
- })
- }
- expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, s)
- }
- expectedHistogramsWithSTZero := make([]mockHistogram, 0, len(expectedHistograms)*2)
- for _, s := range expectedHistograms {
- if s.l.Get(model.MetricNameLabel) != "target_info" {
- expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, mockHistogram{
- l: s.l.Copy(),
- t: startTime.UnixMilli(),
- h: &histogram.Histogram{},
- })
- }
- expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, s)
- }
-
- for _, testCase := range []struct {
- name string
- otlpOpts OTLPOptions
- startTime time.Time
- expectSTZero bool
- expectedSamples []mockSample
- expectedHistograms []mockHistogram
- }{
- {
- name: "IngestSTZero=false/startTime=0",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: false,
- },
- startTime: zeroTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=true/startTime=0",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: true,
- },
- startTime: zeroTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=false/startTime=ts-1ms",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: false,
- },
- startTime: startTime,
- expectedSamples: expectedSamples,
- expectedHistograms: expectedHistograms,
- },
- {
- name: "IngestSTZero=true/startTime=ts-1ms",
- otlpOpts: OTLPOptions{
- IngestSTZeroSample: true,
- },
- startTime: startTime,
- expectedSamples: expectedSamplesWithSTZero,
- expectedHistograms: expectedHistogramsWithSTZero,
- },
- } {
- t.Run(testCase.name, func(t *testing.T) {
- exportRequest := generateOTLPWriteRequest(timestamp, testCase.startTime)
- appendable := handleOTLP(t, exportRequest, config.OTLPConfig{
- TranslationStrategy: otlptranslator.NoTranslation,
- }, testCase.otlpOpts)
- for i, expect := range testCase.expectedSamples {
- actual := appendable.samples[i]
- require.True(t, labels.Equal(expect.l, actual.l), "sample labels,pos=%v", i)
- require.Equal(t, expect.t, actual.t, "sample timestamp,pos=%v", i)
- require.Equal(t, expect.v, actual.v, "sample value,pos=%v", i)
- }
- for i, expect := range testCase.expectedHistograms {
- actual := appendable.histograms[i]
- require.True(t, labels.Equal(expect.l, actual.l), "histogram labels,pos=%v", i)
- require.Equal(t, expect.t, actual.t, "histogram timestamp,pos=%v", i)
- require.True(t, expect.h.Equals(actual.h), "histogram value,pos=%v", i)
- }
- require.Len(t, appendable.samples, len(testCase.expectedSamples))
- require.Len(t, appendable.histograms, len(testCase.expectedHistograms))
- })
- }
-}
-
-func requireContainsSample(t *testing.T, actual []mockSample, expected mockSample) {
- t.Helper()
-
- for _, got := range actual {
- if labels.Equal(expected.l, got.l) && expected.t == got.t && expected.v == got.v {
- return
- }
- }
- require.Fail(t, fmt.Sprintf("Sample not found: \n"+
- "expected: %v\n"+
- "actual : %v", expected, actual))
-}
-
-func requireContainsMetadata(t *testing.T, actual []mockMetadata, expected mockMetadata) {
- t.Helper()
-
- for _, got := range actual {
- if labels.Equal(expected.l, got.l) && expected.m.Type == got.m.Type && expected.m.Unit == got.m.Unit && expected.m.Help == got.m.Help {
- return
- }
- }
- require.Fail(t, fmt.Sprintf("Metadata not found: \n"+
- "expected: %v\n"+
- "actual : %v", expected, actual))
-}
-
-func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *mockAppendable {
- buf, err := exportRequest.MarshalProto()
- require.NoError(t, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(buf))
- require.NoError(t, err)
- req.Header.Set("Content-Type", "application/x-protobuf")
-
- log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- appendable := &mockAppendable{}
- handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config {
- return config.Config{
- OTLPConfig: otlpCfg,
- }
- }, otlpOpts)
- recorder := httptest.NewRecorder()
- handler.ServeHTTP(recorder, req)
-
- resp := recorder.Result()
- require.Equal(t, http.StatusOK, resp.StatusCode)
-
- return appendable
-}
-
-func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.ExportRequest {
- d := pmetric.NewMetrics()
-
- // Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
- // with resource attributes: service.name="test-service", service.instance.id="test-instance", host.name="test-host"
- // with metric attribute: foo.bar="baz"
-
- resourceMetric := d.ResourceMetrics().AppendEmpty()
- resourceMetric.Resource().Attributes().PutStr("service.name", "test-service")
- resourceMetric.Resource().Attributes().PutStr("service.instance.id", "test-instance")
- resourceMetric.Resource().Attributes().PutStr("host.name", "test-host")
-
- scopeMetric := resourceMetric.ScopeMetrics().AppendEmpty()
-
- // Generate One Counter
- counterMetric := scopeMetric.Metrics().AppendEmpty()
- counterMetric.SetName("test.counter")
- counterMetric.SetDescription("test-counter-description")
- counterMetric.SetUnit("By")
- counterMetric.SetEmptySum()
- counterMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- counterMetric.Sum().SetIsMonotonic(true)
-
- counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty()
- counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- counterDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
- counterDataPoint.SetDoubleValue(10.0)
- counterDataPoint.Attributes().PutStr("foo.bar", "baz")
-
- counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
-
- counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- counterExemplar.SetDoubleValue(10.0)
- counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})
- counterExemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
-
- // Generate One Gauge
- gaugeMetric := scopeMetric.Metrics().AppendEmpty()
- gaugeMetric.SetName("test.gauge")
- gaugeMetric.SetDescription("test-gauge-description")
- gaugeMetric.SetUnit("By")
- gaugeMetric.SetEmptyGauge()
-
- gaugeDataPoint := gaugeMetric.Gauge().DataPoints().AppendEmpty()
- gaugeDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- gaugeDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
- gaugeDataPoint.SetDoubleValue(10.0)
- gaugeDataPoint.Attributes().PutStr("foo.bar", "baz")
-
- // Generate One Histogram
- histogramMetric := scopeMetric.Metrics().AppendEmpty()
- histogramMetric.SetName("test.histogram")
- histogramMetric.SetDescription("test-histogram-description")
- histogramMetric.SetUnit("By")
- histogramMetric.SetEmptyHistogram()
- histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
-
- histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty()
- histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- histogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
- histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0})
- histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2})
- histogramDataPoint.SetCount(12)
- histogramDataPoint.SetSum(30.0)
- histogramDataPoint.Attributes().PutStr("foo.bar", "baz")
-
- // Generate One Exponential-Histogram
- exponentialHistogramMetric := scopeMetric.Metrics().AppendEmpty()
- exponentialHistogramMetric.SetName("test.exponential.histogram")
- exponentialHistogramMetric.SetDescription("test-exponential-histogram-description")
- exponentialHistogramMetric.SetUnit("By")
- exponentialHistogramMetric.SetEmptyExponentialHistogram()
- exponentialHistogramMetric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
-
- exponentialHistogramDataPoint := exponentialHistogramMetric.ExponentialHistogram().DataPoints().AppendEmpty()
- exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
- exponentialHistogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
- exponentialHistogramDataPoint.SetScale(2.0)
- exponentialHistogramDataPoint.Positive().BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2})
- exponentialHistogramDataPoint.SetZeroCount(2)
- exponentialHistogramDataPoint.SetCount(10)
- exponentialHistogramDataPoint.SetSum(30.0)
- exponentialHistogramDataPoint.Attributes().PutStr("foo.bar", "baz")
-
- return pmetricotlp.NewExportRequestFromMetrics(d)
-}
-
-func TestOTLPDelta(t *testing.T) {
- log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- appendable := &mockAppendable{}
- cfg := func() config.Config {
- return config.Config{OTLPConfig: config.DefaultOTLPConfig}
- }
- handler := NewOTLPWriteHandler(log, nil, appendable, cfg, OTLPOptions{ConvertDelta: true})
-
- md := pmetric.NewMetrics()
- ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
-
- m := ms.AppendEmpty()
- m.SetName("some.delta.total")
-
- sum := m.SetEmptySum()
- sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
-
- ts := time.Date(2000, 1, 2, 3, 4, 0, 0, time.UTC)
- for i := range 3 {
- dp := sum.DataPoints().AppendEmpty()
- dp.SetIntValue(int64(i))
- dp.SetTimestamp(pcommon.NewTimestampFromTime(ts.Add(time.Duration(i) * time.Second)))
- }
-
- proto, err := pmetricotlp.NewExportRequestFromMetrics(md).MarshalProto()
- require.NoError(t, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(proto))
- require.NoError(t, err)
- req.Header.Set("Content-Type", "application/x-protobuf")
-
- rec := httptest.NewRecorder()
- handler.ServeHTTP(rec, req)
- require.Equal(t, http.StatusOK, rec.Result().StatusCode)
-
- ls := labels.FromStrings("__name__", "some_delta_total")
- milli := func(sec int) int64 {
- return time.Date(2000, 1, 2, 3, 4, sec, 0, time.UTC).UnixMilli()
- }
-
- want := []mockSample{
- {t: milli(0), l: ls, v: 0}, // +0
- {t: milli(1), l: ls, v: 1}, // +1
- {t: milli(2), l: ls, v: 3}, // +2
- }
- if diff := cmp.Diff(want, appendable.samples, cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" {
- t.Fatal(diff)
- }
-}
-
-func BenchmarkOTLP(b *testing.B) {
- start := time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
-
- type Type struct {
- name string
- data func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric
- }
- types := []Type{{
- name: "sum",
- data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- cumul := make(map[int]float64)
- return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- m := pmetric.NewMetric()
- sum := m.SetEmptySum()
- sum.SetAggregationTemporality(mode)
- dps := sum.DataPoints()
- for id := range dpc {
- dp := dps.AppendEmpty()
- dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
- dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
- dp.Attributes().PutStr("id", strconv.Itoa(id))
- v := float64(rand.IntN(100)) / 10
- switch mode {
- case pmetric.AggregationTemporalityDelta:
- dp.SetDoubleValue(v)
- case pmetric.AggregationTemporalityCumulative:
- cumul[id] += v
- dp.SetDoubleValue(cumul[id])
- }
- }
- return []pmetric.Metric{m}
- }
- }(),
- }, {
- name: "histogram",
- data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- bounds := [4]float64{1, 10, 100, 1000}
- type state struct {
- counts [4]uint64
- count uint64
- sum float64
- }
- var cumul []state
- return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- if cumul == nil {
- cumul = make([]state, dpc)
- }
- m := pmetric.NewMetric()
- hist := m.SetEmptyHistogram()
- hist.SetAggregationTemporality(mode)
- dps := hist.DataPoints()
- for id := range dpc {
- dp := dps.AppendEmpty()
- dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
- dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
- dp.Attributes().PutStr("id", strconv.Itoa(id))
- dp.ExplicitBounds().FromRaw(bounds[:])
-
- var obs *state
- switch mode {
- case pmetric.AggregationTemporalityDelta:
- obs = new(state)
- case pmetric.AggregationTemporalityCumulative:
- obs = &cumul[id]
- }
-
- for i := range obs.counts {
- v := uint64(rand.IntN(10))
- obs.counts[i] += v
- obs.count++
- obs.sum += float64(v)
- }
-
- dp.SetCount(obs.count)
- dp.SetSum(obs.sum)
- dp.BucketCounts().FromRaw(obs.counts[:])
- }
- return []pmetric.Metric{m}
- }
- }(),
- }, {
- name: "exponential",
- data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- type state struct {
- counts [4]uint64
- count uint64
- sum float64
- }
- var cumul []state
- return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
- if cumul == nil {
- cumul = make([]state, dpc)
- }
- m := pmetric.NewMetric()
- ex := m.SetEmptyExponentialHistogram()
- ex.SetAggregationTemporality(mode)
- dps := ex.DataPoints()
- for id := range dpc {
- dp := dps.AppendEmpty()
- dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
- dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
- dp.Attributes().PutStr("id", strconv.Itoa(id))
- dp.SetScale(2)
-
- var obs *state
- switch mode {
- case pmetric.AggregationTemporalityDelta:
- obs = new(state)
- case pmetric.AggregationTemporalityCumulative:
- obs = &cumul[id]
- }
-
- for i := range obs.counts {
- v := uint64(rand.IntN(10))
- obs.counts[i] += v
- obs.count++
- obs.sum += float64(v)
- }
-
- dp.Positive().BucketCounts().FromRaw(obs.counts[:])
- dp.SetCount(obs.count)
- dp.SetSum(obs.sum)
- }
-
- return []pmetric.Metric{m}
- }
- }(),
- }}
-
- modes := []struct {
- name string
- data func(func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, int) []pmetric.Metric
- }{{
- name: "cumulative",
- data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
- return data(pmetric.AggregationTemporalityCumulative, 10, epoch)
- },
- }, {
- name: "delta",
- data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
- return data(pmetric.AggregationTemporalityDelta, 10, epoch)
- },
- }, {
- name: "mixed",
- data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
- cumul := data(pmetric.AggregationTemporalityCumulative, 5, epoch)
- delta := data(pmetric.AggregationTemporalityDelta, 5, epoch)
- out := append(cumul, delta...)
- rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
- return out
- },
- }}
-
- configs := []struct {
- name string
- opts OTLPOptions
- }{
- {name: "default"},
- {name: "convert", opts: OTLPOptions{ConvertDelta: true}},
- }
-
- Workers := runtime.GOMAXPROCS(0)
- for _, cs := range types {
- for _, mode := range modes {
- for _, cfg := range configs {
- b.Run(fmt.Sprintf("type=%s/temporality=%s/cfg=%s", cs.name, mode.name, cfg.name), func(b *testing.B) {
- if !cfg.opts.ConvertDelta && (mode.name == "delta" || mode.name == "mixed") {
- b.Skip("not possible")
- }
-
- var total int
-
- // reqs is a [b.N]*http.Request, divided across the workers.
- // deltatocumulative requires timestamps to be strictly in
- // order on a per-series basis. to ensure this, each reqs[k]
- // contains samples of differently named series, sorted
- // strictly in time order
- reqs := make([][]*http.Request, Workers)
- for n := range b.N {
- k := n % Workers
-
- md := pmetric.NewMetrics()
- ms := md.ResourceMetrics().AppendEmpty().
- ScopeMetrics().AppendEmpty().
- Metrics()
-
- for i, m := range mode.data(cs.data, n) {
- m.SetName(fmt.Sprintf("benchmark_%d_%d", k, i))
- m.MoveTo(ms.AppendEmpty())
- }
-
- total += sampleCount(md)
-
- ex := pmetricotlp.NewExportRequestFromMetrics(md)
- data, err := ex.MarshalProto()
- require.NoError(b, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(data))
- require.NoError(b, err)
- req.Header.Set("Content-Type", "application/x-protobuf")
-
- reqs[k] = append(reqs[k], req)
- }
-
- log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
- mock := new(mockAppendable)
- appendable := syncAppendable{Appendable: mock, lock: new(sync.Mutex)}
- cfgfn := func() config.Config {
- return config.Config{OTLPConfig: config.DefaultOTLPConfig}
- }
- handler := NewOTLPWriteHandler(log, nil, appendable, cfgfn, cfg.opts)
-
- fail := make(chan struct{})
- done := make(chan struct{})
-
- b.ResetTimer()
- b.ReportAllocs()
-
- // we use multiple workers to mimic a real-world scenario
- // where multiple OTel collectors are sending their
- // time-series in parallel.
- // this is necessary to exercise potential lock-contention
- // in this benchmark
- for k := range Workers {
- go func() {
- rec := httptest.NewRecorder()
- for _, req := range reqs[k] {
- handler.ServeHTTP(rec, req)
- if rec.Result().StatusCode != http.StatusOK {
- fail <- struct{}{}
- return
- }
- }
- done <- struct{}{}
- }()
- }
-
- for range Workers {
- select {
- case <-fail:
- b.FailNow()
- case <-done:
- }
- }
-
- require.Equal(b, total, len(mock.samples)+len(mock.histograms))
- })
- }
- }
- }
-}
-
-func sampleCount(md pmetric.Metrics) int {
- var total int
- rms := md.ResourceMetrics()
- for i := range rms.Len() {
- sms := rms.At(i).ScopeMetrics()
- for i := range sms.Len() {
- ms := sms.At(i).Metrics()
- for i := range ms.Len() {
- m := ms.At(i)
- switch m.Type() {
- case pmetric.MetricTypeSum:
- total += m.Sum().DataPoints().Len()
- case pmetric.MetricTypeGauge:
- total += m.Gauge().DataPoints().Len()
- case pmetric.MetricTypeHistogram:
- dps := m.Histogram().DataPoints()
- for i := range dps.Len() {
- total += dps.At(i).BucketCounts().Len()
- total++ // le=+Inf series
- total++ // _sum series
- total++ // _count series
- }
- case pmetric.MetricTypeExponentialHistogram:
- total += m.ExponentialHistogram().DataPoints().Len()
- case pmetric.MetricTypeSummary:
- total += m.Summary().DataPoints().Len()
- }
- }
- }
- }
- return total
-}
-
-type syncAppendable struct {
- lock sync.Locker
- storage.Appendable
-}
-
-type syncAppender struct {
- lock sync.Locker
- storage.Appender
-}
-
-func (s syncAppendable) Appender(ctx context.Context) storage.Appender {
- return syncAppender{Appender: s.Appendable.Appender(ctx), lock: s.lock}
-}
-
-func (s syncAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
- s.lock.Lock()
- defer s.lock.Unlock()
- return s.Appender.Append(ref, l, t, v)
-}
-
-func (s syncAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, f *histogram.FloatHistogram) (storage.SeriesRef, error) {
- s.lock.Lock()
- defer s.lock.Unlock()
- return s.Appender.AppendHistogram(ref, l, t, h, f)
-}
-
func TestWriteStorage_CanRegisterMetricsAfterClosing(t *testing.T) {
dir := t.TempDir()
reg := prometheus.NewPedanticRegistry()
diff --git a/storage/series.go b/storage/series.go
index 7e130d494d..bf6df7db3e 100644
--- a/storage/series.go
+++ b/storage/series.go
@@ -138,6 +138,11 @@ func (it *listSeriesIterator) AtT() int64 {
return s.T()
}
+func (it *listSeriesIterator) AtST() int64 {
+ s := it.samples.Get(it.idx)
+ return s.ST()
+}
+
func (it *listSeriesIterator) Next() chunkenc.ValueType {
it.idx++
if it.idx >= it.samples.Len() {
@@ -355,18 +360,20 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
lastType = typ
var (
- t int64
- v float64
- h *histogram.Histogram
- fh *histogram.FloatHistogram
+ st, t int64
+ v float64
+ h *histogram.Histogram
+ fh *histogram.FloatHistogram
)
switch typ {
case chunkenc.ValFloat:
t, v = seriesIter.At()
- app.Append(t, v)
+ st = seriesIter.AtST()
+ app.Append(st, t, v)
case chunkenc.ValHistogram:
t, h = seriesIter.AtHistogram(nil)
- newChk, recoded, app, err = app.AppendHistogram(nil, t, h, false)
+ st = seriesIter.AtST()
+ newChk, recoded, app, err = app.AppendHistogram(nil, st, t, h, false)
if err != nil {
return errChunksIterator{err: err}
}
@@ -381,7 +388,8 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
}
case chunkenc.ValFloatHistogram:
t, fh = seriesIter.AtFloatHistogram(nil)
- newChk, recoded, app, err = app.AppendFloatHistogram(nil, t, fh, false)
+ st = seriesIter.AtST()
+ newChk, recoded, app, err = app.AppendFloatHistogram(nil, st, t, fh, false)
if err != nil {
return errChunksIterator{err: err}
}
@@ -439,16 +447,26 @@ func (e errChunksIterator) Err() error { return e.err }
// ExpandSamples iterates over all samples in the iterator, buffering all in slice.
// Optionally it takes samples constructor, useful when you want to compare sample slices with different
// sample implementations. if nil, sample type from this package will be used.
-func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample) ([]chunks.Sample, error) {
+// For float sample, NaN values are replaced with -42.
+func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(st, t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample) ([]chunks.Sample, error) {
+ return expandSamples(iter, true, newSampleFn)
+}
+
+// ExpandSamplesWithoutReplacingNaNs is same as ExpandSamples but it does not replace float sample NaN values with anything.
+func ExpandSamplesWithoutReplacingNaNs(iter chunkenc.Iterator, newSampleFn func(st, t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample) ([]chunks.Sample, error) {
+ return expandSamples(iter, false, newSampleFn)
+}
+
+func expandSamples(iter chunkenc.Iterator, replaceNaN bool, newSampleFn func(st, t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample) ([]chunks.Sample, error) {
if newSampleFn == nil {
- newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample {
+ newSampleFn = func(st, t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample {
switch {
case h != nil:
- return hSample{t, h}
+ return hSample{st, t, h}
case fh != nil:
- return fhSample{t, fh}
+ return fhSample{st, t, fh}
default:
- return fSample{t, f}
+ return fSample{st, t, f}
}
}
}
@@ -460,17 +478,20 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64,
return result, iter.Err()
case chunkenc.ValFloat:
t, f := iter.At()
+ st := iter.AtST()
// NaNs can't be compared normally, so substitute for another value.
- if math.IsNaN(f) {
+ if replaceNaN && math.IsNaN(f) {
f = -42
}
- result = append(result, newSampleFn(t, f, nil, nil))
+ result = append(result, newSampleFn(st, t, f, nil, nil))
case chunkenc.ValHistogram:
t, h := iter.AtHistogram(nil)
- result = append(result, newSampleFn(t, 0, h, nil))
+ st := iter.AtST()
+ result = append(result, newSampleFn(st, t, 0, h, nil))
case chunkenc.ValFloatHistogram:
t, fh := iter.AtFloatHistogram(nil)
- result = append(result, newSampleFn(t, 0, nil, fh))
+ st := iter.AtST()
+ result = append(result, newSampleFn(st, t, 0, nil, fh))
}
}
}
diff --git a/storage/series_test.go b/storage/series_test.go
index 954d62f1b3..b33d6cb1b3 100644
--- a/storage/series_test.go
+++ b/storage/series_test.go
@@ -28,11 +28,11 @@ import (
func TestListSeriesIterator(t *testing.T) {
it := NewListSeriesIterator(samples{
- fSample{0, 0},
- fSample{1, 1},
- fSample{1, 1.5},
- fSample{2, 2},
- fSample{3, 3},
+ fSample{-10, 0, 0},
+ fSample{-9, 1, 1},
+ fSample{-8, 1, 1.5},
+ fSample{-7, 2, 2},
+ fSample{-6, 3, 3},
})
// Seek to the first sample with ts=1.
@@ -40,30 +40,35 @@ func TestListSeriesIterator(t *testing.T) {
ts, v := it.At()
require.Equal(t, int64(1), ts)
require.Equal(t, 1., v)
+ require.Equal(t, int64(-9), it.AtST())
// Seek one further, next sample still has ts=1.
require.Equal(t, chunkenc.ValFloat, it.Next())
ts, v = it.At()
require.Equal(t, int64(1), ts)
require.Equal(t, 1.5, v)
+ require.Equal(t, int64(-8), it.AtST())
// Seek again to 1 and make sure we stay where we are.
require.Equal(t, chunkenc.ValFloat, it.Seek(1))
ts, v = it.At()
require.Equal(t, int64(1), ts)
require.Equal(t, 1.5, v)
+ require.Equal(t, int64(-8), it.AtST())
// Another seek.
require.Equal(t, chunkenc.ValFloat, it.Seek(3))
ts, v = it.At()
require.Equal(t, int64(3), ts)
require.Equal(t, 3., v)
+ require.Equal(t, int64(-6), it.AtST())
// And we don't go back.
require.Equal(t, chunkenc.ValFloat, it.Seek(2))
ts, v = it.At()
require.Equal(t, int64(3), ts)
require.Equal(t, 3., v)
+ require.Equal(t, int64(-6), it.AtST())
// Seek beyond the end.
require.Equal(t, chunkenc.ValNone, it.Seek(5))
diff --git a/tracing/tracing.go b/tracing/tracing.go
index b35673b2b4..36a8d0fe10 100644
--- a/tracing/tracing.go
+++ b/tracing/tracing.go
@@ -29,7 +29,7 @@ import (
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
"google.golang.org/grpc/credentials"
diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go
index 1b29b223d7..460ceb7c04 100644
--- a/tsdb/agent/db.go
+++ b/tsdb/agent/db.go
@@ -92,6 +92,11 @@ type Options struct {
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
EnableSTAsZeroSample bool
+
+ // EnableSTStorage determines whether agent DB should write a Start Timestamp (ST)
+ // per sample to WAL.
+ // TODO(bwplotka): Implement this option as per PROM-60, currently it's noop.
+ EnableSTStorage bool
}
// DefaultOptions used for the WAL storage. They are reasonable for setups using
diff --git a/tsdb/block.go b/tsdb/block.go
index 3f089b9da7..118dd672ef 100644
--- a/tsdb/block.go
+++ b/tsdb/block.go
@@ -33,7 +33,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -228,6 +227,18 @@ func (bm *BlockMetaCompaction) FromOutOfOrder() bool {
return slices.Contains(bm.Hints, CompactionHintFromOutOfOrder)
}
+func (bm *BlockMetaCompaction) SetStaleSeries() {
+ if bm.FromStaleSeries() {
+ return
+ }
+ bm.Hints = append(bm.Hints, CompactionHintFromStaleSeries)
+ slices.Sort(bm.Hints)
+}
+
+func (bm *BlockMetaCompaction) FromStaleSeries() bool {
+ return slices.Contains(bm.Hints, CompactionHintFromStaleSeries)
+}
+
const (
indexFilename = "index"
metaFilename = "meta.json"
@@ -236,6 +247,10 @@ const (
// CompactionHintFromOutOfOrder is a hint noting that the block
// was created from out-of-order chunks.
CompactionHintFromOutOfOrder = "from-out-of-order"
+
+ // CompactionHintFromStaleSeries is a hint noting that the block
+ // was created from stale series.
+ CompactionHintFromStaleSeries = "from-stale-series"
)
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
@@ -281,12 +296,12 @@ func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, err
n, err := f.Write(jsonMeta)
if err != nil {
- return 0, tsdb_errors.NewMulti(err, f.Close()).Err()
+ return 0, errors.Join(err, f.Close())
}
// Force the kernel to persist the file on disk to avoid data loss if the host crashes.
if err := f.Sync(); err != nil {
- return 0, tsdb_errors.NewMulti(err, f.Close()).Err()
+ return 0, errors.Join(err, f.Close())
}
if err := f.Close(); err != nil {
return 0, err
@@ -328,7 +343,7 @@ func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool, postingsDeco
var closers []io.Closer
defer func() {
if err != nil {
- err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(closers)).Err()
+ err = errors.Join(err, closeAll(closers))
}
}()
meta, sizeMeta, err := readMetaFile(dir)
@@ -382,11 +397,11 @@ func (pb *Block) Close() error {
pb.pendingReaders.Wait()
- return tsdb_errors.NewMulti(
+ return errors.Join(
pb.chunkr.Close(),
pb.indexr.Close(),
pb.tombstones.Close(),
- ).Err()
+ )
}
func (pb *Block) String() string {
diff --git a/tsdb/block_test.go b/tsdb/block_test.go
index 855fa5638a..edd2df7415 100644
--- a/tsdb/block_test.go
+++ b/tsdb/block_test.go
@@ -176,7 +176,7 @@ func TestCorruptedChunk(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
tmpdir := t.TempDir()
- series := storage.NewListSeries(labels.FromStrings("a", "b"), []chunks.Sample{sample{1, 1, nil, nil}})
+ series := storage.NewListSeries(labels.FromStrings("a", "b"), []chunks.Sample{sample{0, 1, 1, nil, nil}})
blockDir := createBlock(t, tmpdir, []storage.Series{series})
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(t, err)
@@ -236,7 +236,7 @@ func TestLabelValuesWithMatchers(t *testing.T) {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"tens", fmt.Sprintf("value%d", i/10),
"unique", fmt.Sprintf("value%d", i),
- ), []chunks.Sample{sample{100, 0, nil, nil}}))
+ ), []chunks.Sample{sample{0, 100, 0, nil, nil}}))
}
blockDir := createBlock(t, tmpdir, seriesEntries)
@@ -319,7 +319,7 @@ func TestBlockQuerierReturnsSortedLabelValues(t *testing.T) {
for i := 100; i > 0; i-- {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"__name__", fmt.Sprintf("value%d", i),
- ), []chunks.Sample{sample{100, 0, nil, nil}}))
+ ), []chunks.Sample{sample{0, 100, 0, nil, nil}}))
}
blockDir := createBlock(t, tmpdir, seriesEntries)
@@ -436,7 +436,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
"a_unique", fmt.Sprintf("value%d", i),
"b_tens", fmt.Sprintf("value%d", i/(metricCount/10)),
"c_ninety", fmt.Sprintf("value%d", i/(metricCount/10)/9), // "0" for the first 90%, then "1"
- ), []chunks.Sample{sample{100, 0, nil, nil}}))
+ ), []chunks.Sample{sample{0, 100, 0, nil, nil}}))
}
blockDir := createBlock(b, tmpdir, seriesEntries)
@@ -472,13 +472,13 @@ func TestLabelNamesWithMatchers(t *testing.T) {
for i := range 100 {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"unique", fmt.Sprintf("value%d", i),
- ), []chunks.Sample{sample{100, 0, nil, nil}}))
+ ), []chunks.Sample{sample{0, 100, 0, nil, nil}}))
if i%10 == 0 {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"tens", fmt.Sprintf("value%d", i/10),
"unique", fmt.Sprintf("value%d", i),
- ), []chunks.Sample{sample{100, 0, nil, nil}}))
+ ), []chunks.Sample{sample{0, 100, 0, nil, nil}}))
}
if i%20 == 0 {
@@ -486,7 +486,7 @@ func TestLabelNamesWithMatchers(t *testing.T) {
"tens", fmt.Sprintf("value%d", i/10),
"twenties", fmt.Sprintf("value%d", i/20),
"unique", fmt.Sprintf("value%d", i),
- ), []chunks.Sample{sample{100, 0, nil, nil}}))
+ ), []chunks.Sample{sample{0, 100, 0, nil, nil}}))
}
}
@@ -542,7 +542,7 @@ func TestBlockIndexReader_PostingsForLabelMatching(t *testing.T) {
testPostingsForLabelMatching(t, 2, func(t *testing.T, series []labels.Labels) IndexReader {
var seriesEntries []storage.Series
for _, s := range series {
- seriesEntries = append(seriesEntries, storage.NewListSeries(s, []chunks.Sample{sample{100, 0, nil, nil}}))
+ seriesEntries = append(seriesEntries, storage.NewListSeries(s, []chunks.Sample{sample{0, 100, 0, nil, nil}}))
}
blockDir := createBlock(t, t.TempDir(), seriesEntries)
diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go
index fed28c5701..711966ec39 100644
--- a/tsdb/chunkenc/chunk.go
+++ b/tsdb/chunkenc/chunk.go
@@ -99,9 +99,9 @@ type Iterable interface {
Iterator(Iterator) Iterator
}
-// Appender adds sample pairs to a chunk.
+// Appender adds sample with start timestamp, timestamp, and value to a chunk.
type Appender interface {
- Append(int64, float64)
+ Append(st, t int64, v float64)
// AppendHistogram and AppendFloatHistogram append a histogram sample to a histogram or float histogram chunk.
// Appending a histogram may require creating a completely new chunk or recoding (changing) the current chunk.
@@ -114,8 +114,8 @@ type Appender interface {
// The returned bool isRecoded can be used to distinguish between the new Chunk c being a completely new Chunk
// or the current Chunk recoded to a new Chunk.
// The Appender app that can be used for the next append is always returned.
- AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
- AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
+ AppendHistogram(prev *HistogramAppender, st, t int64, h *histogram.Histogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
+ AppendFloatHistogram(prev *FloatHistogramAppender, st, t int64, h *histogram.FloatHistogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
}
// Iterator is a simple iterator that can only get the next value.
@@ -151,6 +151,10 @@ type Iterator interface {
// AtT returns the current timestamp.
// Before the iterator has advanced, the behaviour is unspecified.
AtT() int64
+ // AtST returns the current start timestamp.
+ // Returns 0 if the start timestamp is not implemented or not set.
+ // Before the iterator has advanced, the behaviour is unspecified.
+ AtST() int64
// Err returns the current error. It should be used only after the
// iterator is exhausted, i.e. `Next` or `Seek` have returned ValNone.
Err() error
@@ -208,25 +212,30 @@ func (v ValueType) NewChunk() (Chunk, error) {
}
}
-// MockSeriesIterator returns an iterator for a mock series with custom timeStamps and values.
-func MockSeriesIterator(timestamps []int64, values []float64) Iterator {
+// MockSeriesIterator returns an iterator for a mock series with custom
+// start timestamp, timestamps, and values.
+// Start timestamps is optional, pass nil or empty slice to indicate no start
+// timestamps.
+func MockSeriesIterator(startTimestamps, timestamps []int64, values []float64) Iterator {
return &mockSeriesIterator{
- timeStamps: timestamps,
- values: values,
- currIndex: -1,
+ startTimestamps: startTimestamps,
+ timestamps: timestamps,
+ values: values,
+ currIndex: -1,
}
}
type mockSeriesIterator struct {
- timeStamps []int64
- values []float64
- currIndex int
+ timestamps []int64
+ startTimestamps []int64
+ values []float64
+ currIndex int
}
func (*mockSeriesIterator) Seek(int64) ValueType { return ValNone }
func (it *mockSeriesIterator) At() (int64, float64) {
- return it.timeStamps[it.currIndex], it.values[it.currIndex]
+ return it.timestamps[it.currIndex], it.values[it.currIndex]
}
func (*mockSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
@@ -238,11 +247,18 @@ func (*mockSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *
}
func (it *mockSeriesIterator) AtT() int64 {
- return it.timeStamps[it.currIndex]
+ return it.timestamps[it.currIndex]
+}
+
+func (it *mockSeriesIterator) AtST() int64 {
+ if len(it.startTimestamps) == 0 {
+ return 0
+ }
+ return it.startTimestamps[it.currIndex]
}
func (it *mockSeriesIterator) Next() ValueType {
- if it.currIndex < len(it.timeStamps)-1 {
+ if it.currIndex < len(it.timestamps)-1 {
it.currIndex++
return ValFloat
}
@@ -268,8 +284,9 @@ func (nopIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogra
func (nopIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
return math.MinInt64, nil
}
-func (nopIterator) AtT() int64 { return math.MinInt64 }
-func (nopIterator) Err() error { return nil }
+func (nopIterator) AtT() int64 { return math.MinInt64 }
+func (nopIterator) AtST() int64 { return 0 }
+func (nopIterator) Err() error { return nil }
// Pool is used to create and reuse chunk references to avoid allocations.
type Pool interface {
diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go
index d2d0e4c053..41bb23ddd1 100644
--- a/tsdb/chunkenc/chunk_test.go
+++ b/tsdb/chunkenc/chunk_test.go
@@ -65,7 +65,7 @@ func testChunk(t *testing.T, c Chunk) {
require.NoError(t, err)
}
- app.Append(ts, v)
+ app.Append(0, ts, v)
exp = append(exp, pair{t: ts, v: v})
}
@@ -226,7 +226,7 @@ func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
if j > 250 {
break
}
- a.Append(p.t, p.v)
+ a.Append(0, p.t, p.v)
j++
}
}
@@ -303,7 +303,7 @@ func benchmarkAppender(b *testing.B, deltas func() (int64, float64), newChunk fu
b.Fatalf("get appender: %s", err)
}
for _, p := range exp {
- a.Append(p.t, p.v)
+ a.Append(0, p.t, p.v)
}
}
}
diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go
index 797bc596b5..6af2fa68e2 100644
--- a/tsdb/chunkenc/float_histogram.go
+++ b/tsdb/chunkenc/float_histogram.go
@@ -195,7 +195,7 @@ func (a *FloatHistogramAppender) NumSamples() int {
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
-func (*FloatHistogramAppender) Append(int64, float64) {
+func (*FloatHistogramAppender) Append(int64, int64, float64) {
panic("appended a float sample to a histogram chunk")
}
@@ -682,11 +682,11 @@ func (*FloatHistogramAppender) recodeHistogram(
}
}
-func (*FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
+func (*FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
panic("appended a histogram sample to a float histogram chunk")
}
-func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (Chunk, bool, Appender, error) {
+func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppender, _, t int64, h *histogram.FloatHistogram, appendOnly bool) (Chunk, bool, Appender, error) {
if a.NumSamples() == 0 {
a.appendFloatHistogram(t, h)
if h.CounterResetHint == histogram.GaugeType {
@@ -938,6 +938,10 @@ func (it *floatHistogramIterator) AtT() int64 {
return it.t
}
+func (*floatHistogramIterator) AtST() int64 {
+ return 0
+}
+
func (it *floatHistogramIterator) Err() error {
return it.err
}
diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go
index f27de97516..cbeb3171ce 100644
--- a/tsdb/chunkenc/float_histogram_test.go
+++ b/tsdb/chunkenc/float_histogram_test.go
@@ -63,7 +63,7 @@ func TestFirstFloatHistogramExplicitCounterReset(t *testing.T) {
chk := NewFloatHistogramChunk()
app, err := chk.Appender()
require.NoError(t, err)
- newChk, recoded, newApp, err := app.AppendFloatHistogram(nil, 0, h, false)
+ newChk, recoded, newApp, err := app.AppendFloatHistogram(nil, 0, 0, h, false)
require.NoError(t, err)
require.Nil(t, newChk)
require.False(t, recoded)
@@ -101,7 +101,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
},
NegativeBuckets: []int64{2, 1, -1, -1}, // counts: 2, 3, 2, 1 (total 8)
}
- chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false)
+ chk, _, app, err := app.AppendFloatHistogram(nil, 0, ts, h.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
exp = append(exp, floatResult{t: ts, h: h.ToFloat(nil)})
@@ -115,7 +115,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
h.Sum = 24.4
h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15)
- chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false)
+ chk, _, _, err = app.AppendFloatHistogram(nil, 0, ts, h.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
expH := h.ToFloat(nil)
@@ -134,7 +134,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
h.Sum = 24.4
h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22)
- chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false)
+ chk, _, _, err = app.AppendFloatHistogram(nil, 0, ts, h.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
expH = h.ToFloat(nil)
@@ -224,7 +224,7 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
NegativeBuckets: []int64{1},
}
- chk, _, app, err := app.AppendFloatHistogram(nil, ts1, h1.ToFloat(nil), false)
+ chk, _, app, err := app.AppendFloatHistogram(nil, 0, ts1, h1.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
@@ -260,7 +260,7 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
- chk, _, _, err = app.AppendFloatHistogram(nil, ts2, h2.ToFloat(nil), false)
+ chk, _, _, err = app.AppendFloatHistogram(nil, 0, ts2, h2.ToFloat(nil), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 2, c.NumSamples())
@@ -330,7 +330,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
ts := int64(1234567890)
- chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false)
+ chk, _, app, err := app.AppendFloatHistogram(nil, 0, ts, h.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
@@ -557,7 +557,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
nextChunk := NewFloatHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
- newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
+ newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, 0, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
@@ -575,7 +575,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
nextChunk := NewFloatHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
- newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
+ newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, 0, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
@@ -602,7 +602,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
nextChunk := NewFloatHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
- newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
+ newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, 0, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
@@ -717,7 +717,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader, expectHint histogram.CounterResetHint) {
oldChunkBytes := oldChunk.Bytes()
- newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
+ newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, 0, ts, h, false)
require.Equal(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched.
require.NoError(t, err)
require.NotNil(t, newChunk)
@@ -732,7 +732,7 @@ func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Fl
func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
oldChunkBytes := oldChunk.Bytes()
- newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
+ newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, 0, ts, h, false)
require.Greater(t, len(oldChunk.Bytes()), len(oldChunkBytes)) // Check that current chunk is bigger than previously.
require.NoError(t, err)
require.Nil(t, newChunk)
@@ -745,7 +745,7 @@ func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *
func assertRecodedFloatHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
prevChunkBytes := prevChunk.Bytes()
- newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
+ newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, 0, ts, h, false)
require.Equal(t, prevChunkBytes, prevChunk.Bytes()) // Sanity check that previous chunk is untouched. This may change in the future if we implement in-place recoding.
require.NoError(t, err)
require.NotNil(t, newChunk)
@@ -959,7 +959,7 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
- _, _, _, err = app.AppendFloatHistogram(nil, 1, tc.h1, true)
+ _, _, _, err = app.AppendFloatHistogram(nil, 0, 1, tc.h1, true)
require.NoError(t, err)
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*FloatHistogramAppender)
@@ -1019,7 +1019,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
ts := int64(1234567890)
- chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false)
+ chk, _, app, err := app.AppendFloatHistogram(nil, 0, ts, h.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
@@ -1259,7 +1259,7 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
h := tsdbutil.GenerateTestFloatHistogram(0)
var isRecoded bool
- c, isRecoded, app, err = app.AppendFloatHistogram(nil, 1, h, true)
+ c, isRecoded, app, err = app.AppendFloatHistogram(nil, 0, 1, h, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.NoError(t, err)
@@ -1267,7 +1267,7 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
// Add erroring histogram.
h2 := h.Copy()
h2.Schema++
- c, isRecoded, _, err = app.AppendFloatHistogram(nil, 2, h2, true)
+ c, isRecoded, _, err = app.AppendFloatHistogram(nil, 0, 2, h2, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.EqualError(t, err, "float histogram schema change")
@@ -1281,7 +1281,7 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
h := tsdbutil.GenerateTestFloatHistogram(0)
var isRecoded bool
- c, isRecoded, app, err = app.AppendFloatHistogram(nil, 1, h, true)
+ c, isRecoded, app, err = app.AppendFloatHistogram(nil, 0, 1, h, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.NoError(t, err)
@@ -1289,7 +1289,7 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
// Add erroring histogram.
h2 := h.Copy()
h2.CounterResetHint = histogram.CounterReset
- c, isRecoded, _, err = app.AppendFloatHistogram(nil, 2, h2, true)
+ c, isRecoded, _, err = app.AppendFloatHistogram(nil, 0, 2, h2, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.EqualError(t, err, "float histogram counter reset")
@@ -1303,7 +1303,7 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
h := tsdbutil.GenerateTestCustomBucketsFloatHistogram(0)
var isRecoded bool
- c, isRecoded, app, err = app.AppendFloatHistogram(nil, 1, h, true)
+ c, isRecoded, app, err = app.AppendFloatHistogram(nil, 0, 1, h, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.NoError(t, err)
@@ -1311,7 +1311,7 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
// Add erroring histogram.
h2 := h.Copy()
h2.CustomValues = []float64{0, 1, 2, 3, 4, 5, 6, 7}
- c, isRecoded, _, err = app.AppendFloatHistogram(nil, 2, h2, true)
+ c, isRecoded, _, err = app.AppendFloatHistogram(nil, 0, 2, h2, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.EqualError(t, err, "float histogram counter reset")
@@ -1344,10 +1344,10 @@ func TestFloatHistogramUniqueSpansAfterNext(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendFloatHistogram(nil, 0, h1, false)
+ _, _, _, err = app.AppendFloatHistogram(nil, 0, 0, h1, false)
require.NoError(t, err)
- _, _, _, err = app.AppendFloatHistogram(nil, 1, h2, false)
+ _, _, _, err = app.AppendFloatHistogram(nil, 0, 1, h2, false)
require.NoError(t, err)
// Create an iterator and advance to the first histogram.
@@ -1390,10 +1390,10 @@ func TestFloatHistogramUniqueCustomValuesAfterNext(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendFloatHistogram(nil, 0, h1, false)
+ _, _, _, err = app.AppendFloatHistogram(nil, 0, 0, h1, false)
require.NoError(t, err)
- _, _, _, err = app.AppendFloatHistogram(nil, 1, h2, false)
+ _, _, _, err = app.AppendFloatHistogram(nil, 0, 1, h2, false)
require.NoError(t, err)
// Create an iterator and advance to the first histogram.
@@ -1435,7 +1435,7 @@ func TestFloatHistogramEmptyBucketsWithGaps(t *testing.T) {
c := NewFloatHistogramChunk()
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendFloatHistogram(nil, 1, h1, false)
+ _, _, _, err = app.AppendFloatHistogram(nil, 0, 1, h1, false)
require.NoError(t, err)
h2 := &histogram.FloatHistogram{
@@ -1448,7 +1448,7 @@ func TestFloatHistogramEmptyBucketsWithGaps(t *testing.T) {
}
require.NoError(t, h2.Validate())
- newC, recoded, _, err := app.AppendFloatHistogram(nil, 2, h2, false)
+ newC, recoded, _, err := app.AppendFloatHistogram(nil, 0, 2, h2, false)
require.NoError(t, err)
require.True(t, recoded)
require.NotNil(t, newC)
@@ -1483,7 +1483,7 @@ func TestFloatHistogramIteratorFailIfSchemaInValid(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendFloatHistogram(nil, 1, h, false)
+ _, _, _, err = app.AppendFloatHistogram(nil, 0, 1, h, false)
require.NoError(t, err)
it := c.Iterator(nil)
@@ -1512,7 +1512,7 @@ func TestFloatHistogramIteratorReduceSchema(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendFloatHistogram(nil, 1, h, false)
+ _, _, _, err = app.AppendFloatHistogram(nil, 0, 1, h, false)
require.NoError(t, err)
it := c.Iterator(nil)
diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go
index e05c49c81d..4e77f387d3 100644
--- a/tsdb/chunkenc/histogram.go
+++ b/tsdb/chunkenc/histogram.go
@@ -219,7 +219,7 @@ func (a *HistogramAppender) NumSamples() int {
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
-func (*HistogramAppender) Append(int64, float64) {
+func (*HistogramAppender) Append(int64, int64, float64) {
panic("appended a float sample to a histogram chunk")
}
@@ -734,11 +734,11 @@ func (a *HistogramAppender) writeSumDelta(v float64) {
xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
}
-func (*HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
+func (*HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
panic("appended a float histogram sample to a histogram chunk")
}
-func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (Chunk, bool, Appender, error) {
+func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, _, t int64, h *histogram.Histogram, appendOnly bool) (Chunk, bool, Appender, error) {
if a.NumSamples() == 0 {
a.appendHistogram(t, h)
if h.CounterResetHint == histogram.GaugeType {
@@ -1075,6 +1075,10 @@ func (it *histogramIterator) AtT() int64 {
return it.t
}
+func (*histogramIterator) AtST() int64 {
+ return 0
+}
+
func (it *histogramIterator) Err() error {
return it.err
}
diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go
index 38bbd58465..6ac8500e64 100644
--- a/tsdb/chunkenc/histogram_test.go
+++ b/tsdb/chunkenc/histogram_test.go
@@ -64,7 +64,7 @@ func TestFirstHistogramExplicitCounterReset(t *testing.T) {
chk := NewHistogramChunk()
app, err := chk.Appender()
require.NoError(t, err)
- newChk, recoded, newApp, err := app.AppendHistogram(nil, 0, h, false)
+ newChk, recoded, newApp, err := app.AppendHistogram(nil, 0, 0, h, false)
require.NoError(t, err)
require.Nil(t, newChk)
require.False(t, recoded)
@@ -102,7 +102,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
},
NegativeBuckets: []int64{2, 1, -1, -1}, // counts: 2, 3, 2, 1 (total 8)
}
- chk, _, app, err := app.AppendHistogram(nil, ts, h, false)
+ chk, _, app, err := app.AppendHistogram(nil, 0, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
exp = append(exp, result{t: ts, h: h, fh: h.ToFloat(nil)})
@@ -116,7 +116,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
h.Sum = 24.4
h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15)
- chk, _, _, err = app.AppendHistogram(nil, ts, h, false)
+ chk, _, _, err = app.AppendHistogram(nil, 0, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
hExp := h.Copy()
@@ -135,7 +135,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
h.Sum = 24.4
h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22)
- chk, _, _, err = app.AppendHistogram(nil, ts, h, false)
+ chk, _, _, err = app.AppendHistogram(nil, 0, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
hExp = h.Copy()
@@ -235,7 +235,7 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
NegativeBuckets: []int64{1},
}
- chk, _, app, err := app.AppendHistogram(nil, ts1, h1, false)
+ chk, _, app, err := app.AppendHistogram(nil, 0, ts1, h1, false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
@@ -271,7 +271,7 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
require.True(t, ok) // Only new buckets came in.
require.Equal(t, NotCounterReset, cr)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
- chk, _, _, err = app.AppendHistogram(nil, ts2, h2, false)
+ chk, _, _, err = app.AppendHistogram(nil, 0, ts2, h2, false)
require.NoError(t, err)
require.Nil(t, chk)
@@ -344,7 +344,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
ts := int64(1234567890)
- chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false)
+ chk, _, app, err := app.AppendHistogram(nil, 0, ts, h.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
@@ -581,7 +581,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
nextChunk := NewHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
- newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
+ newChunk, recoded, newApp, err := app.AppendHistogram(hApp, 0, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
@@ -599,7 +599,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
nextChunk := NewHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
- newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
+ newChunk, recoded, newApp, err := app.AppendHistogram(hApp, 0, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
@@ -629,7 +629,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
nextChunk := NewHistogramChunk()
app, err := nextChunk.Appender()
require.NoError(t, err)
- newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
+ newChunk, recoded, newApp, err := app.AppendHistogram(hApp, 0, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
@@ -776,7 +776,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader, expectHint histogram.CounterResetHint) {
oldChunkBytes := oldChunk.Bytes()
- newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
+ newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, 0, ts, h, false)
require.Equal(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched.
require.NoError(t, err)
require.NotNil(t, newChunk)
@@ -791,7 +791,7 @@ func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Histogr
func assertNoNewHistogramChunkOnAppend(t *testing.T, currChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
prevChunkBytes := currChunk.Bytes()
- newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
+ newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, 0, ts, h, false)
require.Greater(t, len(currChunk.Bytes()), len(prevChunkBytes)) // Check that current chunk is bigger than previously.
require.NoError(t, err)
require.Nil(t, newChunk)
@@ -804,7 +804,7 @@ func assertNoNewHistogramChunkOnAppend(t *testing.T, currChunk Chunk, hApp *Hist
func assertRecodedHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
prevChunkBytes := prevChunk.Bytes()
- newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
+ newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, 0, ts, h, false)
require.Equal(t, prevChunkBytes, prevChunk.Bytes()) // Sanity check that previous chunk is untouched. This may change in the future if we implement in-place recoding.
require.NoError(t, err)
require.NotNil(t, newChunk)
@@ -1029,7 +1029,7 @@ func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
- _, _, _, err = app.AppendHistogram(nil, 1, tc.h1, true)
+ _, _, _, err = app.AppendHistogram(nil, 1, 0, tc.h1, true)
require.NoError(t, err)
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*HistogramAppender)
@@ -1172,7 +1172,7 @@ func TestAtFloatHistogram(t *testing.T) {
app, err := chk.Appender()
require.NoError(t, err)
for i := range input {
- newc, _, _, err := app.AppendHistogram(nil, int64(i), &input[i], false)
+ newc, _, _, err := app.AppendHistogram(nil, 0, int64(i), &input[i], false)
require.NoError(t, err)
require.Nil(t, newc)
}
@@ -1230,7 +1230,7 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
ts := int64(1234567890)
- chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false)
+ chk, _, app, err := app.AppendHistogram(nil, 0, ts, h.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
@@ -1471,7 +1471,7 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
h := tsdbutil.GenerateTestHistogram(0)
var isRecoded bool
- c, isRecoded, app, err = app.AppendHistogram(nil, 1, h, true)
+ c, isRecoded, app, err = app.AppendHistogram(nil, 0, 1, h, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.NoError(t, err)
@@ -1479,7 +1479,7 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
// Add erroring histogram.
h2 := h.Copy()
h2.Schema++
- c, isRecoded, _, err = app.AppendHistogram(nil, 2, h2, true)
+ c, isRecoded, _, err = app.AppendHistogram(nil, 0, 2, h2, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.EqualError(t, err, "histogram schema change")
@@ -1493,7 +1493,7 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
h := tsdbutil.GenerateTestHistogram(0)
var isRecoded bool
- c, isRecoded, app, err = app.AppendHistogram(nil, 1, h, true)
+ c, isRecoded, app, err = app.AppendHistogram(nil, 0, 1, h, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.NoError(t, err)
@@ -1501,7 +1501,7 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
// Add erroring histogram.
h2 := h.Copy()
h2.CounterResetHint = histogram.CounterReset
- c, isRecoded, _, err = app.AppendHistogram(nil, 2, h2, true)
+ c, isRecoded, _, err = app.AppendHistogram(nil, 0, 2, h2, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.EqualError(t, err, "histogram counter reset")
@@ -1515,7 +1515,7 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
h := tsdbutil.GenerateTestCustomBucketsHistogram(0)
var isRecoded bool
- c, isRecoded, app, err = app.AppendHistogram(nil, 1, h, true)
+ c, isRecoded, app, err = app.AppendHistogram(nil, 0, 1, h, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.NoError(t, err)
@@ -1523,7 +1523,7 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
// Add erroring histogram.
h2 := h.Copy()
h2.CustomValues = []float64{0, 1, 2, 3, 4, 5, 6, 7}
- c, isRecoded, _, err = app.AppendHistogram(nil, 2, h2, true)
+ c, isRecoded, _, err = app.AppendHistogram(nil, 0, 2, h2, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.EqualError(t, err, "histogram counter reset")
@@ -1556,10 +1556,10 @@ func TestHistogramUniqueSpansAfterNextWithAtHistogram(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 0, h1, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 0, h1, false)
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 1, h2, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 1, h2, false)
require.NoError(t, err)
// Create an iterator and advance to the first histogram.
@@ -1607,10 +1607,10 @@ func TestHistogramUniqueSpansAfterNextWithAtFloatHistogram(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 0, h1, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 0, h1, false)
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 1, h2, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 1, h2, false)
require.NoError(t, err)
// Create an iterator and advance to the first histogram.
@@ -1653,10 +1653,10 @@ func TestHistogramCustomValuesInternedAfterNextWithAtHistogram(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 0, h1, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 0, h1, false)
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 1, h2, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 1, h2, false)
require.NoError(t, err)
// Create an iterator and advance to the first histogram.
@@ -1699,10 +1699,10 @@ func TestHistogramCustomValuesInternedAfterNextWithAtFloatHistogram(t *testing.T
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 0, h1, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 0, h1, false)
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 1, h2, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 1, h2, false)
require.NoError(t, err)
// Create an iterator and advance to the first histogram.
@@ -1754,7 +1754,7 @@ func BenchmarkAppendable(b *testing.B) {
b.Fatal(err)
}
- _, _, _, err = app.AppendHistogram(nil, 1, h, true)
+ _, _, _, err = app.AppendHistogram(nil, 0, 1, h, true)
if err != nil {
b.Fatal(err)
}
@@ -1791,7 +1791,7 @@ func TestIntHistogramEmptyBucketsWithGaps(t *testing.T) {
c := NewHistogramChunk()
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 1, h1, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 1, h1, false)
require.NoError(t, err)
h2 := &histogram.Histogram{
@@ -1804,7 +1804,7 @@ func TestIntHistogramEmptyBucketsWithGaps(t *testing.T) {
}
require.NoError(t, h2.Validate())
- newC, recoded, _, err := app.AppendHistogram(nil, 2, h2, false)
+ newC, recoded, _, err := app.AppendHistogram(nil, 0, 2, h2, false)
require.NoError(t, err)
require.True(t, recoded)
require.NotNil(t, newC)
@@ -1839,7 +1839,7 @@ func TestHistogramIteratorFailIfSchemaInValid(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 1, h, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 1, h, false)
require.NoError(t, err)
it := c.Iterator(nil)
@@ -1868,7 +1868,7 @@ func TestHistogramIteratorReduceSchema(t *testing.T) {
app, err := c.Appender()
require.NoError(t, err)
- _, _, _, err = app.AppendHistogram(nil, 1, h, false)
+ _, _, _, err = app.AppendHistogram(nil, 0, 1, h, false)
require.NoError(t, err)
it := c.Iterator(nil)
diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go
index bbe12a893b..5a9a59dc22 100644
--- a/tsdb/chunkenc/xor.go
+++ b/tsdb/chunkenc/xor.go
@@ -158,7 +158,7 @@ type xorAppender struct {
trailing uint8
}
-func (a *xorAppender) Append(t int64, v float64) {
+func (a *xorAppender) Append(_, t int64, v float64) {
var tDelta uint64
num := binary.BigEndian.Uint16(a.b.bytes())
switch num {
@@ -225,11 +225,11 @@ func (a *xorAppender) writeVDelta(v float64) {
xorWrite(a.b, v, a.v, &a.leading, &a.trailing)
}
-func (*xorAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
+func (*xorAppender) AppendHistogram(*HistogramAppender, int64, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
panic("appended a histogram sample to a float chunk")
}
-func (*xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
+func (*xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
panic("appended a float histogram sample to a float chunk")
}
@@ -277,6 +277,10 @@ func (it *xorIterator) AtT() int64 {
return it.t
}
+func (*xorIterator) AtST() int64 {
+ return 0
+}
+
func (it *xorIterator) Err() error {
return it.err
}
diff --git a/tsdb/chunkenc/xor_test.go b/tsdb/chunkenc/xor_test.go
index 904e536b49..b30c65283d 100644
--- a/tsdb/chunkenc/xor_test.go
+++ b/tsdb/chunkenc/xor_test.go
@@ -24,7 +24,7 @@ func BenchmarkXorRead(b *testing.B) {
app, err := c.Appender()
require.NoError(b, err)
for i := int64(0); i < 120*1000; i += 1000 {
- app.Append(i, float64(i)+float64(i)/10+float64(i)/100+float64(i)/1000)
+ app.Append(0, i, float64(i)+float64(i)/10+float64(i)/100+float64(i)/1000)
}
b.ReportAllocs()
diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go
index f8fc9a2e95..9b4e011562 100644
--- a/tsdb/chunks/chunks.go
+++ b/tsdb/chunks/chunks.go
@@ -135,6 +135,7 @@ type Meta struct {
}
// ChunkFromSamples requires all samples to have the same type.
+// TODO(krajorama): test with ST when chunk formats support it.
func ChunkFromSamples(s []Sample) (Meta, error) {
return ChunkFromSamplesGeneric(SampleSlice(s))
}
@@ -164,9 +165,9 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
for i := 0; i < s.Len(); i++ {
switch sampleType {
case chunkenc.ValFloat:
- ca.Append(s.Get(i).T(), s.Get(i).F())
+ ca.Append(s.Get(i).ST(), s.Get(i).T(), s.Get(i).F())
case chunkenc.ValHistogram:
- newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false)
+ newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).ST(), s.Get(i).T(), s.Get(i).H(), false)
if err != nil {
return emptyChunk, err
}
@@ -174,7 +175,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
return emptyChunk, errors.New("did not expect to start a second chunk")
}
case chunkenc.ValFloatHistogram:
- newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
+ newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).ST(), s.Get(i).T(), s.Get(i).FH(), false)
if err != nil {
return emptyChunk, err
}
@@ -776,7 +777,7 @@ func sequenceFiles(dir string) ([]string, error) {
return res, nil
}
-// closeAll closes all given closers while recording error in MultiError.
+// closeAll closes all given closers while recording all errors.
func closeAll(cs []io.Closer) error {
var errs []error
for _, c := range cs {
diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go
index 17efd44aa6..c3cbc5a618 100644
--- a/tsdb/chunks/head_chunks_test.go
+++ b/tsdb/chunks/head_chunks_test.go
@@ -559,7 +559,7 @@ func randomChunk(t *testing.T) chunkenc.Chunk {
app, err := chunk.Appender()
require.NoError(t, err)
for range length {
- app.Append(rand.Int63(), rand.Float64())
+ app.Append(0, rand.Int63(), rand.Float64())
}
return chunk
}
diff --git a/tsdb/chunks/samples.go b/tsdb/chunks/samples.go
index 8097bcd72b..280f2dd606 100644
--- a/tsdb/chunks/samples.go
+++ b/tsdb/chunks/samples.go
@@ -25,6 +25,7 @@ type Samples interface {
type Sample interface {
T() int64
+ ST() int64
F() float64
H() *histogram.Histogram
FH() *histogram.FloatHistogram
@@ -38,16 +39,20 @@ func (s SampleSlice) Get(i int) Sample { return s[i] }
func (s SampleSlice) Len() int { return len(s) }
type sample struct {
- t int64
- f float64
- h *histogram.Histogram
- fh *histogram.FloatHistogram
+ st, t int64
+ f float64
+ h *histogram.Histogram
+ fh *histogram.FloatHistogram
}
func (s sample) T() int64 {
return s.t
}
+func (s sample) ST() int64 {
+ return s.st
+}
+
func (s sample) F() float64 {
return s.f
}
diff --git a/tsdb/compact.go b/tsdb/compact.go
index 7c21cbcc13..7091d34d50 100644
--- a/tsdb/compact.go
+++ b/tsdb/compact.go
@@ -32,7 +32,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -263,6 +262,13 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
return nil, err
}
if c.blockExcludeFunc != nil && c.blockExcludeFunc(meta) {
+ // Compactions work from oldest to newest, uploads do the same (usually).
+ // If you continue here you'll skip compactions on this one block, but:
+ // * all further blocks are NOT yet uploaded
+ // * some or all further blocks are uploaded
+ //
+ // If we continue and there are newer blocks to pick from,
+ // then you will compact in a non-continuous way, leaving gaps of individual un-compacted blocks.
break
}
dms = append(dms, dirMeta{dir, meta})
@@ -565,16 +571,16 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
return []ulid.ULID{uid}, nil
}
- errs := tsdb_errors.NewMulti(err)
+ errs := []error{err}
if !errors.Is(err, context.Canceled) {
for _, b := range bs {
if err := b.setCompactionFailed(); err != nil {
- errs.Add(fmt.Errorf("setting compaction failed for block: %s: %w", b.Dir(), err))
+ errs = append(errs, fmt.Errorf("setting compaction failed for block: %s: %w", b.Dir(), err))
}
}
}
- return nil, errs.Err()
+ return nil, errors.Join(errs...)
}
func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) {
@@ -598,6 +604,9 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
if base.Compaction.FromOutOfOrder() {
meta.Compaction.SetOutOfOrder()
}
+ if base.Compaction.FromStaleSeries() {
+ meta.Compaction.SetStaleSeries()
+ }
}
err := c.write(dest, meta, DefaultBlockPopulator{}, b)
@@ -651,7 +660,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
tmp := dir + tmpForCreationBlockDirSuffix
var closers []io.Closer
defer func(t time.Time) {
- err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(closers)).Err()
+ err = errors.Join(err, closeAll(closers))
// RemoveAll returns no error when tmp doesn't exist so it is safe to always run it.
if err := os.RemoveAll(tmp); err != nil {
@@ -708,13 +717,13 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
// though these are covered under defer. This is because in Windows,
// you cannot delete these unless they are closed and the defer is to
// make sure they are closed if the function exits due to an error above.
- errs := tsdb_errors.NewMulti()
+ var errs []error
for _, w := range closers {
- errs.Add(w.Close())
+ errs = append(errs, w.Close())
}
closers = closers[:0] // Avoid closing the writers twice in the defer.
- if errs.Err() != nil {
- return errs.Err()
+ if err := errors.Join(errs...); err != nil {
+ return err
}
// Populated block is empty, so exit early.
@@ -793,11 +802,9 @@ func (DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compact
overlapping bool
)
defer func() {
- errs := tsdb_errors.NewMulti(err)
- if cerr := tsdb_errors.CloseAll(closers); cerr != nil {
- errs.Add(fmt.Errorf("close: %w", cerr))
+ if cerr := closeAll(closers); cerr != nil {
+ err = errors.Join(err, fmt.Errorf("close: %w", cerr))
}
- err = errs.Err()
metrics.PopulatingBlocks.Set(0)
}()
metrics.PopulatingBlocks.Set(1)
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index 29b90d9bbc..afe15a5f31 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -173,214 +173,274 @@ func TestNoPanicFor0Tombstones(t *testing.T) {
c.plan(metas)
}
-func TestLeveledCompactor_plan(t *testing.T) {
- // This mimics our default ExponentialBlockRanges with min block size equals to 20.
- compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{
- 20,
- 60,
- 180,
- 540,
- 1620,
- }, nil, nil)
- require.NoError(t, err)
+func TestLeveledCompactor(t *testing.T) {
+ // Tests for the private plan() method.
+ t.Run("plan", func(t *testing.T) {
+ // This mimics our default ExponentialBlockRanges with min block size equals to 20.
+ compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{
+ 20,
+ 60,
+ 180,
+ 540,
+ 1620,
+ }, nil, nil)
+ require.NoError(t, err)
- cases := map[string]struct {
- metas []dirMeta
- expected []string
- }{
- "Outside Range": {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
+ cases := map[string]struct {
+ metas []dirMeta
+ expected []string
+ }{
+ "Outside Range": {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ },
+ expected: nil,
},
- expected: nil,
- },
- "We should wait for four blocks of size 20 to appear before compacting.": {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 20, 40, nil),
+ "We should wait for four blocks of size 20 to appear before compacting.": {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 20, 40, nil),
+ },
+ expected: nil,
},
- expected: nil,
- },
- `We should wait for a next block of size 20 to appear before compacting
- the existing ones. We have three, but we ignore the fresh one from WAl`: {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 20, 40, nil),
- metaRange("3", 40, 60, nil),
+ `We should wait for a next block of size 20 to appear before compacting
+ the existing ones. We have three, but we ignore the fresh one from WAl`: {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 20, 40, nil),
+ metaRange("3", 40, 60, nil),
+ },
+ expected: nil,
},
- expected: nil,
- },
- "Block to fill the entire parent range appeared – should be compacted": {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 20, 40, nil),
- metaRange("3", 40, 60, nil),
- metaRange("4", 60, 80, nil),
+ "Block to fill the entire parent range appeared – should be compacted": {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 20, 40, nil),
+ metaRange("3", 40, 60, nil),
+ metaRange("4", 60, 80, nil),
+ },
+ expected: []string{"1", "2", "3"},
},
- expected: []string{"1", "2", "3"},
- },
- `Block for the next parent range appeared with gap with size 20. Nothing will happen in the first one
- anymore but we ignore fresh one still, so no compaction`: {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 20, 40, nil),
- metaRange("3", 60, 80, nil),
+ `Block for the next parent range appeared with gap with size 20. Nothing will happen in the first one
+ anymore but we ignore fresh one still, so no compaction`: {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 20, 40, nil),
+ metaRange("3", 60, 80, nil),
+ },
+ expected: nil,
},
- expected: nil,
- },
- `Block for the next parent range appeared, and we have a gap with size 20 between second and third block.
- We will not get this missed gap anymore and we should compact just these two.`: {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 20, 40, nil),
- metaRange("3", 60, 80, nil),
- metaRange("4", 80, 100, nil),
+ `Block for the next parent range appeared, and we have a gap with size 20 between second and third block.
+ We will not get this missed gap anymore and we should compact just these two.`: {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 20, 40, nil),
+ metaRange("3", 60, 80, nil),
+ metaRange("4", 80, 100, nil),
+ },
+ expected: []string{"1", "2"},
},
- expected: []string{"1", "2"},
- },
- "We have 20, 20, 20, 60, 60 range blocks. '5' is marked as fresh one": {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 20, 40, nil),
- metaRange("3", 40, 60, nil),
- metaRange("4", 60, 120, nil),
- metaRange("5", 120, 180, nil),
+ "We have 20, 20, 20, 60, 60 range blocks. '5' is marked as fresh one": {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 20, 40, nil),
+ metaRange("3", 40, 60, nil),
+ metaRange("4", 60, 120, nil),
+ metaRange("5", 120, 180, nil),
+ },
+ expected: []string{"1", "2", "3"},
},
- expected: []string{"1", "2", "3"},
- },
- "We have 20, 60, 20, 60, 240 range blocks. We can compact 20 + 60 + 60": {
- metas: []dirMeta{
- metaRange("2", 20, 40, nil),
- metaRange("4", 60, 120, nil),
- metaRange("5", 960, 980, nil), // Fresh one.
- metaRange("6", 120, 180, nil),
- metaRange("7", 720, 960, nil),
+ "We have 20, 60, 20, 60, 240 range blocks. We can compact 20 + 60 + 60": {
+ metas: []dirMeta{
+ metaRange("2", 20, 40, nil),
+ metaRange("4", 60, 120, nil),
+ metaRange("5", 960, 980, nil), // Fresh one.
+ metaRange("6", 120, 180, nil),
+ metaRange("7", 720, 960, nil),
+ },
+ expected: []string{"2", "4", "6"},
},
- expected: []string{"2", "4", "6"},
- },
- "Do not select large blocks that have many tombstones when there is no fresh block": {
- metas: []dirMeta{
- metaRange("1", 0, 540, &BlockStats{
- NumSeries: 10,
- NumTombstones: 3,
- }),
+ "Do not select large blocks that have many tombstones when there is no fresh block": {
+ metas: []dirMeta{
+ metaRange("1", 0, 540, &BlockStats{
+ NumSeries: 10,
+ NumTombstones: 3,
+ }),
+ },
+ expected: nil,
},
- expected: nil,
- },
- "Select large blocks that have many tombstones when fresh appears": {
- metas: []dirMeta{
- metaRange("1", 0, 540, &BlockStats{
- NumSeries: 10,
- NumTombstones: 3,
- }),
- metaRange("2", 540, 560, nil),
+ "Select large blocks that have many tombstones when fresh appears": {
+ metas: []dirMeta{
+ metaRange("1", 0, 540, &BlockStats{
+ NumSeries: 10,
+ NumTombstones: 3,
+ }),
+ metaRange("2", 540, 560, nil),
+ },
+ expected: []string{"1"},
},
- expected: []string{"1"},
- },
- "For small blocks, do not compact tombstones, even when fresh appears.": {
- metas: []dirMeta{
- metaRange("1", 0, 60, &BlockStats{
- NumSeries: 10,
- NumTombstones: 3,
- }),
- metaRange("2", 60, 80, nil),
+ "For small blocks, do not compact tombstones, even when fresh appears.": {
+ metas: []dirMeta{
+ metaRange("1", 0, 60, &BlockStats{
+ NumSeries: 10,
+ NumTombstones: 3,
+ }),
+ metaRange("2", 60, 80, nil),
+ },
+ expected: nil,
},
- expected: nil,
- },
- `Regression test: we were stuck in a compact loop where we always recompacted
- the same block when tombstones and series counts were zero`: {
- metas: []dirMeta{
- metaRange("1", 0, 540, &BlockStats{
- NumSeries: 0,
- NumTombstones: 0,
- }),
- metaRange("2", 540, 560, nil),
+ `Regression test: we were stuck in a compact loop where we always recompacted
+ the same block when tombstones and series counts were zero`: {
+ metas: []dirMeta{
+ metaRange("1", 0, 540, &BlockStats{
+ NumSeries: 0,
+ NumTombstones: 0,
+ }),
+ metaRange("2", 540, 560, nil),
+ },
+ expected: nil,
},
- expected: nil,
- },
- `Regression test: we were wrongly assuming that new block is fresh from WAL when its ULID is newest.
- We need to actually look on max time instead.
+ `Regression test: we were wrongly assuming that new block is fresh from WAL when its ULID is newest.
+ We need to actually look on max time instead.
- With previous, wrong approach "8" block was ignored, so we were wrongly compacting 5 and 7 and introducing
- block overlaps`: {
- metas: []dirMeta{
- metaRange("5", 0, 360, nil),
- metaRange("6", 540, 560, nil), // Fresh one.
- metaRange("7", 360, 420, nil),
- metaRange("8", 420, 540, nil),
+ With previous, wrong approach "8" block was ignored, so we were wrongly compacting 5 and 7 and introducing
+ block overlaps`: {
+ metas: []dirMeta{
+ metaRange("5", 0, 360, nil),
+ metaRange("6", 540, 560, nil), // Fresh one.
+ metaRange("7", 360, 420, nil),
+ metaRange("8", 420, 540, nil),
+ },
+ expected: []string{"7", "8"},
},
- expected: []string{"7", "8"},
- },
- // |--------------|
- // |----------------|
- // |--------------|
- "Overlapping blocks 1": {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 19, 40, nil),
- metaRange("3", 40, 60, nil),
+ // |--------------|
+ // |----------------|
+ // |--------------|
+ "Overlapping blocks 1": {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 19, 40, nil),
+ metaRange("3", 40, 60, nil),
+ },
+ expected: []string{"1", "2"},
},
- expected: []string{"1", "2"},
- },
- // |--------------|
- // |--------------|
- // |--------------|
- "Overlapping blocks 2": {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 20, 40, nil),
- metaRange("3", 30, 50, nil),
+ // |--------------|
+ // |--------------|
+ // |--------------|
+ "Overlapping blocks 2": {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 20, 40, nil),
+ metaRange("3", 30, 50, nil),
+ },
+ expected: []string{"2", "3"},
},
- expected: []string{"2", "3"},
- },
- // |--------------|
- // |---------------------|
- // |--------------|
- "Overlapping blocks 3": {
- metas: []dirMeta{
- metaRange("1", 0, 20, nil),
- metaRange("2", 10, 40, nil),
- metaRange("3", 30, 50, nil),
+ // |--------------|
+ // |---------------------|
+ // |--------------|
+ "Overlapping blocks 3": {
+ metas: []dirMeta{
+ metaRange("1", 0, 20, nil),
+ metaRange("2", 10, 40, nil),
+ metaRange("3", 30, 50, nil),
+ },
+ expected: []string{"1", "2", "3"},
},
- expected: []string{"1", "2", "3"},
- },
- // |--------------|
- // |--------------------------------|
- // |--------------|
- // |--------------|
- "Overlapping blocks 4": {
- metas: []dirMeta{
- metaRange("5", 0, 360, nil),
- metaRange("6", 340, 560, nil),
- metaRange("7", 360, 420, nil),
- metaRange("8", 420, 540, nil),
+ // |--------------|
+ // |--------------------------------|
+ // |--------------|
+ // |--------------|
+ "Overlapping blocks 4": {
+ metas: []dirMeta{
+ metaRange("5", 0, 360, nil),
+ metaRange("6", 340, 560, nil),
+ metaRange("7", 360, 420, nil),
+ metaRange("8", 420, 540, nil),
+ },
+ expected: []string{"5", "6", "7", "8"},
},
- expected: []string{"5", "6", "7", "8"},
- },
- // |--------------|
- // |--------------|
- // |--------------|
- // |--------------|
- "Overlapping blocks 5": {
- metas: []dirMeta{
- metaRange("1", 0, 10, nil),
- metaRange("2", 9, 20, nil),
- metaRange("3", 30, 40, nil),
- metaRange("4", 39, 50, nil),
+ // |--------------|
+ // |--------------|
+ // |--------------|
+ // |--------------|
+ "Overlapping blocks 5": {
+ metas: []dirMeta{
+ metaRange("1", 0, 10, nil),
+ metaRange("2", 9, 20, nil),
+ metaRange("3", 30, 40, nil),
+ metaRange("4", 39, 50, nil),
+ },
+ expected: []string{"1", "2"},
},
- expected: []string{"1", "2"},
- },
- }
-
- for title, c := range cases {
- if !t.Run(title, func(t *testing.T) {
- res, err := compactor.plan(c.metas)
- require.NoError(t, err)
- require.Equal(t, c.expected, res)
- }) {
- return
}
- }
+
+ for title, c := range cases {
+ if !t.Run(title, func(t *testing.T) {
+ res, err := compactor.plan(c.metas)
+ require.NoError(t, err)
+ require.Equal(t, c.expected, res)
+ }) {
+ return
+ }
+ }
+ })
+
+ // Tests for the public Plan() method.
+ t.Run("Plan", func(t *testing.T) {
+ // Verify that when a BlockExcludeFilter excludes a block in the middle of
+ // the list, subsequent blocks are not processed.
+ t.Run("BlockExcludeFilter stops iteration", func(t *testing.T) {
+ dir := t.TempDir()
+
+ // Create 4 blocks with sequential ULIDs.
+ block1ULID := ulid.MustNew(1, nil)
+ block2ULID := ulid.MustNew(2, nil)
+ block3ULID := ulid.MustNew(3, nil)
+ block4ULID := ulid.MustNew(4, nil)
+
+ for i, uid := range []ulid.ULID{block1ULID, block2ULID, block3ULID, block4ULID} {
+ blockDir := filepath.Join(dir, uid.String())
+ require.NoError(t, os.MkdirAll(blockDir, 0o777))
+
+ meta := &BlockMeta{
+ ULID: uid,
+ MinTime: int64(i * 10),
+ MaxTime: int64((i + 1) * 10),
+ }
+ meta.Compaction.Level = 1
+ _, err := writeMetaFile(promslog.NewNopLogger(), blockDir, meta)
+ require.NoError(t, err)
+ }
+
+ // Track which blocks were evaluated by the exclude function.
+ var evaluatedBlocks []ulid.ULID
+ excludeFunc := func(meta *BlockMeta) bool {
+ evaluatedBlocks = append(evaluatedBlocks, meta.ULID)
+ return meta.ULID == block2ULID
+ }
+
+ c, err := NewLeveledCompactorWithOptions(
+ context.Background(),
+ nil,
+ promslog.NewNopLogger(),
+ []int64{20},
+ chunkenc.NewPool(),
+ LeveledCompactorOptions{
+ BlockExcludeFilter: excludeFunc,
+ EnableOverlappingCompaction: true,
+ },
+ )
+ require.NoError(t, err)
+
+ // Plan should evaluate all blocks.
+ _, err = c.Plan(dir)
+ require.NoError(t, err)
+
+ require.Len(t, evaluatedBlocks, 2, "Expected only 2 blocks to be evaluated")
+ require.Contains(t, evaluatedBlocks, block1ULID)
+ require.Contains(t, evaluatedBlocks, block2ULID)
+ })
+ })
}
func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) {
@@ -1361,7 +1421,6 @@ func TestCancelCompactions(t *testing.T) {
// Make sure that no blocks were marked as compaction failed.
// This checks that the `context.Canceled` error is properly checked at all levels:
- // - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks.
// - callers should check with errors.Is() instead of ==.
readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", promslog.NewNopLogger())
require.NoError(t, err)
@@ -1452,9 +1511,6 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
t.Run(fmt.Sprintf("float=%t", floatTest), func(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
require.NoError(t, head.Init(0))
- t.Cleanup(func() {
- require.NoError(t, head.Close())
- })
minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
ctx := context.Background()
@@ -1631,13 +1687,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
),
func(t *testing.T) {
oldHead, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- t.Cleanup(func() {
- require.NoError(t, oldHead.Close())
- })
sparseHead, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- t.Cleanup(func() {
- require.NoError(t, sparseHead.Close())
- })
var allSparseSeries []struct {
baseLabels labels.Labels
diff --git a/tsdb/db.go b/tsdb/db.go
index 3f8bf16209..a4a4a77f3c 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -41,7 +41,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
_ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minimum Go version is met.
"github.com/prometheus/prometheus/tsdb/tsdbutil"
@@ -100,6 +99,10 @@ func DefaultOptions() *Options {
// Options of the DB storage.
type Options struct {
+ // staleSeriesCompactionThreshold is same as below option with same name, but is atomic so that we can do live updates without locks.
+ // This is the one that must be used by the code.
+ staleSeriesCompactionThreshold atomic.Float64
+
// Segments (wal files) max size.
// WALSegmentSize = 0, segment size is default size.
// WALSegmentSize > 0, segment size is WALSegmentSize.
@@ -231,6 +234,11 @@ type Options struct {
// is implemented.
EnableSTAsZeroSample bool
+ // EnableSTStorage determines whether TSDB should write a Start Timestamp (ST)
+ // per sample to WAL.
+ // TODO(bwplotka): Implement this option as per PROM-60, currently it's noop.
+ EnableSTStorage bool
+
// EnableMetadataWALRecords represents 'metadata-wal-records' feature flag.
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
@@ -245,6 +253,10 @@ type Options struct {
// FeatureRegistry is used to register TSDB features.
FeatureRegistry features.Collector
+
+ // StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in
+ // the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately.
+ StaleSeriesCompactionThreshold float64
}
type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)
@@ -305,6 +317,10 @@ type DB struct {
// out-of-order compaction and vertical queries.
oooWasEnabled atomic.Bool
+ // lastHeadCompactionTime is the last wall clock time when the head block compaction was started,
+ // irrespective of success or failure. This does not include out-of-order compaction and stale series compaction.
+ lastHeadCompactionTime time.Time
+
writeNotified wlog.WriteNotified
registerer prometheus.Registerer
@@ -315,20 +331,23 @@ type DB struct {
}
type dbMetrics struct {
- loadedBlocks prometheus.GaugeFunc
- symbolTableSize prometheus.GaugeFunc
- reloads prometheus.Counter
- reloadsFailed prometheus.Counter
- compactionsFailed prometheus.Counter
- compactionsTriggered prometheus.Counter
- compactionsSkipped prometheus.Counter
- sizeRetentionCount prometheus.Counter
- timeRetentionCount prometheus.Counter
- startTime prometheus.GaugeFunc
- tombCleanTimer prometheus.Histogram
- blocksBytes prometheus.Gauge
- maxBytes prometheus.Gauge
- retentionDuration prometheus.Gauge
+ loadedBlocks prometheus.GaugeFunc
+ symbolTableSize prometheus.GaugeFunc
+ reloads prometheus.Counter
+ reloadsFailed prometheus.Counter
+ compactionsFailed prometheus.Counter
+ compactionsTriggered prometheus.Counter
+ compactionsSkipped prometheus.Counter
+ sizeRetentionCount prometheus.Counter
+ timeRetentionCount prometheus.Counter
+ startTime prometheus.GaugeFunc
+ tombCleanTimer prometheus.Histogram
+ blocksBytes prometheus.Gauge
+ maxBytes prometheus.Gauge
+ retentionDuration prometheus.Gauge
+ staleSeriesCompactionsTriggered prometheus.Counter
+ staleSeriesCompactionsFailed prometheus.Counter
+ staleSeriesCompactionDuration prometheus.Histogram
}
func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
@@ -413,6 +432,22 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
Name: "prometheus_tsdb_size_retentions_total",
Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.",
})
+ m.staleSeriesCompactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "prometheus_tsdb_stale_series_compactions_triggered_total",
+ Help: "Total number of triggered stale series compactions.",
+ })
+ m.staleSeriesCompactionsFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "prometheus_tsdb_stale_series_compactions_failed_total",
+ Help: "Total number of stale series compactions that failed.",
+ })
+ m.staleSeriesCompactionDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "prometheus_tsdb_stale_series_compaction_duration_seconds",
+ Help: "Duration of stale series compaction runs.",
+ Buckets: prometheus.ExponentialBuckets(1, 2, 14),
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ })
if r != nil {
r.MustRegister(
@@ -430,6 +465,9 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
m.blocksBytes,
m.maxBytes,
m.retentionDuration,
+ m.staleSeriesCompactionsTriggered,
+ m.staleSeriesCompactionsFailed,
+ m.staleSeriesCompactionDuration,
)
}
return m
@@ -521,11 +559,9 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
return err
}
defer func() {
- errs := tsdb_errors.NewMulti(returnErr)
if err := head.Close(); err != nil {
- errs.Add(fmt.Errorf("closing Head: %w", err))
+ returnErr = errors.Join(returnErr, fmt.Errorf("closing Head: %w", err))
}
- returnErr = errs.Err()
}()
// Set the min valid time for the ingested wal samples
// to be no lower than the maxt of the last block.
@@ -680,13 +716,13 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
db.logger.Warn("Closing block failed", "err", err, "block", b)
}
}
- errs := tsdb_errors.NewMulti()
+ var errs []error
for ulid, err := range corrupted {
if err != nil {
- errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
+ errs = append(errs, fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
}
}
- return nil, errs.Err()
+ return nil, errors.Join(errs...)
}
if len(loadable) == 0 {
@@ -797,7 +833,7 @@ func (db *DBReadOnly) Close() error {
}
close(db.closed)
- return tsdb_errors.CloseAll(db.closers)
+ return closeAll(db.closers)
}
// Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used.
@@ -857,6 +893,8 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
// configured maximum block duration.
rngs = ExponentialBlockRanges(opts.MinBlockDuration, 10, 3)
}
+
+ opts.staleSeriesCompactionThreshold.Store(opts.StaleSeriesCompactionThreshold)
return opts, rngs
}
@@ -891,9 +929,13 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
for _, tmpDir := range []string{walDir, dir} {
// Remove tmp dirs.
- if err := removeBestEffortTmpDirs(l, tmpDir); err != nil {
+ if err := tsdbutil.RemoveTmpDirs(l, tmpDir, isTmpDir); err != nil {
return nil, fmt.Errorf("remove tmp dirs: %w", err)
}
+ // Remove any temporary checkpoints that might have been interrupted during creation.
+ if err := wlog.DeleteTempCheckpoints(l, tmpDir); err != nil {
+ return nil, fmt.Errorf("delete temp checkpoints: %w", err)
+ }
}
db := &DB{
@@ -915,11 +957,9 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
}
close(db.donec) // DB is never run if it was an error, so close this channel here.
- errs := tsdb_errors.NewMulti(returnedErr)
if err := db.Close(); err != nil {
- errs.Add(fmt.Errorf("close DB after failed startup: %w", err))
+ returnedErr = errors.Join(returnedErr, fmt.Errorf("close DB after failed startup: %w", err))
}
- returnedErr = errs.Err()
}()
if db.blocksToDelete == nil {
@@ -1079,26 +1119,6 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
return db, nil
}
-func removeBestEffortTmpDirs(l *slog.Logger, dir string) error {
- files, err := os.ReadDir(dir)
- if os.IsNotExist(err) {
- return nil
- }
- if err != nil {
- return err
- }
- for _, f := range files {
- if isTmpDir(f) {
- if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil {
- l.Error("failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err)
- continue
- }
- l.Info("Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name()))
- }
- }
- return nil
-}
-
// StartTime implements the Storage interface.
func (db *DB) StartTime() (int64, error) {
db.mtx.RLock()
@@ -1151,6 +1171,29 @@ func (db *DB) run(ctx context.Context) {
}
// We attempt mmapping of head chunks regularly.
db.head.mmapHeadChunks()
+
+ numStaleSeries, numSeries := db.Head().NumStaleSeries(), db.Head().NumSeries()
+ if db.autoCompact && numSeries > 0 && db.opts.staleSeriesCompactionThreshold.Load() > 0 {
+ staleSeriesRatio := float64(numStaleSeries) / float64(numSeries)
+ if staleSeriesRatio >= db.opts.staleSeriesCompactionThreshold.Load() {
+ nextCompactionIsSoon := false
+ if !db.lastHeadCompactionTime.IsZero() {
+ compactionInterval := time.Duration(db.head.chunkRange.Load()) * time.Millisecond
+ nextEstimatedCompactionTime := db.lastHeadCompactionTime.Add(compactionInterval)
+ if time.Now().Add(10 * time.Minute).After(nextEstimatedCompactionTime) {
+ // Next compaction is starting within next 10 mins.
+ nextCompactionIsSoon = true
+ }
+ }
+
+ if !nextCompactionIsSoon {
+ if err := db.CompactStaleHead(); err != nil {
+ db.logger.Error("immediate stale series compaction failed", "err", err)
+ }
+ }
+ }
+ }
+
case <-db.compactc:
db.metrics.compactionsTriggered.Inc()
@@ -1203,7 +1246,7 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
oooTimeWindow := int64(0)
if conf.StorageConfig.TSDBConfig != nil {
oooTimeWindow = conf.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
-
+ db.opts.staleSeriesCompactionThreshold.Store(conf.StorageConfig.TSDBConfig.StaleSeriesCompactionThreshold)
// Update retention configuration if provided.
if conf.StorageConfig.TSDBConfig.Retention != nil {
db.retentionMtx.Lock()
@@ -1217,6 +1260,8 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
}
db.retentionMtx.Unlock()
}
+ } else {
+ db.opts.staleSeriesCompactionThreshold.Store(0)
}
if oooTimeWindow < 0 {
oooTimeWindow = 0
@@ -1348,11 +1393,9 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) {
lastBlockMaxt := int64(math.MinInt64)
defer func() {
- errs := tsdb_errors.NewMulti(returnErr)
if err := db.head.truncateWAL(lastBlockMaxt); err != nil {
- errs.Add(fmt.Errorf("WAL truncation in Compact defer: %w", err))
+ returnErr = errors.Join(returnErr, fmt.Errorf("WAL truncation in Compact defer: %w", err))
}
- returnErr = errs.Err()
}()
start := time.Now()
@@ -1477,13 +1520,13 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
return fmt.Errorf("compact ooo head: %w", err)
}
if err := db.reloadBlocks(); err != nil {
- errs := tsdb_errors.NewMulti(err)
+ errs := []error{err}
for _, uid := range ulids {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
- errs.Add(errRemoveAll)
+ errs = append(errs, errRemoveAll)
}
}
- return fmt.Errorf("reloadBlocks blocks after failed compact ooo head: %w", errs.Err())
+ return fmt.Errorf("reloadBlocks blocks after failed compact ooo head: %w", errors.Join(errs...))
}
lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef()
@@ -1560,19 +1603,23 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
// compactHead compacts the given RangeHead.
// The db.cmtx should be held before calling this method.
func (db *DB) compactHead(head *RangeHead) error {
+ db.lastHeadCompactionTime = time.Now()
+
uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil)
if err != nil {
return fmt.Errorf("persist head block: %w", err)
}
if err := db.reloadBlocks(); err != nil {
- multiErr := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
+ errs := []error{
+ fmt.Errorf("reloadBlocks blocks: %w", err),
+ }
for _, uid := range uids {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
- multiErr.Add(fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
+ errs = append(errs, fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
}
}
- return multiErr.Err()
+ return errors.Join(errs...)
}
if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil {
return fmt.Errorf("head memory truncate: %w", err)
@@ -1583,6 +1630,61 @@ func (db *DB) compactHead(head *RangeHead) error {
return nil
}
+func (db *DB) CompactStaleHead() (err error) {
+ db.cmtx.Lock()
+ defer func() {
+ db.cmtx.Unlock()
+ if err != nil {
+ db.metrics.staleSeriesCompactionsFailed.Inc()
+ }
+ }()
+
+ db.metrics.staleSeriesCompactionsTriggered.Inc()
+
+ db.logger.Info("Starting stale series compaction")
+ start := time.Now()
+
+ // We get the stale series reference first because this list can change during the compaction below.
+ // It is more efficient and easier to provide an index interface for the stale series when we have a static list.
+ staleSeriesRefs, err := db.head.SortedStaleSeriesRefsNoOOOData(context.Background())
+ if err != nil {
+ return err
+ }
+ meta := &BlockMeta{}
+ meta.Compaction.SetStaleSeries()
+ mint, maxt := db.head.opts.ChunkRange*(db.head.MinTime()/db.head.opts.ChunkRange), db.head.MaxTime()
+ for ; mint < maxt; mint += db.head.chunkRange.Load() {
+ staleHead := NewStaleHead(db.Head(), mint, mint+db.head.chunkRange.Load()-1, staleSeriesRefs)
+
+ uids, err := db.compactor.Write(db.dir, staleHead, staleHead.MinTime(), staleHead.BlockMaxTime(), meta)
+ if err != nil {
+ return fmt.Errorf("persist stale head: %w", err)
+ }
+
+ db.logger.Info("Stale series block created", "ulids", fmt.Sprintf("%v", uids), "min_time", mint, "max_time", maxt)
+
+ if err := db.reloadBlocks(); err != nil {
+ errs := []error{fmt.Errorf("reloadBlocks blocks: %w", err)}
+ for _, uid := range uids {
+ if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
+ errs = append(errs, fmt.Errorf("delete persisted stale head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
+ }
+ }
+ return errors.Join(errs...)
+ }
+ }
+
+ if err := db.head.truncateStaleSeries(staleSeriesRefs, maxt); err != nil {
+ return fmt.Errorf("head truncate: %w", err)
+ }
+ db.head.RebuildSymbolTable(db.logger)
+
+ elapsed := time.Since(start)
+ db.metrics.staleSeriesCompactionDuration.Observe(elapsed.Seconds())
+ db.logger.Info("Ending stale series compaction", "num_series", len(staleSeriesRefs), "duration", elapsed)
+ return nil
+}
+
// compactBlocks compacts all the eligible on-disk blocks.
// The db.cmtx should be held before calling this method.
func (db *DB) compactBlocks() (err error) {
@@ -1616,13 +1718,13 @@ func (db *DB) compactBlocks() (err error) {
}
if err := db.reloadBlocks(); err != nil {
- errs := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
+ errs := []error{fmt.Errorf("reloadBlocks blocks: %w", err)}
for _, uid := range uids {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
- errs.Add(fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
+ errs = append(errs, fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
}
}
- return errs.Err()
+ return errors.Join(errs...)
}
}
@@ -1702,13 +1804,13 @@ func (db *DB) reloadBlocks() (err error) {
}
}
db.mtx.RUnlock()
- errs := tsdb_errors.NewMulti()
+ var errs []error
for ulid, err := range corrupted {
if err != nil {
- errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
+ errs = append(errs, fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
}
}
- return errs.Err()
+ return errors.Join(errs...)
}
var (
@@ -2042,7 +2144,7 @@ func (db *DB) inOrderBlocksMaxTime() (maxt int64, ok bool) {
maxt, ok = int64(math.MinInt64), false
// If blocks are overlapping, last block might not have the max time. So check all blocks.
for _, b := range db.Blocks() {
- if !b.meta.Compaction.FromOutOfOrder() && b.meta.MaxTime > maxt {
+ if !b.meta.Compaction.FromOutOfOrder() && !b.meta.Compaction.FromStaleSeries() && b.meta.MaxTime > maxt {
ok = true
maxt = b.meta.MaxTime
}
@@ -2080,11 +2182,14 @@ func (db *DB) Close() error {
g.Go(pb.Close)
}
- errs := tsdb_errors.NewMulti(g.Wait(), db.locker.Release())
- if db.head != nil {
- errs.Add(db.head.Close())
+ errs := []error{
+ g.Wait(),
+ db.locker.Release(),
}
- return errs.Err()
+ if db.head != nil {
+ errs = append(errs, db.head.Close())
+ }
+ return errors.Join(errs...)
}
// DisableCompactions disables auto compactions.
@@ -2417,8 +2522,7 @@ func isBlockDir(fi fs.DirEntry) bool {
return err == nil
}
-// isTmpDir returns true if the given file-info contains a block ULID, a checkpoint prefix,
-// or a chunk snapshot prefix and a tmp extension.
+// isTmpDir returns true if the given file-info contains a block ULID, or a chunk snapshot prefix and a tmp extension.
func isTmpDir(fi fs.DirEntry) bool {
if !fi.IsDir() {
return false
@@ -2427,9 +2531,6 @@ func isTmpDir(fi fs.DirEntry) bool {
fn := fi.Name()
ext := filepath.Ext(fn)
if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy {
- if strings.HasPrefix(fn, wlog.CheckpointPrefix) {
- return true
- }
if strings.HasPrefix(fn, chunkSnapshotPrefix) {
return true
}
@@ -2465,3 +2566,12 @@ func exponential(d, minD, maxD time.Duration) time.Duration {
}
return d
}
+
+// closeAll closes all given closers while recording all errors.
+func closeAll(cs []io.Closer) error {
+ var errs []error
+ for _, c := range cs {
+ errs = append(errs, c.Close())
+ }
+ return errors.Join(errs...)
+}
diff --git a/tsdb/db_append_v2_test.go b/tsdb/db_append_v2_test.go
index 344b1d6943..8083829537 100644
--- a/tsdb/db_append_v2_test.go
+++ b/tsdb/db_append_v2_test.go
@@ -372,7 +372,7 @@ func TestDeleteSimple_AppendV2(t *testing.T) {
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
- expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ expSamples = append(expSamples, sample{0, ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
@@ -507,7 +507,7 @@ func TestSkippingInvalidValuesInSameTxn_AppendV2(t *testing.T) {
ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
- labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}},
+ labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 0, 1, nil, nil}},
}, ssMap)
// Append Out of Order Value.
@@ -524,7 +524,7 @@ func TestSkippingInvalidValuesInSameTxn_AppendV2(t *testing.T) {
ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
- labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}, sample{10, 3, nil, nil}},
+ labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 0, 1, nil, nil}, sample{0, 10, 3, nil, nil}},
}, ssMap)
}
@@ -669,7 +669,7 @@ func TestDB_SnapshotWithDelete_AppendV2(t *testing.T) {
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
- expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ expSamples = append(expSamples, sample{0, ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
@@ -772,7 +772,7 @@ func TestDB_e2e_AppendV2(t *testing.T) {
for range numDatapoints {
v := rand.Float64()
- series = append(series, sample{ts, v, nil, nil})
+ series = append(series, sample{0, ts, v, nil, nil})
_, err := app.Append(0, lset, 0, ts, v, nil, nil, storage.AOptions{})
require.NoError(t, err)
@@ -1094,7 +1094,7 @@ func TestTombstoneClean_AppendV2(t *testing.T) {
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
- expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ expSamples = append(expSamples, sample{0, ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
@@ -1835,6 +1835,7 @@ func TestBlockRanges_AppendV2(t *testing.T) {
createBlock(t, dir, genSeries(1, 1, 0, firstBlockMaxT))
db, err := open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
require.NoError(t, err)
+ db.DisableCompactions()
rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 + 1
@@ -1851,21 +1852,16 @@ func TestBlockRanges_AppendV2(t *testing.T) {
require.NoError(t, err)
require.NoError(t, app.Commit())
- for range 100 {
- if len(db.Blocks()) == 2 {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- require.Len(t, db.Blocks(), 2, "no new block created after the set timeout")
+ require.NoError(t, db.Compact(ctx))
+ blocks := db.Blocks()
+ require.Len(t, blocks, 2, "no new block after compaction")
- require.LessOrEqual(t, db.Blocks()[1].Meta().MinTime, db.Blocks()[0].Meta().MaxTime,
- "new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
+ require.GreaterOrEqual(t, blocks[1].Meta().MinTime, blocks[0].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", blocks[0].Meta(), blocks[1].Meta())
// Test that wal records are skipped when an existing block covers the same time ranges
// and compaction doesn't create an overlapping block.
app = db.AppenderV2(ctx)
- db.DisableCompactions()
_, err = app.Append(0, lbl, 0, secondBlockMaxt+1, rand.Float64(), nil, nil, storage.AOptions{})
require.NoError(t, err)
_, err = app.Append(0, lbl, 0, secondBlockMaxt+2, rand.Float64(), nil, nil, storage.AOptions{})
@@ -1882,6 +1878,7 @@ func TestBlockRanges_AppendV2(t *testing.T) {
db, err = open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
require.NoError(t, err)
+ db.DisableCompactions()
defer db.Close()
require.Len(t, db.Blocks(), 3, "db doesn't include expected number of blocks")
@@ -1891,17 +1888,12 @@ func TestBlockRanges_AppendV2(t *testing.T) {
_, err = app.Append(0, lbl, 0, thirdBlockMaxt+rangeToTriggerCompaction, rand.Float64(), nil, nil, storage.AOptions{}) // Trigger a compaction
require.NoError(t, err)
require.NoError(t, app.Commit())
- for range 100 {
- if len(db.Blocks()) == 4 {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
+ require.NoError(t, db.Compact(ctx))
+ blocks = db.Blocks()
+ require.Len(t, blocks, 4, "no new block after compaction")
- require.Len(t, db.Blocks(), 4, "no new block created after the set timeout")
-
- require.LessOrEqual(t, db.Blocks()[3].Meta().MinTime, db.Blocks()[2].Meta().MaxTime,
- "new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
+ require.GreaterOrEqual(t, blocks[3].Meta().MinTime, blocks[2].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", blocks[2].Meta(), blocks[3].Meta())
}
// TestDBReadOnly ensures that opening a DB in readonly mode doesn't modify any files on the disk.
@@ -2310,7 +2302,7 @@ func TestCompactHead_AppendV2(t *testing.T) {
val := rand.Float64()
_, err := app.Append(0, labels.FromStrings("a", "b"), 0, int64(i), val, nil, nil, storage.AOptions{})
require.NoError(t, err)
- expSamples = append(expSamples, sample{int64(i), val, nil, nil})
+ expSamples = append(expSamples, sample{0, int64(i), val, nil, nil})
}
require.NoError(t, app.Commit())
@@ -2337,7 +2329,7 @@ func TestCompactHead_AppendV2(t *testing.T) {
series = seriesSet.At().Iterator(series)
for series.Next() == chunkenc.ValFloat {
time, val := series.At()
- actSamples = append(actSamples, sample{time, val, nil, nil})
+ actSamples = append(actSamples, sample{0, time, val, nil, nil})
}
require.NoError(t, series.Err())
}
@@ -7049,97 +7041,6 @@ func testPanicOnApplyConfigAppendV2(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, err)
}
-func TestDiskFillingUpAfterDisablingOOO_AppendV2(t *testing.T) {
- t.Parallel()
- for name, scenario := range sampleTypeScenarios {
- t.Run(name, func(t *testing.T) {
- testDiskFillingUpAfterDisablingOOOAppenderV2(t, scenario)
- })
- }
-}
-
-func testDiskFillingUpAfterDisablingOOOAppenderV2(t *testing.T, scenario sampleTypeScenario) {
- t.Parallel()
- ctx := context.Background()
-
- opts := DefaultOptions()
- opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
-
- db := newTestDB(t, withOpts(opts))
- db.DisableCompactions()
-
- series1 := labels.FromStrings("foo", "bar1")
- var allSamples []chunks.Sample
- addSamples := func(fromMins, toMins int64) {
- app := db.AppenderV2(context.Background())
- for m := fromMins; m <= toMins; m++ {
- ts := m * time.Minute.Milliseconds()
- _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
- require.NoError(t, err)
- allSamples = append(allSamples, s)
- }
- require.NoError(t, app.Commit())
- }
-
- // In-order samples.
- addSamples(290, 300)
- // OOO samples.
- addSamples(250, 299)
-
- // Restart DB with OOO disabled.
- require.NoError(t, db.Close())
-
- opts.OutOfOrderTimeWindow = 0
- db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
- db.DisableCompactions()
-
- ms := db.head.series.getByHash(series1.Hash(), series1)
- require.NotEmpty(t, ms.ooo.oooMmappedChunks, "OOO mmap chunk was not replayed")
-
- checkMmapFileContents := func(contains, notContains []string) {
- mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
- files, err := os.ReadDir(mmapDir)
- require.NoError(t, err)
-
- fnames := make([]string, 0, len(files))
- for _, f := range files {
- fnames = append(fnames, f.Name())
- }
-
- for _, f := range contains {
- require.Contains(t, fnames, f)
- }
- for _, f := range notContains {
- require.NotContains(t, fnames, f)
- }
- }
-
- // Add in-order samples until ready for compaction..
- addSamples(301, 500)
-
- // Check that m-map files gets deleted properly after compactions.
-
- db.head.mmapHeadChunks()
- checkMmapFileContents([]string{"000001", "000002"}, nil)
- require.NoError(t, db.Compact(ctx))
- checkMmapFileContents([]string{"000002"}, []string{"000001"})
- require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted")
-
- addSamples(501, 650)
- db.head.mmapHeadChunks()
- checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"})
- require.NoError(t, db.Compact(ctx))
- checkMmapFileContents(nil, []string{"000001", "000002", "000003"})
-
- // Verify that WBL is empty.
- files, err := os.ReadDir(db.head.wbl.Dir())
- require.NoError(t, err)
- require.Len(t, files, 1) // Last empty file after compaction.
- finfo, err := files[0].Info()
- require.NoError(t, err)
- require.Equal(t, int64(0), finfo.Size())
-}
-
func TestHistogramAppendAndQuery_AppendV2(t *testing.T) {
t.Run("integer histograms", func(t *testing.T) {
testHistogramAppendAndQueryHelperAppendV2(t, false)
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 299ade8826..3f2861d633 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -52,6 +52,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
@@ -125,6 +126,7 @@ func newTestDB(t testing.TB, opts ...testDBOpt) (db *DB) {
db, err = open(o.dir, nil, nil, o.opts, o.rngs, nil)
}
require.NoError(t, err)
+
t.Cleanup(func() {
// Always close. DB is safe for close-after-close.
require.NoError(t, db.Close())
@@ -145,6 +147,16 @@ func TestDBClose_AfterClose(t *testing.T) {
// query runs a matcher query against the querier and fully expands its data.
func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]chunks.Sample {
+ return queryHelper(t, q, true, matchers...)
+}
+
+// queryWithoutReplacingNaNs runs a matcher query against the querier and fully expands its data.
+func queryWithoutReplacingNaNs(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]chunks.Sample {
+ return queryHelper(t, q, false, matchers...)
+}
+
+// queryHelper runs a matcher query against the querier and fully expands its data.
+func queryHelper(t testing.TB, q storage.Querier, withNaNReplacement bool, matchers ...*labels.Matcher) map[string][]chunks.Sample {
ss := q.Select(context.Background(), false, nil, matchers...)
defer func() {
require.NoError(t, q.Close())
@@ -156,7 +168,13 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str
series := ss.At()
it = series.Iterator(it)
- samples, err := storage.ExpandSamples(it, newSample)
+ var samples []chunks.Sample
+ var err error
+ if withNaNReplacement {
+ samples, err = storage.ExpandSamples(it, newSample)
+ } else {
+ samples, err = storage.ExpandSamplesWithoutReplacingNaNs(it, newSample)
+ }
require.NoError(t, err)
require.NoError(t, it.Err())
@@ -546,7 +564,7 @@ func TestDeleteSimple(t *testing.T) {
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
- expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ expSamples = append(expSamples, sample{0, ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
@@ -691,7 +709,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
- labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}},
+ labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 0, 1, nil, nil}},
}, ssMap)
// Append Out of Order Value.
@@ -708,7 +726,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.Equal(t, map[string][]chunks.Sample{
- labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}, sample{10, 3, nil, nil}},
+ labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 0, 1, nil, nil}, sample{0, 10, 3, nil, nil}},
}, ssMap)
}
@@ -853,7 +871,7 @@ func TestDB_SnapshotWithDelete(t *testing.T) {
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
- expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ expSamples = append(expSamples, sample{0, ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
@@ -956,7 +974,7 @@ func TestDB_e2e(t *testing.T) {
for range numDatapoints {
v := rand.Float64()
- series = append(series, sample{ts, v, nil, nil})
+ series = append(series, sample{0, ts, v, nil, nil})
_, err := app.Append(0, lset, ts, v)
require.NoError(t, err)
@@ -1278,7 +1296,7 @@ func TestTombstoneClean(t *testing.T) {
expSamples := make([]chunks.Sample, 0, len(c.remaint))
for _, ts := range c.remaint {
- expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ expSamples = append(expSamples, sample{0, ts, smpls[ts], nil, nil})
}
expss := newMockSeriesSet([]storage.Series{
@@ -2390,6 +2408,7 @@ func TestBlockRanges(t *testing.T) {
createBlock(t, dir, genSeries(1, 1, 0, firstBlockMaxT))
db, err := open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
require.NoError(t, err)
+ db.DisableCompactions()
rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 + 1
@@ -2406,21 +2425,16 @@ func TestBlockRanges(t *testing.T) {
require.NoError(t, err)
require.NoError(t, app.Commit())
- for range 100 {
- if len(db.Blocks()) == 2 {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- require.Len(t, db.Blocks(), 2, "no new block created after the set timeout")
+ require.NoError(t, db.Compact(ctx))
+ blocks := db.Blocks()
+ require.Len(t, blocks, 2, "no new block after compaction")
- require.LessOrEqual(t, db.Blocks()[1].Meta().MinTime, db.Blocks()[0].Meta().MaxTime,
- "new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
+ require.GreaterOrEqual(t, blocks[1].Meta().MinTime, blocks[0].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", blocks[0].Meta(), blocks[1].Meta())
// Test that wal records are skipped when an existing block covers the same time ranges
// and compaction doesn't create an overlapping block.
app = db.Appender(ctx)
- db.DisableCompactions()
_, err = app.Append(0, lbl, secondBlockMaxt+1, rand.Float64())
require.NoError(t, err)
_, err = app.Append(0, lbl, secondBlockMaxt+2, rand.Float64())
@@ -2437,6 +2451,7 @@ func TestBlockRanges(t *testing.T) {
db, err = open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
require.NoError(t, err)
+ db.DisableCompactions()
defer db.Close()
require.Len(t, db.Blocks(), 3, "db doesn't include expected number of blocks")
@@ -2446,17 +2461,12 @@ func TestBlockRanges(t *testing.T) {
_, err = app.Append(0, lbl, thirdBlockMaxt+rangeToTriggerCompaction, rand.Float64()) // Trigger a compaction
require.NoError(t, err)
require.NoError(t, app.Commit())
- for range 100 {
- if len(db.Blocks()) == 4 {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
+ require.NoError(t, db.Compact(ctx))
+ blocks = db.Blocks()
+ require.Len(t, blocks, 4, "no new block after compaction")
- require.Len(t, db.Blocks(), 4, "no new block created after the set timeout")
-
- require.LessOrEqual(t, db.Blocks()[3].Meta().MinTime, db.Blocks()[2].Meta().MaxTime,
- "new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
+ require.GreaterOrEqual(t, blocks[3].Meta().MinTime, blocks[2].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", blocks[2].Meta(), blocks[3].Meta())
}
// TestDBReadOnly ensures that opening a DB in readonly mode doesn't modify any files on the disk.
@@ -2610,7 +2620,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
db.DisableCompactions()
app := db.Appender(ctx)
maxt = 1000
- for i := 0; i < maxt; i++ {
+ for i := range maxt {
_, err := app.Append(0, labels.FromStrings(defaultLabelName, "flush"), int64(i), 1.0)
require.NoError(t, err)
}
@@ -2863,11 +2873,11 @@ func assureChunkFromSamples(t *testing.T, samples []chunks.Sample) chunks.Meta {
// TestChunkWriter_ReadAfterWrite ensures that chunk segment are cut at the set segment size and
// that the resulted segments includes the expected chunks data.
func TestChunkWriter_ReadAfterWrite(t *testing.T) {
- chk1 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 1, nil, nil}})
- chk2 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 2, nil, nil}})
- chk3 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 3, nil, nil}})
- chk4 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 4, nil, nil}})
- chk5 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 5, nil, nil}})
+ chk1 := assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 1, nil, nil}})
+ chk2 := assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 2, nil, nil}})
+ chk3 := assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 3, nil, nil}})
+ chk4 := assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 4, nil, nil}})
+ chk5 := assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 5, nil, nil}})
chunkSize := len(chk1.Chunk.Bytes()) + chunks.MaxChunkLengthFieldSize + chunks.ChunkEncodingSize + crc32.Size
tests := []struct {
@@ -3069,11 +3079,11 @@ func TestRangeForTimestamp(t *testing.T) {
func TestChunkReader_ConcurrentReads(t *testing.T) {
t.Parallel()
chks := []chunks.Meta{
- assureChunkFromSamples(t, []chunks.Sample{sample{1, 1, nil, nil}}),
- assureChunkFromSamples(t, []chunks.Sample{sample{1, 2, nil, nil}}),
- assureChunkFromSamples(t, []chunks.Sample{sample{1, 3, nil, nil}}),
- assureChunkFromSamples(t, []chunks.Sample{sample{1, 4, nil, nil}}),
- assureChunkFromSamples(t, []chunks.Sample{sample{1, 5, nil, nil}}),
+ assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 1, nil, nil}}),
+ assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 2, nil, nil}}),
+ assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 3, nil, nil}}),
+ assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 4, nil, nil}}),
+ assureChunkFromSamples(t, []chunks.Sample{sample{0, 1, 5, nil, nil}}),
}
tempDir := t.TempDir()
@@ -3133,7 +3143,7 @@ func TestCompactHead(t *testing.T) {
val := rand.Float64()
_, err := app.Append(0, labels.FromStrings("a", "b"), int64(i), val)
require.NoError(t, err)
- expSamples = append(expSamples, sample{int64(i), val, nil, nil})
+ expSamples = append(expSamples, sample{0, int64(i), val, nil, nil})
}
require.NoError(t, app.Commit())
@@ -3160,7 +3170,7 @@ func TestCompactHead(t *testing.T) {
series = seriesSet.At().Iterator(series)
for series.Next() == chunkenc.ValFloat {
time, val := series.At()
- actSamples = append(actSamples, sample{time, val, nil, nil})
+ actSamples = append(actSamples, sample{0, time, val, nil, nil})
}
require.NoError(t, series.Err())
}
@@ -8287,16 +8297,22 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {
func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
t.Parallel()
- for name, scenario := range sampleTypeScenarios {
- t.Run(name, func(t *testing.T) {
- testDiskFillingUpAfterDisablingOOO(t, scenario)
- })
+ for _, appV2 := range []bool{true, false} {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(fmt.Sprintf("sample=%v/appV2=%v", name, appV2), func(t *testing.T) {
+ testDiskFillingUpAfterDisablingOOO(t, scenario, func(db *DB, ctx context.Context) storage.LimitedAppenderV1 {
+ if appV2 {
+ return storage.AppenderV2AsLimitedV1(db.AppenderV2(ctx))
+ }
+ return db.Appender(ctx)
+ })
+ })
+ }
}
}
-func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenario) {
+func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenario, appenderFn func(db *DB, ctx context.Context) storage.LimitedAppenderV1) {
t.Parallel()
- ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
@@ -8304,10 +8320,14 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- series1 := labels.FromStrings("foo", "bar1")
- var allSamples []chunks.Sample
+ var (
+ ctx = t.Context()
+ series1 = labels.FromStrings("foo", "bar1")
+ allSamples []chunks.Sample
+ )
+
addSamples := func(fromMins, toMins int64) {
- app := db.Appender(context.Background())
+ app := appenderFn(db, ctx)
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, s, err := scenario.appendFunc(app, series1, ts, ts)
@@ -8350,21 +8370,36 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
}
}
- // Add in-order samples until ready for compaction..
+ // Add in-order samples until ready for compaction.
addSamples(301, 500)
// Check that m-map files gets deleted properly after compactions.
db.head.mmapHeadChunks()
checkMmapFileContents([]string{"000001", "000002"}, nil)
- require.NoError(t, db.Compact(ctx))
+
+ // NOTE: We are investigating flaky errors from this compaction on i386 architecture. Compaction panics due to chunk
+ // mapper fatal error. Recover here to understand the error cause. Leaving panic recovery to test causes deadlock
+ // as t.Cleanup tries to close DB with open locks.
+ // See https://github.com/prometheus/prometheus/issues/17941#issuecomment-3846381263
+ require.NotPanics(t, func() {
+ require.NoError(t, db.Compact(ctx))
+ })
+
checkMmapFileContents([]string{"000002"}, []string{"000001"})
require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted")
addSamples(501, 650)
db.head.mmapHeadChunks()
checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"})
- require.NoError(t, db.Compact(ctx))
+
+ // NOTE: We are investigating flaky errors from this compaction on i386 architecture. Compaction panics due to chunk
+ // mapper fatal error. Recover here to understand the error cause. Leaving panic recovery to test causes deadlock
+ // as t.Cleanup tries to close DB with open locks.
+ // See https://github.com/prometheus/prometheus/issues/17941#issuecomment-3846381263
+ require.NotPanics(t, func() {
+ require.NoError(t, db.Compact(ctx))
+ })
checkMmapFileContents(nil, []string{"000001", "000002", "000003"})
// Verify that WBL is empty.
@@ -9323,3 +9358,248 @@ func TestBlockReloadInterval(t *testing.T) {
})
}
}
+
+func TestStaleSeriesCompaction(t *testing.T) {
+ opts := DefaultOptions()
+ opts.MinBlockDuration = 1000
+ opts.MaxBlockDuration = 1000
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+
+ var (
+ nonStaleSeries, staleSeries,
+ nonStaleHist, staleHist,
+ nonStaleFHist, staleFHist,
+ staleSeriesCrossingBoundary, staleHistCrossingBoundary, staleFHistCrossingBoundary []labels.Labels
+ numSeriesPerCategory = 1
+ )
+ for i := range numSeriesPerCategory {
+ nonStaleSeries = append(nonStaleSeries, labels.FromStrings("name", fmt.Sprintf("series%d", 1000+i)))
+ nonStaleHist = append(nonStaleHist, labels.FromStrings("name", fmt.Sprintf("series%d", 2000+i)))
+ nonStaleFHist = append(nonStaleFHist, labels.FromStrings("name", fmt.Sprintf("series%d", 3000+i)))
+
+ staleSeries = append(staleSeries, labels.FromStrings("name", fmt.Sprintf("series%d", 4000+i)))
+ staleHist = append(staleHist, labels.FromStrings("name", fmt.Sprintf("series%d", 5000+i)))
+ staleFHist = append(staleFHist, labels.FromStrings("name", fmt.Sprintf("series%d", 6000+i)))
+
+ staleSeriesCrossingBoundary = append(staleSeriesCrossingBoundary, labels.FromStrings("name", fmt.Sprintf("series%d", 7000+i)))
+ staleHistCrossingBoundary = append(staleHistCrossingBoundary, labels.FromStrings("name", fmt.Sprintf("series%d", 8000+i)))
+ staleFHistCrossingBoundary = append(staleFHistCrossingBoundary, labels.FromStrings("name", fmt.Sprintf("series%d", 9000+i)))
+ }
+
+ var (
+ v = 10.0
+ staleV = math.Float64frombits(value.StaleNaN)
+ h = tsdbutil.GenerateTestHistograms(1)[0]
+ fh = tsdbutil.GenerateTestFloatHistograms(1)[0]
+ staleH = &histogram.Histogram{Sum: staleV}
+ staleFH = &histogram.FloatHistogram{Sum: staleV}
+ )
+
+ addNormalSamples := func(ts int64, floatSeries, histSeries, floatHistSeries []labels.Labels) {
+ app := db.Appender(context.Background())
+ for i := range len(floatSeries) {
+ _, err := app.Append(0, floatSeries[i], ts, v)
+ require.NoError(t, err)
+ _, err = app.AppendHistogram(0, histSeries[i], ts, h, nil)
+ require.NoError(t, err)
+ _, err = app.AppendHistogram(0, floatHistSeries[i], ts, nil, fh)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+ addStaleSamples := func(ts int64, floatSeries, histSeries, floatHistSeries []labels.Labels) {
+ app := db.Appender(context.Background())
+ for i := range len(floatSeries) {
+ _, err := app.Append(0, floatSeries[i], ts, staleV)
+ require.NoError(t, err)
+ _, err = app.AppendHistogram(0, histSeries[i], ts, staleH, nil)
+ require.NoError(t, err)
+ _, err = app.AppendHistogram(0, floatHistSeries[i], ts, nil, staleFH)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Normal sample for all.
+ addNormalSamples(100, nonStaleSeries, nonStaleHist, nonStaleFHist)
+ addNormalSamples(100, staleSeries, staleHist, staleFHist)
+
+ // Stale sample for the stale series. Normal sample for the non-stale series.
+ addNormalSamples(200, nonStaleSeries, nonStaleHist, nonStaleFHist)
+ addStaleSamples(200, staleSeries, staleHist, staleFHist)
+
+ // Normal samples for the non-stale series later
+ addNormalSamples(300, nonStaleSeries, nonStaleHist, nonStaleFHist)
+
+ require.Equal(t, uint64(6*numSeriesPerCategory), db.Head().NumSeries())
+ require.Equal(t, uint64(3*numSeriesPerCategory), db.Head().NumStaleSeries())
+
+ // Series crossing block boundary and gets stale.
+ addNormalSamples(300, staleSeriesCrossingBoundary, staleHistCrossingBoundary, staleFHistCrossingBoundary)
+ addNormalSamples(700, staleSeriesCrossingBoundary, staleHistCrossingBoundary, staleFHistCrossingBoundary)
+ addNormalSamples(1100, staleSeriesCrossingBoundary, staleHistCrossingBoundary, staleFHistCrossingBoundary)
+ addStaleSamples(1200, staleSeriesCrossingBoundary, staleHistCrossingBoundary, staleFHistCrossingBoundary)
+
+ require.NoError(t, db.CompactStaleHead())
+
+ require.Equal(t, uint64(3*numSeriesPerCategory), db.Head().NumSeries())
+ require.Equal(t, uint64(0), db.Head().NumStaleSeries())
+
+ require.Len(t, db.Blocks(), 2)
+ m := db.Blocks()[0].Meta()
+ require.Equal(t, int64(0), m.MinTime)
+ require.Equal(t, int64(1000), m.MaxTime)
+ require.Truef(t, m.Compaction.FromStaleSeries(), "stale series info not found in block meta")
+ m = db.Blocks()[1].Meta()
+ require.Equal(t, int64(1000), m.MinTime)
+ require.Equal(t, int64(2000), m.MaxTime)
+ require.Truef(t, m.Compaction.FromStaleSeries(), "stale series info not found in block meta")
+
+ // To make sure that Head is not truncated based on stale series block.
+ require.NoError(t, db.reload())
+
+ nonFirstH := h.Copy()
+ nonFirstH.CounterResetHint = histogram.NotCounterReset
+ nonFirstFH := fh.Copy()
+ nonFirstFH.CounterResetHint = histogram.NotCounterReset
+
+ // Verify head block.
+ verifyHeadBlock := func() {
+ require.Equal(t, uint64(3), db.head.NumSeries())
+ require.Equal(t, uint64(0), db.head.NumStaleSeries())
+
+ expHeadQuery := make(map[string][]chunks.Sample)
+ for i := range numSeriesPerCategory {
+ expHeadQuery[fmt.Sprintf(`{name="%s"}`, nonStaleSeries[i].Get("name"))] = []chunks.Sample{
+ sample{t: 100, f: v}, sample{t: 200, f: v}, sample{t: 300, f: v},
+ }
+ expHeadQuery[fmt.Sprintf(`{name="%s"}`, nonStaleHist[i].Get("name"))] = []chunks.Sample{
+ sample{t: 100, h: h}, sample{t: 200, h: nonFirstH}, sample{t: 300, h: nonFirstH},
+ }
+ expHeadQuery[fmt.Sprintf(`{name="%s"}`, nonStaleFHist[i].Get("name"))] = []chunks.Sample{
+ sample{t: 100, fh: fh}, sample{t: 200, fh: nonFirstFH}, sample{t: 300, fh: nonFirstFH},
+ }
+ }
+
+ querier, err := NewBlockQuerier(NewRangeHead(db.head, 0, 300), 0, 300)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ querier.Close()
+ })
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchRegexp, "name", "series.*"))
+ require.Equal(t, expHeadQuery, seriesSet)
+ }
+
+ verifyHeadBlock()
+
+ // Verify blocks from stale series.
+ {
+ expBlockQuery := make(map[string][]chunks.Sample)
+ for i := range numSeriesPerCategory {
+ expBlockQuery[fmt.Sprintf(`{name="%s"}`, staleSeries[i].Get("name"))] = []chunks.Sample{
+ sample{t: 100, f: v}, sample{t: 200, f: staleV},
+ }
+ expBlockQuery[fmt.Sprintf(`{name="%s"}`, staleHist[i].Get("name"))] = []chunks.Sample{
+ sample{t: 100, h: h}, sample{t: 200, h: staleH},
+ }
+ expBlockQuery[fmt.Sprintf(`{name="%s"}`, staleFHist[i].Get("name"))] = []chunks.Sample{
+ sample{t: 100, fh: fh}, sample{t: 200, fh: staleFH},
+ }
+ expBlockQuery[fmt.Sprintf(`{name="%s"}`, staleSeriesCrossingBoundary[i].Get("name"))] = []chunks.Sample{
+ sample{t: 300, f: v}, sample{t: 700, f: v}, sample{t: 1100, f: v}, sample{t: 1200, f: staleV},
+ }
+ expBlockQuery[fmt.Sprintf(`{name="%s"}`, staleHistCrossingBoundary[i].Get("name"))] = []chunks.Sample{
+ sample{t: 300, h: h}, sample{t: 700, h: nonFirstH}, sample{t: 1100, h: h}, sample{t: 1200, h: staleH},
+ }
+ expBlockQuery[fmt.Sprintf(`{name="%s"}`, staleFHistCrossingBoundary[i].Get("name"))] = []chunks.Sample{
+ sample{t: 300, fh: fh}, sample{t: 700, fh: nonFirstFH}, sample{t: 1100, fh: fh}, sample{t: 1200, fh: staleFH},
+ }
+ }
+
+ querier, err := NewBlockQuerier(db.Blocks()[0], 0, 1000)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ querier.Close()
+ })
+ seriesSet := queryWithoutReplacingNaNs(t, querier, labels.MustNewMatcher(labels.MatchRegexp, "name", "series.*"))
+
+ querier, err = NewBlockQuerier(db.Blocks()[1], 1000, 2000)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ querier.Close()
+ })
+ seriesSet2 := queryWithoutReplacingNaNs(t, querier, labels.MustNewMatcher(labels.MatchRegexp, "name", "series.*"))
+ for k, v := range seriesSet2 {
+ seriesSet[k] = append(seriesSet[k], v...)
+ }
+
+ require.Len(t, seriesSet, len(expBlockQuery))
+
+ // Compare all the samples except the stale value that needs special handling.
+ for _, category := range [][]labels.Labels{
+ staleSeries, staleHist, staleFHist,
+ staleSeriesCrossingBoundary, staleHistCrossingBoundary, staleFHistCrossingBoundary,
+ } {
+ for i := range numSeriesPerCategory {
+ seriesKey := fmt.Sprintf(`{name="%s"}`, category[i].Get("name"))
+ samples := expBlockQuery[seriesKey]
+ actSamples, exists := seriesSet[seriesKey]
+ require.Truef(t, exists, "series not found in result %s", seriesKey)
+ require.Len(t, actSamples, len(samples))
+
+ for i := range len(samples) - 1 {
+ require.Equal(t, samples[i], actSamples[i])
+ }
+
+ l := len(samples) - 1
+ require.Equal(t, samples[l].T(), actSamples[l].T())
+ switch {
+ case value.IsStaleNaN(samples[l].F()):
+ require.True(t, value.IsStaleNaN(actSamples[l].F()))
+ case samples[l].H() != nil:
+ require.True(t, value.IsStaleNaN(actSamples[l].H().Sum))
+ default:
+ require.True(t, value.IsStaleNaN(actSamples[l].FH().Sum))
+ }
+ }
+ }
+ }
+
+ {
+ // Restart DB and verify that stale series were discarded from WAL replay.
+ require.NoError(t, db.Close())
+ var err error
+ db, err = Open(db.Dir(), db.logger, db.registerer, db.opts, nil)
+ require.NoError(t, err)
+
+ verifyHeadBlock()
+ }
+}
+
+// TestStaleSeriesCompactionWithZeroSeries verifies that CompactStaleHead handles
+// an empty head (0 series) gracefully without division by zero or incorrectly
+// triggering compaction. This is a regression test for issue #17949.
+func TestStaleSeriesCompactionWithZeroSeries(t *testing.T) {
+ opts := DefaultOptions()
+ opts.MinBlockDuration = 1000
+ opts.MaxBlockDuration = 1000
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+
+ // Verify the head is empty.
+ require.Equal(t, uint64(0), db.Head().NumSeries())
+ require.Equal(t, uint64(0), db.Head().NumStaleSeries())
+
+ // CompactStaleHead should handle zero series gracefully (no panic, no error).
+ require.NoError(t, db.CompactStaleHead())
+
+ // Should still have no blocks since there was nothing to compact.
+ require.Empty(t, db.Blocks())
+}
diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go
deleted file mode 100644
index 138b38a8d2..0000000000
--- a/tsdb/errors/errors.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright The Prometheus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package errors
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-)
-
-// multiError type allows combining multiple errors into one.
-type multiError []error
-
-// NewMulti returns multiError with provided errors added if not nil.
-func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return
- m := multiError{}
- m.Add(errs...)
- return m
-}
-
-// Add adds single or many errors to the error list. Each error is added only if not nil.
-// If the error is a nonNilMultiError type, the errors inside nonNilMultiError are added to the main multiError.
-func (es *multiError) Add(errs ...error) {
- for _, err := range errs {
- if err == nil {
- continue
- }
- var merr nonNilMultiError
- if errors.As(err, &merr) {
- *es = append(*es, merr.errs...)
- continue
- }
- *es = append(*es, err)
- }
-}
-
-// Err returns the error list as an error or nil if it is empty.
-func (es multiError) Err() error {
- if len(es) == 0 {
- return nil
- }
- return nonNilMultiError{errs: es}
-}
-
-// nonNilMultiError implements the error interface, and it represents
-// multiError with at least one error inside it.
-// This type is needed to make sure that nil is returned when no error is combined in multiError for err != nil
-// check to work.
-type nonNilMultiError struct {
- errs multiError
-}
-
-// Error returns a concatenated string of the contained errors.
-func (es nonNilMultiError) Error() string {
- var buf bytes.Buffer
-
- if len(es.errs) > 1 {
- fmt.Fprintf(&buf, "%d errors: ", len(es.errs))
- }
-
- for i, err := range es.errs {
- if i != 0 {
- buf.WriteString("; ")
- }
- buf.WriteString(err.Error())
- }
-
- return buf.String()
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored in the MultiError.
-// It returns true if any of the errors in the list match the target.
-func (es nonNilMultiError) Is(target error) bool {
- for _, err := range es.errs {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
-}
-
-// Unwrap returns the list of errors contained in the multiError.
-func (es nonNilMultiError) Unwrap() []error {
- return es.errs
-}
-
-// CloseAll closes all given closers while recording error in MultiError.
-func CloseAll(cs []io.Closer) error {
- errs := NewMulti()
- for _, c := range cs {
- errs.Add(c.Close())
- }
- return errs.Err()
-}
diff --git a/tsdb/errors/errors_test.go b/tsdb/errors/errors_test.go
deleted file mode 100644
index acffdea261..0000000000
--- a/tsdb/errors/errors_test.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package errors
-
-import (
- "context"
- "errors"
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestMultiError_Is(t *testing.T) {
- customErr1 := errors.New("test error 1")
- customErr2 := errors.New("test error 2")
-
- testCases := map[string]struct {
- sourceErrors []error
- target error
- is bool
- }{
- "adding a context cancellation doesn't lose the information": {
- sourceErrors: []error{context.Canceled},
- target: context.Canceled,
- is: true,
- },
- "adding multiple context cancellations doesn't lose the information": {
- sourceErrors: []error{context.Canceled, context.Canceled},
- target: context.Canceled,
- is: true,
- },
- "adding wrapped context cancellations doesn't lose the information": {
- sourceErrors: []error{errors.New("some error"), fmt.Errorf("some message: %w", context.Canceled)},
- target: context.Canceled,
- is: true,
- },
- "adding a nil error doesn't lose the information": {
- sourceErrors: []error{errors.New("some error"), fmt.Errorf("some message: %w", context.Canceled), nil},
- target: context.Canceled,
- is: true,
- },
- "errors with no context cancellation error are not a context canceled error": {
- sourceErrors: []error{errors.New("first error"), errors.New("second error")},
- target: context.Canceled,
- is: false,
- },
- "no errors are not a context canceled error": {
- sourceErrors: nil,
- target: context.Canceled,
- is: false,
- },
- "no errors are a nil error": {
- sourceErrors: nil,
- target: nil,
- is: true,
- },
- "nested multi-error contains customErr1": {
- sourceErrors: []error{
- customErr1,
- NewMulti(
- customErr2,
- fmt.Errorf("wrapped %w", context.Canceled),
- ).Err(),
- },
- target: customErr1,
- is: true,
- },
- "nested multi-error contains customErr2": {
- sourceErrors: []error{
- customErr1,
- NewMulti(
- customErr2,
- fmt.Errorf("wrapped %w", context.Canceled),
- ).Err(),
- },
- target: customErr2,
- is: true,
- },
- "nested multi-error contains wrapped context.Canceled": {
- sourceErrors: []error{
- customErr1,
- NewMulti(
- customErr2,
- fmt.Errorf("wrapped %w", context.Canceled),
- ).Err(),
- },
- target: context.Canceled,
- is: true,
- },
- "nested multi-error does not contain context.DeadlineExceeded": {
- sourceErrors: []error{
- customErr1,
- NewMulti(
- customErr2,
- fmt.Errorf("wrapped %w", context.Canceled),
- ).Err(),
- },
- target: context.DeadlineExceeded,
- is: false, // make sure we still return false in valid cases
- },
- }
-
- for testName, testCase := range testCases {
- t.Run(testName, func(t *testing.T) {
- mErr := NewMulti(testCase.sourceErrors...)
- require.Equal(t, testCase.is, errors.Is(mErr.Err(), testCase.target))
- })
- }
-}
-
-func TestMultiError_As(t *testing.T) {
- tE1 := testError{"error cause 1"}
- tE2 := testError{"error cause 2"}
- var target testError
- testCases := map[string]struct {
- sourceErrors []error
- target error
- as bool
- }{
- "MultiError containing only a testError can be cast to that testError": {
- sourceErrors: []error{tE1},
- target: tE1,
- as: true,
- },
- "MultiError containing multiple testErrors can be cast to the first testError added": {
- sourceErrors: []error{tE1, tE2},
- target: tE1,
- as: true,
- },
- "MultiError containing multiple errors can be cast to the first testError added": {
- sourceErrors: []error{context.Canceled, tE1, context.DeadlineExceeded, tE2},
- target: tE1,
- as: true,
- },
- "MultiError not containing a testError cannot be cast to a testError": {
- sourceErrors: []error{context.Canceled, context.DeadlineExceeded},
- as: false,
- },
- }
-
- for testName, testCase := range testCases {
- t.Run(testName, func(t *testing.T) {
- mErr := NewMulti(testCase.sourceErrors...).Err()
- if testCase.as {
- require.ErrorAs(t, mErr, &target)
- require.Equal(t, testCase.target, target)
- } else {
- require.NotErrorAs(t, mErr, &target)
- }
- })
- }
-}
-
-type testError struct {
- cause string
-}
-
-func (e testError) Error() string {
- return fmt.Sprintf("testError[cause: %s]", e.cause)
-}
diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go
index b58976c911..36b0a7e660 100644
--- a/tsdb/exemplar.go
+++ b/tsdb/exemplar.go
@@ -327,9 +327,10 @@ func (ce *CircularExemplarStorage) grow(l int64) int {
{from: ce.nextIndex, to: oldSize},
{from: 0, to: ce.nextIndex},
}
- ce.nextIndex = copyExemplarRanges(ce.index, newSlice, ce.exemplars, ranges)
+ totalCopied, migrated := copyExemplarRanges(ce.index, newSlice, ce.exemplars, ranges)
+ ce.nextIndex = totalCopied
ce.exemplars = newSlice
- return oldSize
+ return migrated
}
// shrink the circular buffer by either trimming from the right or deleting the
@@ -353,6 +354,7 @@ func (ce *CircularExemplarStorage) shrink(l int64) (migrated int) {
newSlice := make([]circularBufferEntry, int(l))
+ var totalCopied int
switch {
case deleteStart == deleteEnd:
// The entire buffer was cleared (shrink to zero). Note that we don't have to
@@ -363,18 +365,18 @@ func (ce *CircularExemplarStorage) shrink(l int64) (migrated int) {
return 0
case deleteStart < deleteEnd:
// We delete an "inner" section of the circular buffer.
- migrated = copyExemplarRanges(ce.index, newSlice, ce.exemplars, []intRange{
+ totalCopied, migrated = copyExemplarRanges(ce.index, newSlice, ce.exemplars, []intRange{
{from: deleteEnd, to: oldSize},
{from: 0, to: deleteStart},
})
case deleteStart > deleteEnd:
// We keep an "inner" section of the circular buffer.
- migrated = copyExemplarRanges(ce.index, newSlice, ce.exemplars, []intRange{
+ totalCopied, migrated = copyExemplarRanges(ce.index, newSlice, ce.exemplars, []intRange{
{from: deleteEnd, to: deleteStart},
})
}
- ce.nextIndex = migrated % int(l)
+ ce.nextIndex = totalCopied % int(l)
ce.exemplars = newSlice
return migrated
}
@@ -405,8 +407,9 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
// If we insert an out-of-order exemplar, we preemptively find the insertion
// index to check for duplicates.
var insertionIndex int
+ var outOfOrder bool
if indexExists {
- outOfOrder := e.Ts >= ce.exemplars[idx.oldest].exemplar.Ts && e.Ts < ce.exemplars[idx.newest].exemplar.Ts
+ outOfOrder = e.Ts >= ce.exemplars[idx.oldest].exemplar.Ts && e.Ts < ce.exemplars[idx.newest].exemplar.Ts
if outOfOrder {
insertionIndex = ce.findInsertionIndex(e, idx)
if ce.exemplars[insertionIndex].exemplar.Ts == e.Ts {
@@ -425,8 +428,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
ce.index[string(seriesLabels)] = idx
}
- // Remove entries if the buffer is full. Note that this doesn't invalidate the
- // insertion index since out-of-order exemplars cannot be the oldest exemplar.
+ // Remove entries if the buffer is full.
if prev := &ce.exemplars[ce.nextIndex]; prev.ref != nil {
prevRef := prev.ref
if ce.removeExemplar(prev) {
@@ -436,6 +438,11 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
} else {
ce.removeIndex(prevRef)
}
+ } else if outOfOrder && insertionIndex == ce.nextIndex && prevRef == idx {
+ // The entry we were going to insert after was removed from the same series.
+ // Recalculate the insertion point in the updated linked list to avoid
+ // creating a self-referencing loop.
+ insertionIndex = ce.findInsertionIndex(e, idx)
}
}
@@ -582,20 +589,21 @@ func (e intRange) contains(i int) bool {
}
// copyExemplarRanges copies non-overlapping ranges from src into dest and
-// adjusts list pointers in dest and index accordingly. Returns the number of
-// copied items.
+// adjusts list pointers in dest and index accordingly. Returns the total
+// number of slots copied (for nextIndex) and the number of non-empty entries
+// migrated.
func copyExemplarRanges(
index map[string]*indexEntry,
dest, src []circularBufferEntry,
ranges []intRange,
-) int {
+) (totalCopied, migratedEntries int) {
offsets := make([]int, len(ranges))
n := 0
for i, rng := range ranges {
offsets[i] = n - rng.from
n += copy(dest[n:], src[rng.from:rng.to])
}
- migratedEntries := n
+ migratedEntries = n
for di := range n {
e := &dest[di]
if e.ref == nil {
@@ -631,5 +639,5 @@ func copyExemplarRanges(
}
}
}
- return migratedEntries
+ return n, migratedEntries
}
diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go
index 01ffeb9541..0d45f56b3e 100644
--- a/tsdb/exemplar_test.go
+++ b/tsdb/exemplar_test.go
@@ -190,6 +190,22 @@ func TestCircularExemplarStorage_AddExemplar(t *testing.T) {
{Labels: series1, Value: 0.3, Ts: 4},
},
},
+ {
+ name: "out-of-order insert where evicted entry is insertion point",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 2}, // pos 0, linked list middle
+ {Labels: series1, Value: 0.1, Ts: 1}, // pos 1, linked list oldest
+ {Labels: series1, Value: 0.5, Ts: 5}, // pos 2, linked list newest
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.5, Ts: 5},
+ },
+ },
{
name: "insert out of the OOO window",
size: 3,
@@ -390,7 +406,7 @@ func TestCircularExemplarStorage_Resize(t *testing.T) {
{Labels: series1, Value: 0.1, Ts: 1},
{Labels: series1, Value: 0.2, Ts: 2},
},
- wantNextIndex: 2,
+ wantNextIndex: 3,
},
{
name: "in-order, shrink",
@@ -431,7 +447,7 @@ func TestCircularExemplarStorage_Resize(t *testing.T) {
{Labels: series1, Value: 0.2, Ts: 2},
{Labels: series1, Value: 0.3, Ts: 3},
},
- wantNextIndex: 2,
+ wantNextIndex: 3,
},
{
name: "duplicate timestamps",
@@ -452,7 +468,7 @@ func TestCircularExemplarStorage_Resize(t *testing.T) {
exemplars: []exemplar.Exemplar{},
resize: 10,
wantExemplars: []exemplar.Exemplar{},
- wantNextIndex: 0,
+ wantNextIndex: 3,
},
{
name: "empty input, shrink",
@@ -507,7 +523,7 @@ func TestCircularExemplarStorage_Resize(t *testing.T) {
wantExemplars: []exemplar.Exemplar{
{Labels: series1, Value: 0.1, Ts: 1},
},
- wantNextIndex: 1,
+ wantNextIndex: 0,
},
}
@@ -660,6 +676,47 @@ func TestCircularExemplarStorage_Resize(t *testing.T) {
{Labels: series1, Value: 0.6, Ts: 6},
},
},
+ {
+ name: "grow non-full buffer then add entries",
+ addExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ resize1: 10,
+ wantExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ resize2: 10,
+ addExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ wantExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ },
+ {
+ name: "shrink non-full buffer then add entries",
+ addExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ },
+ resize1: 2,
+ wantExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ },
+ resize2: 2,
+ addExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ wantExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ },
}
for _, tc := range resizeTwiceCases {
diff --git a/tsdb/head.go b/tsdb/head.go
index 955c0ae5a7..6fe42c8cf2 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -40,7 +40,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -985,7 +984,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
return nil
}); err != nil {
// secondLastRef because the lastRef caused an error.
- return nil, nil, secondLastRef, fmt.Errorf("iterate on on-disk chunks: %w", err)
+ return nil, nil, secondLastRef, fmt.Errorf("iterate on-disk chunks: %w", err)
}
return mmappedChunks, oooMmappedChunks, lastRef, nil
}
@@ -1203,6 +1202,36 @@ func (h *Head) truncateMemory(mint int64) (err error) {
return h.truncateSeriesAndChunkDiskMapper("truncateMemory")
}
+// truncateStaleSeries removes the provided series as long as they are still stale.
+func (h *Head) truncateStaleSeries(seriesRefs []storage.SeriesRef, maxt int64) error {
+ h.chunkSnapshotMtx.Lock()
+ defer h.chunkSnapshotMtx.Unlock()
+
+ if h.MinTime() >= maxt {
+ return nil
+ }
+
+ h.WaitForPendingReadersInTimeRange(h.MinTime(), maxt)
+
+ deleted := h.gcStaleSeries(seriesRefs, maxt)
+
+ // Record these stale series refs in the WAL so that we can ignore them during replay.
+ if h.wal != nil {
+ stones := make([]tombstones.Stone, 0, len(seriesRefs))
+ for ref := range deleted {
+ stones = append(stones, tombstones.Stone{
+ Ref: ref,
+ Intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: math.MaxInt64}},
+ })
+ }
+ var enc record.Encoder
+ if err := h.wal.Log(enc.Tombstones(stones, nil)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// WaitForPendingReadersInTimeRange waits for queries overlapping with given range to finish querying.
// The query timeout limits the max wait time of this function implicitly.
// The mint is inclusive and maxt is the truncation time hence exclusive.
@@ -1556,6 +1585,53 @@ func (h *RangeHead) String() string {
return fmt.Sprintf("range head (mint: %d, maxt: %d)", h.MinTime(), h.MaxTime())
}
+// StaleHead allows querying the stale series in the Head via an IndexReader, ChunkReader and tombstones.Reader.
+// Used only for compactions.
+type StaleHead struct {
+ RangeHead
+ staleSeriesRefs []storage.SeriesRef
+}
+
+// NewStaleHead returns a *StaleHead.
+func NewStaleHead(head *Head, mint, maxt int64, staleSeriesRefs []storage.SeriesRef) *StaleHead {
+ return &StaleHead{
+ RangeHead: RangeHead{
+ head: head,
+ mint: mint,
+ maxt: maxt,
+ },
+ staleSeriesRefs: staleSeriesRefs,
+ }
+}
+
+func (h *StaleHead) Index() (_ IndexReader, err error) {
+ return h.head.staleIndex(h.mint, h.maxt, h.staleSeriesRefs)
+}
+
+func (h *StaleHead) NumSeries() uint64 {
+ return h.head.NumStaleSeries()
+}
+
+var staleHeadULID = ulid.MustParse("0000000000XXXXXXXSTALEHEAD")
+
+func (h *StaleHead) Meta() BlockMeta {
+ return BlockMeta{
+ MinTime: h.MinTime(),
+ MaxTime: h.MaxTime(),
+ ULID: staleHeadULID,
+ Stats: BlockStats{
+ NumSeries: h.NumSeries(),
+ },
+ }
+}
+
+// String returns an human readable representation of the stake head. It's important to
+// keep this function in order to avoid the struct dump when the head is stringified in
+// errors or logs.
+func (h *StaleHead) String() string {
+ return fmt.Sprintf("stale head (mint: %d, maxt: %d)", h.MinTime(), h.MaxTime())
+}
+
// Delete all samples in the range of [mint, maxt] for series that satisfy the given
// label matchers.
func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error {
@@ -1625,13 +1701,14 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {
// Drop old chunks and remember series IDs and hashes if they can be
// deleted entirely.
- deleted, affected, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef, &h.numStaleSeries)
+ deleted, affected, chunksRemoved, staleSeriesDeleted, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef)
seriesRemoved := len(deleted)
h.metrics.seriesRemoved.Add(float64(seriesRemoved))
h.metrics.chunksRemoved.Add(float64(chunksRemoved))
h.metrics.chunks.Sub(float64(chunksRemoved))
h.numSeries.Sub(uint64(seriesRemoved))
+ h.numStaleSeries.Sub(uint64(staleSeriesDeleted))
// Remove deleted series IDs from the postings lists.
h.postings.Delete(deleted, affected)
@@ -1734,17 +1811,17 @@ func (h *Head) Close() error {
// takes samples from most recent head chunk.
h.mmapHeadChunks()
- errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close())
+ errs := h.chunkDiskMapper.Close()
if h.wal != nil {
- errs.Add(h.wal.Close())
+ errs = errors.Join(errs, h.wal.Close())
}
if h.wbl != nil {
- errs.Add(h.wbl.Close())
+ errs = errors.Join(errs, h.wbl.Close())
}
- if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown {
- errs.Add(h.performChunkSnapshot())
+ if errs == nil && h.opts.EnableMemorySnapshotOnShutdown {
+ errs = errors.Join(errs, h.performChunkSnapshot())
}
- return errs.Err()
+ return errs
}
// String returns an human readable representation of the TSDB head. It's important to
@@ -1948,13 +2025,14 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st
// but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct
// and there's no easy way to cast maps.
// minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series.
-func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef, numStaleSeries *atomic.Uint64) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _ int, _, _ int64, minMmapFile int) {
+func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _, _ int, _, _ int64, minMmapFile int) {
var (
- deleted = map[storage.SeriesRef]struct{}{}
- affected = map[labels.Label]struct{}{}
- rmChunks = 0
- actualMint int64 = math.MaxInt64
- minOOOTime int64 = math.MaxInt64
+ deleted = map[storage.SeriesRef]struct{}{}
+ affected = map[labels.Label]struct{}{}
+ rmChunks = 0
+ staleSeriesDeleted = 0
+ actualMint int64 = math.MaxInt64
+ minOOOTime int64 = math.MaxInt64
)
minMmapFile = math.MaxInt32
@@ -2009,7 +2087,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef, n
if value.IsStaleNaN(series.lastValue) ||
(series.lastHistogramValue != nil && value.IsStaleNaN(series.lastHistogramValue.Sum)) ||
(series.lastFloatHistogramValue != nil && value.IsStaleNaN(series.lastFloatHistogramValue.Sum)) {
- numStaleSeries.Dec()
+ staleSeriesDeleted++
}
deleted[storage.SeriesRef(series.ref)] = struct{}{}
@@ -2025,7 +2103,166 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef, n
actualMint = mint
}
- return deleted, affected, rmChunks, actualMint, minOOOTime, minMmapFile
+ return deleted, affected, rmChunks, staleSeriesDeleted, actualMint, minOOOTime, minMmapFile
+}
+
+// gcStaleSeries removes all the provided series as long as they are still stale
+// and the series maxt is <= the given max.
+// The returned references are the series that got deleted.
+func (h *Head) gcStaleSeries(seriesRefs []storage.SeriesRef, maxt int64) map[storage.SeriesRef]struct{} {
+ // Drop old chunks and remember series IDs and hashes if they can be
+ // deleted entirely.
+ deleted, affected, chunksRemoved := h.series.gcStaleSeries(seriesRefs, maxt)
+ seriesRemoved := len(deleted)
+
+ h.metrics.seriesRemoved.Add(float64(seriesRemoved))
+ h.metrics.chunksRemoved.Add(float64(chunksRemoved))
+ h.metrics.chunks.Sub(float64(chunksRemoved))
+ h.numSeries.Sub(uint64(seriesRemoved))
+ h.numStaleSeries.Sub(uint64(seriesRemoved))
+
+ // Remove deleted series IDs from the postings lists.
+ h.postings.Delete(deleted, affected)
+
+ // Remove tombstones referring to the deleted series.
+ h.tombstones.DeleteTombstones(deleted)
+
+ if h.wal != nil {
+ _, last, _ := wlog.Segments(h.wal.Dir())
+ h.walExpiriesMtx.Lock()
+ // Keep series records until we're past segment 'last'
+ // because the WAL will still have samples records with
+ // this ref ID. If we didn't keep these series records then
+ // on start up when we replay the WAL, or any other code
+ // that reads the WAL, wouldn't be able to use those
+ // samples since we would have no labels for that ref ID.
+ for ref := range deleted {
+ h.walExpiries[chunks.HeadSeriesRef(ref)] = int64(last)
+ }
+ h.walExpiriesMtx.Unlock()
+ }
+
+ return deleted
+}
+
+// deleteSeriesByID deletes the series with the given reference.
+// Only used for WAL replay.
+func (h *Head) deleteSeriesByID(refs []chunks.HeadSeriesRef) {
+ var (
+ deleted = map[storage.SeriesRef]struct{}{}
+ affected = map[labels.Label]struct{}{}
+ staleSeriesDeleted = 0
+ chunksRemoved = 0
+ )
+
+ for _, ref := range refs {
+ refShard := int(ref) & (h.series.size - 1)
+ h.series.locks[refShard].Lock()
+
+ // Copying getByID here to avoid locking and unlocking twice.
+ series := h.series.series[refShard][ref]
+ if series == nil {
+ h.series.locks[refShard].Unlock()
+ continue
+ }
+
+ if value.IsStaleNaN(series.lastValue) ||
+ (series.lastHistogramValue != nil && value.IsStaleNaN(series.lastHistogramValue.Sum)) ||
+ (series.lastFloatHistogramValue != nil && value.IsStaleNaN(series.lastFloatHistogramValue.Sum)) {
+ staleSeriesDeleted++
+ }
+
+ hash := series.lset.Hash()
+ hashShard := int(hash) & (h.series.size - 1)
+
+ chunksRemoved += len(series.mmappedChunks)
+ if series.headChunks != nil {
+ chunksRemoved += series.headChunks.len()
+ }
+
+ deleted[storage.SeriesRef(series.ref)] = struct{}{}
+ series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} })
+ h.series.hashes[hashShard].del(hash, series.ref)
+ delete(h.series.series[refShard], series.ref)
+
+ h.series.locks[refShard].Unlock()
+ }
+
+ h.metrics.seriesRemoved.Add(float64(len(deleted)))
+ h.metrics.chunksRemoved.Add(float64(chunksRemoved))
+ h.metrics.chunks.Sub(float64(chunksRemoved))
+ h.numSeries.Sub(uint64(len(deleted)))
+ h.numStaleSeries.Sub(uint64(staleSeriesDeleted))
+
+ // Remove deleted series IDs from the postings lists.
+ h.postings.Delete(deleted, affected)
+
+ // Remove tombstones referring to the deleted series.
+ h.tombstones.DeleteTombstones(deleted)
+}
+
+// gcStaleSeries removes all the stale series provided that they are still stale
+// and the series maxt is <= the given max.
+func (s *stripeSeries) gcStaleSeries(seriesRefs []storage.SeriesRef, maxt int64) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _ int) {
+ var (
+ deleted = map[storage.SeriesRef]struct{}{}
+ affected = map[labels.Label]struct{}{}
+ rmChunks = 0
+ )
+
+ staleSeriesMap := map[storage.SeriesRef]struct{}{}
+ for _, ref := range seriesRefs {
+ staleSeriesMap[ref] = struct{}{}
+ }
+
+ check := func(hashShard int, hash uint64, series *memSeries, deletedForCallback map[chunks.HeadSeriesRef]labels.Labels) {
+ if _, exists := staleSeriesMap[storage.SeriesRef(series.ref)]; !exists {
+ // This series was not compacted. Skip it.
+ return
+ }
+
+ series.Lock()
+ defer series.Unlock()
+
+ if series.maxTime() > maxt {
+ return
+ }
+
+ // Check if the series is still stale.
+ isStale := value.IsStaleNaN(series.lastValue) ||
+ (series.lastHistogramValue != nil && value.IsStaleNaN(series.lastHistogramValue.Sum)) ||
+ (series.lastFloatHistogramValue != nil && value.IsStaleNaN(series.lastFloatHistogramValue.Sum))
+
+ if !isStale {
+ return
+ }
+
+ if series.headChunks != nil {
+ rmChunks += series.headChunks.len()
+ }
+ rmChunks += len(series.mmappedChunks)
+
+ // The series is gone entirely. We need to keep the series lock
+ // and make sure we have acquired the stripe locks for hash and ID of the
+ // series alike.
+ // If we don't hold them all, there's a very small chance that a series receives
+ // samples again while we are half-way into deleting it.
+ refShard := int(series.ref) & (s.size - 1)
+ if hashShard != refShard {
+ s.locks[refShard].Lock()
+ defer s.locks[refShard].Unlock()
+ }
+
+ deleted[storage.SeriesRef(series.ref)] = struct{}{}
+ series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} })
+ s.hashes[hashShard].del(hash, series.ref)
+ delete(s.series[refShard], series.ref)
+ deletedForCallback[series.ref] = series.lset // OK to access lset; series is locked at the top of this function.
+ }
+
+ s.iterForDeletion(check)
+
+ return deleted, affected, rmChunks
}
// The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each.
@@ -2101,17 +2338,20 @@ func (s *stripeSeries) postCreation(lset labels.Labels) {
}
type sample struct {
+ st int64
t int64
f float64
h *histogram.Histogram
fh *histogram.FloatHistogram
}
-func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample {
- return sample{t, v, h, fh}
+func newSample(st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample {
+ return sample{st, t, v, h, fh}
}
-func (s sample) T() int64 { return s.t }
+func (s sample) T() int64 { return s.t }
+
+func (s sample) ST() int64 { return s.st }
func (s sample) F() float64 { return s.f }
func (s sample) H() *histogram.Histogram { return s.h }
func (s sample) FH() *histogram.FloatHistogram { return s.fh }
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index fceb80bd34..e6c9f2828a 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -19,6 +19,7 @@ import (
"fmt"
"log/slog"
"math"
+ "time"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
@@ -117,10 +118,19 @@ func (a *initAppender) AppendSTZeroSample(ref storage.SeriesRef, lset labels.Lab
// for a completely fresh head with an empty WAL.
func (h *Head) initTime(t int64) {
if !h.minTime.CompareAndSwap(math.MaxInt64, t) {
+ // Concurrent appends that are initializing.
+ // Wait until h.maxTime is swapped to avoid minTime/maxTime races.
+ antiDeadlockTimeout := time.After(500 * time.Millisecond)
+ for h.maxTime.Load() == math.MinInt64 {
+ select {
+ case <-antiDeadlockTimeout:
+ return
+ default:
+ }
+ }
return
}
// Ensure that max time is initialized to at least the min time we just set.
- // Concurrent appenders may already have set it to a higher value.
h.maxTime.CompareAndSwap(math.MinInt64, t)
}
@@ -168,8 +178,6 @@ func (h *Head) appender() *headAppender {
headAppenderBase: headAppenderBase{
head: h,
minValidTime: minValidTime,
- mint: math.MaxInt64,
- maxt: math.MinInt64,
headMaxt: h.MaxTime(),
oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
seriesRefs: h.getRefSeriesBuffer(),
@@ -214,6 +222,9 @@ func (h *Head) getRefSeriesBuffer() []record.RefSeries {
}
func (h *Head) putRefSeriesBuffer(b []record.RefSeries) {
+ for i := range b { // Zero out to avoid retaining label data.
+ b[i].Labels = labels.EmptyLabels()
+ }
h.refSeriesPool.Put(b[:0])
}
@@ -257,6 +268,7 @@ func (h *Head) getHistogramBuffer() []record.RefHistogramSample {
}
func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) {
+ clear(b)
h.histogramsPool.Put(b[:0])
}
@@ -269,6 +281,7 @@ func (h *Head) getFloatHistogramBuffer() []record.RefFloatHistogramSample {
}
func (h *Head) putFloatHistogramBuffer(b []record.RefFloatHistogramSample) {
+ clear(b)
h.floatHistogramsPool.Put(b[:0])
}
@@ -281,6 +294,7 @@ func (h *Head) getMetadataBuffer() []record.RefMetadata {
}
func (h *Head) putMetadataBuffer(b []record.RefMetadata) {
+ clear(b)
h.metadataPool.Put(b[:0])
}
@@ -387,7 +401,6 @@ func (b *appendBatch) close(h *Head) {
type headAppenderBase struct {
head *Head
minValidTime int64 // No samples below this timestamp are allowed.
- mint, maxt int64
headMaxt int64 // We track it here to not take the lock for every sample appended.
oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.
@@ -471,13 +484,6 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
return 0, err
}
- if t < a.mint {
- a.mint = t
- }
- if t > a.maxt {
- a.maxt = t
- }
-
b := a.getCurrentBatch(stFloat, s.ref)
b.floats = append(b.floats, record.RefSample{
Ref: s.ref,
@@ -521,9 +527,6 @@ func (a *headAppender) AppendSTZeroSample(ref storage.SeriesRef, lset labels.Lab
return storage.SeriesRef(s.ref), storage.ErrOutOfOrderST
}
- if st > a.maxt {
- a.maxt = st
- }
b := a.getCurrentBatch(stFloat, s.ref)
b.floats = append(b.floats, record.RefSample{Ref: s.ref, T: st, V: 0.0})
b.floatSeries = append(b.floatSeries, s)
@@ -897,13 +900,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
b.floatHistogramSeries = append(b.floatHistogramSeries, s)
}
- if t < a.mint {
- a.mint = t
- }
- if t > a.maxt {
- a.maxt = t
- }
-
return storage.SeriesRef(s.ref), nil
}
@@ -1007,10 +1003,6 @@ func (a *headAppender) AppendHistogramSTZeroSample(ref storage.SeriesRef, lset l
b.floatHistogramSeries = append(b.floatHistogramSeries, s)
}
- if st > a.maxt {
- a.maxt = st
- }
-
return storage.SeriesRef(s.ref), nil
}
@@ -1843,7 +1835,8 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sa
if !sampleInOrder {
return sampleInOrder, chunkCreated
}
- s.app.Append(t, v)
+ // TODO(krajorama): pass ST.
+ s.app.Append(0, t, v)
c.maxTime = t
@@ -1885,7 +1878,8 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
prevApp = nil
}
- newChunk, recoded, s.app, _ = s.app.AppendHistogram(prevApp, t, h, false) // false=request a new chunk if needed
+ // TODO(krajorama): pass ST.
+ newChunk, recoded, s.app, _ = s.app.AppendHistogram(prevApp, 0, t, h, false) // false=request a new chunk if needed
s.lastHistogramValue = h
s.lastFloatHistogramValue = nil
@@ -1942,7 +1936,8 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
prevApp = nil
}
- newChunk, recoded, s.app, _ = s.app.AppendFloatHistogram(prevApp, t, fh, false) // False means request a new chunk if needed.
+ // TODO(krajorama): pass ST.
+ newChunk, recoded, s.app, _ = s.app.AppendFloatHistogram(prevApp, 0, t, fh, false) // False means request a new chunk if needed.
s.lastHistogramValue = nil
s.lastFloatHistogramValue = fh
@@ -2236,6 +2231,9 @@ func (s *memSeries) mmapChunks(chunkDiskMapper *chunks.ChunkDiskMapper) (count i
return count
}
+// TODO(bwplotka): Propagate errors correctly, even when they are async. Panicking here do occurs from time to time
+// and cause flaky tests with hidden root cause (unlocked mutexes when deferred closing).
+// We didn't have evidences of prod impact though, yet.
func handleChunkWriteError(err error) {
if err != nil && !errors.Is(err, chunks.ErrChunkDiskMapperClosed) {
panic(err)
diff --git a/tsdb/head_append_v2.go b/tsdb/head_append_v2.go
index 241fb42e97..87b62df536 100644
--- a/tsdb/head_append_v2.go
+++ b/tsdb/head_append_v2.go
@@ -17,7 +17,6 @@ import (
"context"
"errors"
"fmt"
- "math"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
@@ -89,8 +88,6 @@ func (h *Head) appenderV2() *headAppenderV2 {
headAppenderBase: headAppenderBase{
head: h,
minValidTime: minValidTime,
- mint: math.MaxInt64,
- maxt: math.MinInt64,
headMaxt: h.MaxTime(),
oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
seriesRefs: h.getRefSeriesBuffer(),
@@ -193,13 +190,6 @@ func (a *headAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t i
return 0, appErr
}
- if t < a.mint {
- a.mint = t
- }
- if t > a.maxt {
- a.maxt = t
- }
-
if isStale {
// For stale values we never attempt to process metadata/exemplars, claim the success.
return storage.SeriesRef(s.ref), nil
@@ -210,9 +200,6 @@ func (a *headAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t i
// Currently only exemplars can return partial errors.
partialErr = a.appendExemplars(s, opts.Exemplars)
}
-
- // TODO(bwplotka): Move/reuse metadata tests from scrape, once scrape adopts AppenderV2.
- // Currently tsdb package does not test metadata.
if a.head.opts.EnableMetadataWALRecords && !opts.Metadata.IsEmpty() {
s.Lock()
metaChanged := s.meta == nil || !s.meta.Equals(opts.Metadata)
@@ -323,6 +310,7 @@ func (a *headAppenderV2) appendExemplars(s *memSeries, exemplar []exemplar.Exemp
if err := a.head.exemplars.ValidateExemplar(s.labels(), e); err != nil {
if !errors.Is(err, storage.ErrDuplicateExemplar) && !errors.Is(err, storage.ErrExemplarsDisabled) {
// Except duplicates, return partial errors.
+ // TODO(bwplotka): Add exemplar info into error.
errs = append(errs, err)
continue
}
@@ -389,10 +377,6 @@ func (a *headAppenderV2) bestEffortAppendSTZeroSample(s *memSeries, ls labels.La
a.head.logger.Debug("Error when appending ST", "series", s.lset.String(), "st", st, "t", t, "err", err)
return
}
-
- if st > a.maxt {
- a.maxt = st
- }
}
var _ storage.GetRef = &headAppenderV2{}
diff --git a/tsdb/head_append_v2_test.go b/tsdb/head_append_v2_test.go
index 33bc3aec38..082d756e60 100644
--- a/tsdb/head_append_v2_test.go
+++ b/tsdb/head_append_v2_test.go
@@ -37,7 +37,6 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
- "golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
@@ -53,209 +52,17 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
+ "github.com/prometheus/prometheus/util/testutil/synctest"
)
-// TODO(bwplotka): Ensure non-ported tests are not deleted from db_test.go when removing AppenderV1 flow (#17632),
+// TODO(bwplotka): Ensure non-ported tests are not deleted from head_test.go when removing AppenderV1 flow (#17632),
// for example:
// * TestChunkNotFoundHeadGCRace
// * TestHeadSeriesChunkRace
// * TestHeadLabelValuesWithMatchers
// * TestHeadLabelNamesWithMatchers
// * TestHeadShardedPostings
-
-// TestHeadAppenderV2_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples,
-// this means in total it generates 4000 chunks because with a step of 15s there are 4 chunks per block per series.
-// While appending the samples to the head it concurrently queries them from multiple go routines and verifies that the
-// returned results are correct.
-func TestHeadAppenderV2_HighConcurrencyReadAndWrite(t *testing.T) {
- head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
-
- seriesCnt := 1000
- readConcurrency := 2
- writeConcurrency := 10
- startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
- qryRange := uint64(5 * time.Minute.Milliseconds())
- step := uint64(15 * time.Second / time.Millisecond)
- endTs := startTs + uint64(DefaultBlockDuration)
-
- labelSets := make([]labels.Labels, seriesCnt)
- for i := range seriesCnt {
- labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
- }
-
- head.Init(0)
-
- g, ctx := errgroup.WithContext(context.Background())
- whileNotCanceled := func(f func() (bool, error)) error {
- for ctx.Err() == nil {
- cont, err := f()
- if err != nil {
- return err
- }
- if !cont {
- return nil
- }
- }
- return nil
- }
-
- // Create one channel for each write worker, the channels will be used by the coordinator
- // go routine to coordinate which timestamps each write worker has to write.
- writerTsCh := make([]chan uint64, writeConcurrency)
- for writerTsChIdx := range writerTsCh {
- writerTsCh[writerTsChIdx] = make(chan uint64)
- }
-
- // workerReadyWg is used to synchronize the start of the test,
- // we only start the test once all workers signal that they're ready.
- var workerReadyWg sync.WaitGroup
- workerReadyWg.Add(writeConcurrency + readConcurrency)
-
- // Start the write workers.
- for wid := range writeConcurrency {
- // Create copy of workerID to be used by worker routine.
- workerID := wid
-
- g.Go(func() error {
- // The label sets which this worker will write.
- workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
-
- // Signal that this worker is ready.
- workerReadyWg.Done()
-
- return whileNotCanceled(func() (bool, error) {
- ts, ok := <-writerTsCh[workerID]
- if !ok {
- return false, nil
- }
-
- app := head.AppenderV2(ctx)
- for i := range workerLabelSets {
- // We also use the timestamp as the sample value.
- _, err := app.Append(0, workerLabelSets[i], 0, int64(ts), float64(ts), nil, nil, storage.AOptions{})
- if err != nil {
- return false, fmt.Errorf("Error when appending to head: %w", err)
- }
- }
-
- return true, app.Commit()
- })
- })
- }
-
- // queryHead is a helper to query the head for a given time range and labelset.
- queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
- q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
- if err != nil {
- return nil, err
- }
- return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
- }
-
- // readerTsCh will be used by the coordinator go routine to coordinate which timestamps the reader should read.
- readerTsCh := make(chan uint64)
-
- // Start the read workers.
- for wid := range readConcurrency {
- // Create copy of threadID to be used by worker routine.
- workerID := wid
-
- g.Go(func() error {
- querySeriesRef := (seriesCnt / readConcurrency) * workerID
-
- // Signal that this worker is ready.
- workerReadyWg.Done()
-
- return whileNotCanceled(func() (bool, error) {
- ts, ok := <-readerTsCh
- if !ok {
- return false, nil
- }
-
- querySeriesRef = (querySeriesRef + 1) % seriesCnt
- lbls := labelSets[querySeriesRef]
- // lbls has a single entry; extract it so we can run a query.
- var lbl labels.Label
- lbls.Range(func(l labels.Label) {
- lbl = l
- })
- samples, err := queryHead(ts-qryRange, ts, lbl)
- if err != nil {
- return false, err
- }
-
- if len(samples) != 1 {
- return false, fmt.Errorf("expected 1 series, got %d", len(samples))
- }
-
- series := lbls.String()
- expectSampleCnt := qryRange/step + 1
- if expectSampleCnt != uint64(len(samples[series])) {
- return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
- }
-
- for sampleIdx, sample := range samples[series] {
- expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
- if sample.T() != int64(expectedValue) {
- return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
- }
- if sample.F() != float64(expectedValue) {
- return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
- }
- }
-
- return true, nil
- })
- })
- }
-
- // Start the coordinator go routine.
- g.Go(func() error {
- currTs := startTs
-
- defer func() {
- // End of the test, close all channels to stop the workers.
- for _, ch := range writerTsCh {
- close(ch)
- }
- close(readerTsCh)
- }()
-
- // Wait until all workers are ready to start the test.
- workerReadyWg.Wait()
- return whileNotCanceled(func() (bool, error) {
- // Send the current timestamp to each of the writers.
- for _, ch := range writerTsCh {
- select {
- case ch <- currTs:
- case <-ctx.Done():
- return false, nil
- }
- }
-
- // Once data for at least has been ingested, send the current timestamp to the readers.
- if currTs > startTs+qryRange {
- select {
- case readerTsCh <- currTs - step:
- case <-ctx.Done():
- return false, nil
- }
- }
-
- currTs += step
- if currTs > endTs {
- return false, nil
- }
-
- return true, nil
- })
- })
-
- require.NoError(t, g.Wait())
-}
+// * TestHead_HighConcurrencyReadAndWrite
func TestHeadAppenderV2_WALMultiRef(t *testing.T) {
head, w := newTestHead(t, 1000, compression.None, false)
@@ -312,8 +119,8 @@ func TestHeadAppenderV2_WALMultiRef(t *testing.T) {
// The samples before the new ref should be discarded since Head truncation
// happens only after compacting the Head.
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {
- sample{1700, 3, nil, nil},
- sample{2000, 4, nil, nil},
+ sample{0, 1700, 3, nil, nil},
+ sample{0, 2000, 4, nil, nil},
}}, series)
}
@@ -352,7 +159,6 @@ func TestHeadAppenderV2_ActiveAppenders(t *testing.T) {
func TestHeadAppenderV2_RaceBetweenSeriesCreationAndGC(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
const totalSeries = 100_000
@@ -395,7 +201,6 @@ func TestHeadAppenderV2_CanGCSeriesCreatedWithoutSamples(t *testing.T) {
t.Run(op, func(t *testing.T) {
chunkRange := time.Hour.Milliseconds()
head, _ := newTestHead(t, chunkRange, compression.None, true)
- t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
@@ -605,7 +410,7 @@ func TestHeadAppenderV2_DeleteUntilCurrMax(t *testing.T) {
it = exps.Iterator(nil)
resSamples, err := storage.ExpandSamples(it, newSample)
require.NoError(t, err)
- require.Equal(t, []chunks.Sample{sample{11, 1, nil, nil}}, resSamples)
+ require.Equal(t, []chunks.Sample{sample{0, 11, 1, nil, nil}}, resSamples)
for res.Next() {
}
require.NoError(t, res.Err())
@@ -722,7 +527,7 @@ func TestHeadAppenderV2_Delete_e2e(t *testing.T) {
v := rand.Float64()
_, err := app.Append(0, ls, 0, ts, v, nil, nil, storage.AOptions{})
require.NoError(t, err)
- series = append(series, sample{ts, v, nil, nil})
+ series = append(series, sample{0, ts, v, nil, nil})
ts += rand.Int63n(timeInterval) + 1
}
seriesMap[labels.New(l...).String()] = series
@@ -1520,7 +1325,7 @@ func TestDataMissingOnQueryDuringCompaction_AppenderV2(t *testing.T) {
ref, err = app.Append(ref, labels.FromStrings("a", "b"), 0, ts, float64(i), nil, nil, storage.AOptions{})
require.NoError(t, err)
maxt = ts
- expSamples = append(expSamples, sample{ts, float64(i), nil, nil})
+ expSamples = append(expSamples, sample{0, ts, float64(i), nil, nil})
}
require.NoError(t, app.Commit())
@@ -1627,17 +1432,35 @@ func TestWaitForPendingReadersInTimeRange_AppenderV2(t *testing.T) {
}
for _, c := range cases {
t.Run(fmt.Sprintf("mint=%d,maxt=%d,shouldWait=%t", c.mint, c.maxt, c.shouldWait), func(t *testing.T) {
+ // checkWaiting verifies WaitForPendingReadersInTimeRange behavior using synctest
+ // for deterministic time control. The function should block while an overlapping
+ // querier is open and return immediately when there's no overlap.
checkWaiting := func(cl io.Closer) {
- var waitOver atomic.Bool
- go func() {
- db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
- waitOver.Store(true)
- }()
- <-time.After(550 * time.Millisecond)
- require.Equal(t, !c.shouldWait, waitOver.Load())
- require.NoError(t, cl.Close())
- <-time.After(550 * time.Millisecond)
- require.True(t, waitOver.Load())
+ synctest.Test(t, func(t *testing.T) {
+ var waitOver atomic.Bool
+ go func() {
+ db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
+ waitOver.Store(true)
+ }()
+
+ // Wait for goroutine to either complete (no overlap) or block on Sleep (overlap).
+ synctest.Wait()
+
+ if c.shouldWait {
+ require.False(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should block while overlapping querier is open")
+ require.NoError(t, cl.Close())
+ // Advance fake time past the 500ms poll interval, then let goroutine process.
+ time.Sleep(time.Second)
+ synctest.Wait()
+ require.True(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should complete after querier is closed")
+ } else {
+ require.True(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should return immediately when no overlap")
+ require.NoError(t, cl.Close())
+ }
+ })
}
q, err := db.Querier(c.mint, c.maxt)
@@ -1864,7 +1687,8 @@ func TestHeadAppenderV2_Append_Histogram(t *testing.T) {
func TestHistogramInWALAndMmapChunk_AppenderV2(t *testing.T) {
head, _ := newTestHead(t, 3000, compression.None, false)
t.Cleanup(func() {
- require.NoError(t, head.Close())
+ // Captures head by reference, so it closes the final head after restarts.
+ _ = head.Close()
})
require.NoError(t, head.Init(0))
@@ -2011,9 +1835,10 @@ func TestHistogramInWALAndMmapChunk_AppenderV2(t *testing.T) {
}
// Restart head.
+ walDir := head.wal.Dir()
require.NoError(t, head.Close())
startHead := func() {
- w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ w, err := wlog.NewSize(nil, nil, walDir, 32768, compression.None)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
@@ -2166,17 +1991,17 @@ func TestChunkSnapshot_AppenderV2(t *testing.T) {
aOpts.Exemplars = []exemplar.Exemplar{newExemplar(lbls, ts)}
}
val := rand.Float64()
- expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
+ expSeries[lblStr] = append(expSeries[lblStr], sample{0, ts, val, nil, nil})
_, err := app.Append(0, lbls, 0, ts, val, nil, nil, aOpts)
require.NoError(t, err)
hist := histograms[int(ts)]
- expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil})
+ expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{0, ts, 0, hist, nil})
_, err = app.Append(0, lblsHist, 0, ts, 0, hist, nil, storage.AOptions{})
require.NoError(t, err)
floatHist := floatHistogram[int(ts)]
- expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist})
+ expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{0, ts, 0, nil, floatHist})
_, err = app.Append(0, lblsFloatHist, 0, ts, 0, nil, floatHist, storage.AOptions{})
require.NoError(t, err)
@@ -2244,17 +2069,17 @@ func TestChunkSnapshot_AppenderV2(t *testing.T) {
aOpts.Exemplars = []exemplar.Exemplar{newExemplar(lbls, ts)}
}
val := rand.Float64()
- expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
+ expSeries[lblStr] = append(expSeries[lblStr], sample{0, ts, val, nil, nil})
_, err := app.Append(0, lbls, 0, ts, val, nil, nil, aOpts)
require.NoError(t, err)
hist := histograms[int(ts)]
- expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil})
+ expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{0, ts, 0, hist, nil})
_, err = app.Append(0, lblsHist, 0, ts, 0, hist, nil, storage.AOptions{})
require.NoError(t, err)
floatHist := floatHistogram[int(ts)]
- expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist})
+ expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{0, ts, 0, nil, floatHist})
_, err = app.Append(0, lblsFloatHist, 0, ts, 0, nil, floatHist, storage.AOptions{})
require.NoError(t, err)
@@ -4081,7 +3906,6 @@ func TestWALSampleAndExemplarOrder_AppenderV2(t *testing.T) {
func TestHeadAppenderV2_Append_FloatWithSameTimestampAsPreviousHistogram(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- t.Cleanup(func() { head.Close() })
ls := labels.FromStrings(labels.MetricName, "test")
@@ -4112,10 +3936,18 @@ func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
// Make sure counter resets hints are non-zero, so we can detect ST histogram samples.
testHistogram := tsdbutil.GenerateTestHistogram(1)
testHistogram.CounterResetHint = histogram.NotCounterReset
+
testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1)
testFloatHistogram.CounterResetHint = histogram.NotCounterReset
+
+ testNHCB := tsdbutil.GenerateTestCustomBucketsHistogram(1)
+ testNHCB.CounterResetHint = histogram.NotCounterReset
+
+ testFloatNHCB := tsdbutil.GenerateTestCustomBucketsFloatHistogram(1)
+ testFloatNHCB.CounterResetHint = histogram.NotCounterReset
+
// TODO(beorn7): Once issue #15346 is fixed, the CounterResetHint of the
- // following two zero histograms should be histogram.CounterReset.
+ // following zero histograms should be histogram.CounterReset.
testZeroHistogram := &histogram.Histogram{
Schema: testHistogram.Schema,
ZeroThreshold: testHistogram.ZeroThreshold,
@@ -4132,6 +3964,19 @@ func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
PositiveBuckets: []float64{0, 0, 0, 0},
NegativeBuckets: []float64{0, 0, 0, 0},
}
+ testZeroNHCB := &histogram.Histogram{
+ Schema: testNHCB.Schema,
+ PositiveSpans: testNHCB.PositiveSpans,
+ PositiveBuckets: []int64{0, 0, 0, 0},
+ CustomValues: testNHCB.CustomValues,
+ }
+ testZeroFloatNHCB := &histogram.FloatHistogram{
+ Schema: testFloatNHCB.Schema,
+ PositiveSpans: testFloatNHCB.PositiveSpans,
+ PositiveBuckets: []float64{0, 0, 0, 0},
+ CustomValues: testFloatNHCB.CustomValues,
+ }
+
type appendableSamples struct {
ts int64
fSample float64
@@ -4184,6 +4029,34 @@ func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
}
}(),
},
+ {
+ name: "In order ct+normal sample/NHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testNHCB, st: 1},
+ {ts: 101, h: testNHCB, st: 1},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, h: testZeroNHCB},
+ sample{t: 100, h: testNHCB},
+ sample{t: 101, h: testNHCB},
+ }
+ }(),
+ },
+ {
+ name: "In order ct+normal sample/floatNHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatNHCB, st: 1},
+ {ts: 101, fh: testFloatNHCB, st: 1},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, fh: testZeroFloatNHCB},
+ sample{t: 100, fh: testFloatNHCB},
+ sample{t: 101, fh: testFloatNHCB},
+ }
+ }(),
+ },
{
name: "Consecutive appends with same st ignore st/floatSample",
appendableSamples: []appendableSamples{
@@ -4224,6 +4097,34 @@ func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
}
}(),
},
+ {
+ name: "Consecutive appends with same st ignore st/NHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testNHCB, st: 1},
+ {ts: 101, h: testNHCB, st: 1},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, h: testZeroNHCB},
+ sample{t: 100, h: testNHCB},
+ sample{t: 101, h: testNHCB},
+ }
+ }(),
+ },
+ {
+ name: "Consecutive appends with same st ignore st/floatNHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatNHCB, st: 1},
+ {ts: 101, fh: testFloatNHCB, st: 1},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, fh: testZeroFloatNHCB},
+ sample{t: 100, fh: testFloatNHCB},
+ sample{t: 101, fh: testFloatNHCB},
+ }
+ }(),
+ },
{
name: "Consecutive appends with newer st do not ignore st/floatSample",
appendableSamples: []appendableSamples{
@@ -4263,6 +4164,32 @@ func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
sample{t: 102, fh: testFloatHistogram},
},
},
+ {
+ name: "Consecutive appends with newer st do not ignore st/NHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testNHCB, st: 1},
+ {ts: 102, h: testNHCB, st: 101},
+ },
+ expectedSamples: []chunks.Sample{
+ sample{t: 1, h: testZeroNHCB},
+ sample{t: 100, h: testNHCB},
+ sample{t: 101, h: testZeroNHCB},
+ sample{t: 102, h: testNHCB},
+ },
+ },
+ {
+ name: "Consecutive appends with newer st do not ignore st/floatNHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatNHCB, st: 1},
+ {ts: 102, fh: testFloatNHCB, st: 101},
+ },
+ expectedSamples: []chunks.Sample{
+ sample{t: 1, fh: testZeroFloatNHCB},
+ sample{t: 100, fh: testFloatNHCB},
+ sample{t: 101, fh: testZeroFloatNHCB},
+ sample{t: 102, fh: testFloatNHCB},
+ },
+ },
{
name: "ST equals to previous sample timestamp is ignored/floatSample",
appendableSamples: []appendableSamples{
@@ -4303,6 +4230,34 @@ func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
}
}(),
},
+ {
+ name: "ST equals to previous sample timestamp is ignored/NHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testNHCB, st: 1},
+ {ts: 101, h: testNHCB, st: 100},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, h: testZeroNHCB},
+ sample{t: 100, h: testNHCB},
+ sample{t: 101, h: testNHCB},
+ }
+ }(),
+ },
+ {
+ name: "ST equals to previous sample timestamp is ignored/floatNHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatNHCB, st: 1},
+ {ts: 101, fh: testFloatNHCB, st: 100},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, fh: testZeroFloatNHCB},
+ sample{t: 100, fh: testFloatNHCB},
+ sample{t: 101, fh: testFloatNHCB},
+ }
+ }(),
+ },
{
name: "ST lower than minValidTime/float",
appendableSamples: []appendableSamples{
@@ -4350,6 +4305,40 @@ func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
}
}(),
},
+ {
+ name: "ST lower than minValidTime/NHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testNHCB, st: -1},
+ },
+ // ST results ErrOutOfBounds, but ST append is best effort, so
+ // ST should be ignored, but sample appended.
+ expectedSamples: func() []chunks.Sample {
+ // NOTE: Without ST, on query, first histogram sample will get
+ // CounterReset adjusted to 0.
+ firstSample := testNHCB.Copy()
+ firstSample.CounterResetHint = histogram.UnknownCounterReset
+ return []chunks.Sample{
+ sample{t: 100, h: firstSample},
+ }
+ }(),
+ },
+ {
+ name: "ST lower than minValidTime/floatNHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatNHCB, st: -1},
+ },
+ // ST results ErrOutOfBounds, but ST append is best effort, so
+ // ST should be ignored, but sample appended.
+ expectedSamples: func() []chunks.Sample {
+ // NOTE: Without ST, on query, first histogram sample will get
+ // CounterReset adjusted to 0.
+ firstSample := testFloatNHCB.Copy()
+ firstSample.CounterResetHint = histogram.UnknownCounterReset
+ return []chunks.Sample{
+ sample{t: 100, fh: firstSample},
+ }
+ }(),
+ },
{
name: "ST duplicates an existing sample/float",
appendableSamples: []appendableSamples{
@@ -4403,6 +4392,44 @@ func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
}
}(),
},
+ {
+ name: "ST duplicates an existing sample/NHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testNHCB},
+ {ts: 200, h: testNHCB, st: 100},
+ },
+ // ST results ErrDuplicateSampleForTimestamp, but ST append is best effort, so
+ // ST should be ignored, but sample appended.
+ expectedSamples: func() []chunks.Sample {
+ // NOTE: Without ST, on query, first histogram sample will get
+ // CounterReset adjusted to 0.
+ firstSample := testNHCB.Copy()
+ firstSample.CounterResetHint = histogram.UnknownCounterReset
+ return []chunks.Sample{
+ sample{t: 100, h: firstSample},
+ sample{t: 200, h: testNHCB},
+ }
+ }(),
+ },
+ {
+ name: "ST duplicates an existing sample/floatNHCB",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatNHCB},
+ {ts: 200, fh: testFloatNHCB, st: 100},
+ },
+ // ST results ErrDuplicateSampleForTimestamp, but ST append is best effort, so
+ // ST should ignored, but sample appended.
+ expectedSamples: func() []chunks.Sample {
+ // NOTE: Without ST, on query, first histogram sample will get
+ // CounterReset adjusted to 0.
+ firstSample := testFloatNHCB.Copy()
+ firstSample.CounterResetHint = histogram.UnknownCounterReset
+ return []chunks.Sample{
+ sample{t: 100, fh: firstSample},
+ sample{t: 200, fh: testFloatNHCB},
+ }
+ }(),
+ },
} {
t.Run(tc.name, func(t *testing.T) {
opts := newTestHeadDefaultOptions(DefaultBlockDuration, false)
@@ -4489,7 +4516,8 @@ func testHeadAppenderV2AppendHistogramAndCommitConcurrency(t *testing.T, appendF
func TestHeadAppenderV2_NumStaleSeries(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() {
- require.NoError(t, head.Close())
+ // Captures head by reference, so it closes the final head after restarts.
+ _ = head.Close()
})
require.NoError(t, head.Init(0))
diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go
index dc0be0823a..d15f6cc310 100644
--- a/tsdb/head_bench_test.go
+++ b/tsdb/head_bench_test.go
@@ -230,7 +230,6 @@ func BenchmarkHeadAppender_AppendCommit(b *testing.B) {
opts := newTestHeadDefaultOptions(10000, false)
opts.EnableExemplarStorage = true // We benchmark with exemplars, benchmark with them.
h, _ := newTestHeadWithOptions(b, compression.None, opts)
- b.Cleanup(func() { require.NoError(b, h.Close()) })
ts := int64(1000)
diff --git a/tsdb/head_read.go b/tsdb/head_read.go
index 924b04bf0a..f0a1331fbb 100644
--- a/tsdb/head_read.go
+++ b/tsdb/head_read.go
@@ -22,6 +22,7 @@ import (
"sync"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
@@ -201,6 +202,112 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
return nil
}
+func (h *Head) staleIndex(mint, maxt int64, staleSeriesRefs []storage.SeriesRef) (*headStaleIndexReader, error) {
+ return &headStaleIndexReader{
+ headIndexReader: h.indexRange(mint, maxt),
+ staleSeriesRefs: staleSeriesRefs,
+ }, nil
+}
+
+// headStaleIndexReader gives the stale series that have no out-of-order data.
+// This is only used for stale series compaction at the moment, that will only ask for all
+// the series during compaction. So to make that efficient, this index reader requires the
+// pre-calculated list of stale series refs that can be returned without re-reading the Head.
+type headStaleIndexReader struct {
+ *headIndexReader
+ staleSeriesRefs []storage.SeriesRef
+}
+
+func (h *headStaleIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
+ // If all postings are requested, return the precalculated list.
+ k, v := index.AllPostingsKey()
+ if len(h.staleSeriesRefs) > 0 && name == k && len(values) == 1 && values[0] == v {
+ return index.NewListPostings(h.staleSeriesRefs), nil
+ }
+ seriesRefs, err := h.head.filterStaleSeriesAndSortPostings(h.head.postings.Postings(ctx, name, values...))
+ if err != nil {
+ return index.ErrPostings(err), err
+ }
+ return index.NewListPostings(seriesRefs), nil
+}
+
+func (h *headStaleIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings {
+ // Unused for compaction, so we don't need to optimise.
+ seriesRefs, err := h.head.filterStaleSeriesAndSortPostings(h.head.postings.PostingsForLabelMatching(ctx, name, match))
+ if err != nil {
+ return index.ErrPostings(err)
+ }
+ return index.NewListPostings(seriesRefs)
+}
+
+func (h *headStaleIndexReader) PostingsForAllLabelValues(ctx context.Context, name string) index.Postings {
+ // Unused for compaction, so we don't need to optimise.
+ seriesRefs, err := h.head.filterStaleSeriesAndSortPostings(h.head.postings.PostingsForAllLabelValues(ctx, name))
+ if err != nil {
+ return index.ErrPostings(err)
+ }
+ return index.NewListPostings(seriesRefs)
+}
+
+// filterStaleSeriesAndSortPostings returns the stale series references from the given postings
+// that also do not have any out-of-order data.
+func (h *Head) filterStaleSeriesAndSortPostings(p index.Postings) ([]storage.SeriesRef, error) {
+ series := make([]*memSeries, 0, 1024)
+
+ notFoundSeriesCount := 0
+ for p.Next() {
+ s := h.series.getByID(chunks.HeadSeriesRef(p.At()))
+ if s == nil {
+ notFoundSeriesCount++
+ continue
+ }
+
+ s.Lock()
+ if s.ooo != nil {
+ // Has out-of-order data; skip it because we cannot determine if a series
+ // is stale when it's getting out-of-order data.
+ s.Unlock()
+ continue
+ }
+
+ if value.IsStaleNaN(s.lastValue) ||
+ (s.lastHistogramValue != nil && value.IsStaleNaN(s.lastHistogramValue.Sum)) ||
+ (s.lastFloatHistogramValue != nil && value.IsStaleNaN(s.lastFloatHistogramValue.Sum)) {
+ series = append(series, s)
+ }
+ s.Unlock()
+ }
+ if notFoundSeriesCount > 0 {
+ h.logger.Debug("Looked up stale series not found", "count", notFoundSeriesCount)
+ }
+ if err := p.Err(); err != nil {
+ return nil, fmt.Errorf("expand postings: %w", err)
+ }
+
+ slices.SortFunc(series, func(a, b *memSeries) int {
+ return labels.Compare(a.labels(), b.labels())
+ })
+
+ refs := make([]storage.SeriesRef, 0, len(series))
+ for _, p := range series {
+ refs = append(refs, storage.SeriesRef(p.ref))
+ }
+ return refs, nil
+}
+
+// SortedPostings returns the postings as it is because we expect any postings obtained via
+// headStaleIndexReader to be already sorted.
+func (*headStaleIndexReader) SortedPostings(p index.Postings) index.Postings {
+ // All the postings function above already give the sorted list of postings.
+ return p
+}
+
+// SortedStaleSeriesRefsNoOOOData returns all the series refs of the stale series that do not have any out-of-order data.
+func (h *Head) SortedStaleSeriesRefsNoOOOData(ctx context.Context) ([]storage.SeriesRef, error) {
+ k, v := index.AllPostingsKey()
+ return h.filterStaleSeriesAndSortPostings(h.postings.Postings(ctx, k, v))
+}
+
func appendSeriesChunks(s *memSeries, mint, maxt int64, chks []chunks.Meta) []chunks.Meta {
for i, c := range s.mmappedChunks {
// Do not expose chunks that are outside of the specified range.
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index acdf0ee000..7b8ae0ecbd 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -44,6 +44,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -56,6 +57,7 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/testutil"
+ "github.com/prometheus/prometheus/util/testutil/synctest"
)
// newTestHeadDefaultOptions returns the HeadOptions that should be used by default in unit tests.
@@ -84,6 +86,12 @@ func newTestHeadWithOptions(t testing.TB, compressWAL compression.Type, opts *He
h, err := NewHead(nil, nil, wal, nil, opts, nil)
require.NoError(t, err)
+ t.Cleanup(func() {
+ // Use _ = h.Close() instead of require.NoError because some tests
+ // explicitly close the head as part of their test logic (e.g., to
+ // restart/reopen the head), and we don't want to fail on double-close.
+ _ = h.Close()
+ })
require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(chunks.HeadSeriesRef, chunks.ChunkDiskMapperRef, int64, int64, uint16, chunkenc.Encoding, bool) error {
return nil
@@ -95,9 +103,6 @@ func newTestHeadWithOptions(t testing.TB, compressWAL compression.Type, opts *He
func BenchmarkCreateSeries(b *testing.B) {
series := genSeries(b.N, 10, 0, 0)
h, _ := newTestHead(b, 10000, compression.None, false)
- b.Cleanup(func() {
- require.NoError(b, h.Close())
- })
b.ReportAllocs()
b.ResetTimer()
@@ -467,198 +472,242 @@ func BenchmarkLoadRealWLs(b *testing.B) {
}
}
+// TestHead_InitAppenderRace_ErrOutOfBounds tests against init races with maxTime vs minTime on empty head concurrent appends.
+// See: https://github.com/prometheus/prometheus/pull/17963
+func TestHead_InitAppenderRace_ErrOutOfBounds(t *testing.T) {
+ head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
+ require.NoError(t, head.Init(0))
+
+ ts := timestamp.FromTime(time.Now())
+ appendCycles := 100
+
+ g, ctx := errgroup.WithContext(t.Context())
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ for i := range 100 {
+ g.Go(func() error {
+ appends := 0
+ wg.Wait()
+ for ctx.Err() == nil && appends < appendCycles {
+ appends++
+ app := head.Appender(t.Context())
+ if _, err := app.Append(0, labels.FromStrings("__name__", strconv.Itoa(i)), ts, float64(ts)); err != nil {
+ return fmt.Errorf("error when appending to head: %w", err)
+ }
+ if err := app.Rollback(); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ wg.Done()
+ require.NoError(t, g.Wait())
+}
+
// TestHead_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples,
// this means in total it generates 4000 chunks because with a step of 15s there are 4 chunks per block per series.
// While appending the samples to the head it concurrently queries them from multiple go routines and verifies that the
// returned results are correct.
func TestHead_HighConcurrencyReadAndWrite(t *testing.T) {
- head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
+ for _, appV2 := range []bool{false, true} {
+ t.Run(fmt.Sprintf("appV2=%v", appV2), func(t *testing.T) {
+ head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- seriesCnt := 1000
- readConcurrency := 2
- writeConcurrency := 10
- startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
- qryRange := uint64(5 * time.Minute.Milliseconds())
- step := uint64(15 * time.Second / time.Millisecond)
- endTs := startTs + uint64(DefaultBlockDuration)
+ seriesCnt := 1000
+ readConcurrency := 2
+ writeConcurrency := 10
+ startTs := uint64(DefaultBlockDuration) // Start at the second block relative to the unix epoch.
+ qryRange := uint64(5 * time.Minute.Milliseconds())
+ step := uint64(15 * time.Second / time.Millisecond)
+ endTs := startTs + uint64(DefaultBlockDuration)
- labelSets := make([]labels.Labels, seriesCnt)
- for i := range seriesCnt {
- labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
- }
-
- head.Init(0)
-
- g, ctx := errgroup.WithContext(context.Background())
- whileNotCanceled := func(f func() (bool, error)) error {
- for ctx.Err() == nil {
- cont, err := f()
- if err != nil {
- return err
+ labelSets := make([]labels.Labels, seriesCnt)
+ for i := range seriesCnt {
+ labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
}
- if !cont {
+ require.NoError(t, head.Init(0))
+
+ g, ctx := errgroup.WithContext(t.Context())
+ whileNotCanceled := func(f func() (bool, error)) error {
+ for ctx.Err() == nil {
+ cont, err := f()
+ if err != nil {
+ return err
+ }
+ if !cont {
+ return nil
+ }
+ }
return nil
}
- }
- return nil
- }
- // Create one channel for each write worker, the channels will be used by the coordinator
- // go routine to coordinate which timestamps each write worker has to write.
- writerTsCh := make([]chan uint64, writeConcurrency)
- for writerTsChIdx := range writerTsCh {
- writerTsCh[writerTsChIdx] = make(chan uint64)
- }
+ // Create one channel for each write worker, the channels will be used by the coordinator
+ // go routine to coordinate which timestamps each write worker has to write.
+ writerTsCh := make([]chan uint64, writeConcurrency)
+ for writerTsChIdx := range writerTsCh {
+ writerTsCh[writerTsChIdx] = make(chan uint64)
+ }
- // workerReadyWg is used to synchronize the start of the test,
- // we only start the test once all workers signal that they're ready.
- var workerReadyWg sync.WaitGroup
- workerReadyWg.Add(writeConcurrency + readConcurrency)
+ // workerReadyWg is used to synchronize the start of the test,
+ // we only start the test once all workers signal that they're ready.
+ var workerReadyWg sync.WaitGroup
+ workerReadyWg.Add(writeConcurrency + readConcurrency)
- // Start the write workers.
- for wid := range writeConcurrency {
- // Create copy of workerID to be used by worker routine.
- workerID := wid
+ // Start the write workers.
+ for wid := range writeConcurrency {
+ // Create copy of workerID to be used by worker routine.
+ workerID := wid
- g.Go(func() error {
- // The label sets which this worker will write.
- workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
+ g.Go(func() error {
+ // The label sets which this worker will write.
+ workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
- // Signal that this worker is ready.
- workerReadyWg.Done()
+ // Signal that this worker is ready.
+ workerReadyWg.Done()
- return whileNotCanceled(func() (bool, error) {
- ts, ok := <-writerTsCh[workerID]
- if !ok {
- return false, nil
- }
+ return whileNotCanceled(func() (bool, error) {
+ ts, ok := <-writerTsCh[workerID]
+ if !ok {
+ return false, nil
+ }
- app := head.Appender(ctx)
- for i := range workerLabelSets {
- // We also use the timestamp as the sample value.
- _, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts))
- if err != nil {
- return false, fmt.Errorf("Error when appending to head: %w", err)
- }
- }
+ if appV2 {
+ app := head.AppenderV2(ctx)
+ for i := range workerLabelSets {
+ // We also use the timestamp as the sample value.
+ if _, err := app.Append(0, workerLabelSets[i], 0, int64(ts), float64(ts), nil, nil, storage.AOptions{}); err != nil {
+ return false, fmt.Errorf("error when appending (V2) to head: %w", err)
+ }
+ }
+ return true, app.Commit()
+ }
- return true, app.Commit()
- })
- })
- }
-
- // queryHead is a helper to query the head for a given time range and labelset.
- queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
- q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
- if err != nil {
- return nil, err
- }
- return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
- }
-
- // readerTsCh will be used by the coordinator go routine to coordinate which timestamps the reader should read.
- readerTsCh := make(chan uint64)
-
- // Start the read workers.
- for wid := range readConcurrency {
- // Create copy of threadID to be used by worker routine.
- workerID := wid
-
- g.Go(func() error {
- querySeriesRef := (seriesCnt / readConcurrency) * workerID
-
- // Signal that this worker is ready.
- workerReadyWg.Done()
-
- return whileNotCanceled(func() (bool, error) {
- ts, ok := <-readerTsCh
- if !ok {
- return false, nil
- }
-
- querySeriesRef = (querySeriesRef + 1) % seriesCnt
- lbls := labelSets[querySeriesRef]
- // lbls has a single entry; extract it so we can run a query.
- var lbl labels.Label
- lbls.Range(func(l labels.Label) {
- lbl = l
+ app := head.Appender(ctx)
+ for i := range workerLabelSets {
+ // We also use the timestamp as the sample value.
+ if _, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts)); err != nil {
+ return false, fmt.Errorf("error when appending to head: %w", err)
+ }
+ }
+ return true, app.Commit()
+ })
})
- samples, err := queryHead(ts-qryRange, ts, lbl)
+ }
+
+ // queryHead is a helper to query the head for a given time range and labelset.
+ queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
+ q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
if err != nil {
- return false, err
+ return nil, err
}
+ return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
+ }
- if len(samples) != 1 {
- return false, fmt.Errorf("expected 1 series, got %d", len(samples))
- }
+ // readerTsCh will be used by the coordinator go routine to coordinate which timestamps the reader should read.
+ readerTsCh := make(chan uint64)
- series := lbls.String()
- expectSampleCnt := qryRange/step + 1
- if expectSampleCnt != uint64(len(samples[series])) {
- return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
- }
+ // Start the read workers.
+ for wid := range readConcurrency {
+ // Create copy of threadID to be used by worker routine.
+ workerID := wid
- for sampleIdx, sample := range samples[series] {
- expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
- if sample.T() != int64(expectedValue) {
- return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
+ g.Go(func() error {
+ querySeriesRef := (seriesCnt / readConcurrency) * workerID
+
+ // Signal that this worker is ready.
+ workerReadyWg.Done()
+
+ return whileNotCanceled(func() (bool, error) {
+ ts, ok := <-readerTsCh
+ if !ok {
+ return false, nil
+ }
+
+ querySeriesRef = (querySeriesRef + 1) % seriesCnt
+ lbls := labelSets[querySeriesRef]
+ // lbls has a single entry; extract it so we can run a query.
+ var lbl labels.Label
+ lbls.Range(func(l labels.Label) {
+ lbl = l
+ })
+ samples, err := queryHead(ts-qryRange, ts, lbl)
+ if err != nil {
+ return false, err
+ }
+
+ if len(samples) != 1 {
+ return false, fmt.Errorf("expected 1 series, got %d", len(samples))
+ }
+
+ series := lbls.String()
+ expectSampleCnt := qryRange/step + 1
+ if expectSampleCnt != uint64(len(samples[series])) {
+ return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
+ }
+
+ for sampleIdx, sample := range samples[series] {
+ expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
+ if sample.T() != int64(expectedValue) {
+ return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
+ }
+ if sample.F() != float64(expectedValue) {
+ return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
+ }
+ }
+
+ return true, nil
+ })
+ })
+ }
+
+ // Start the coordinator go routine.
+ g.Go(func() error {
+ currTs := startTs
+
+ defer func() {
+ // End of the test, close all channels to stop the workers.
+ for _, ch := range writerTsCh {
+ close(ch)
}
- if sample.F() != float64(expectedValue) {
- return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
- }
- }
+ close(readerTsCh)
+ }()
- return true, nil
+ // Wait until all workers are ready to start the test.
+ workerReadyWg.Wait()
+
+ return whileNotCanceled(func() (bool, error) {
+ // Send the current timestamp to each of the writers.
+ for _, ch := range writerTsCh {
+ select {
+ case ch <- currTs:
+ case <-ctx.Done():
+ return false, nil
+ }
+ }
+
+ // Once data for at least has been ingested, send the current timestamp to the readers.
+ if currTs > startTs+qryRange {
+ select {
+ case readerTsCh <- currTs - step:
+ case <-ctx.Done():
+ return false, nil
+ }
+ }
+
+ currTs += step
+ if currTs > endTs {
+ return false, nil
+ }
+
+ return true, nil
+ })
})
+
+ require.NoError(t, g.Wait())
})
}
-
- // Start the coordinator go routine.
- g.Go(func() error {
- currTs := startTs
-
- defer func() {
- // End of the test, close all channels to stop the workers.
- for _, ch := range writerTsCh {
- close(ch)
- }
- close(readerTsCh)
- }()
-
- // Wait until all workers are ready to start the test.
- workerReadyWg.Wait()
- return whileNotCanceled(func() (bool, error) {
- // Send the current timestamp to each of the writers.
- for _, ch := range writerTsCh {
- select {
- case ch <- currTs:
- case <-ctx.Done():
- return false, nil
- }
- }
-
- // Once data for at least has been ingested, send the current timestamp to the readers.
- if currTs > startTs+qryRange {
- select {
- case readerTsCh <- currTs - step:
- case <-ctx.Done():
- return false, nil
- }
- }
-
- currTs += step
- if currTs > endTs {
- return false, nil
- }
-
- return true, nil
- })
- })
-
- require.NoError(t, g.Wait())
}
func TestHead_ReadWAL(t *testing.T) {
@@ -703,9 +752,6 @@ func TestHead_ReadWAL(t *testing.T) {
}
head, w := newTestHead(t, 1000, compress, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
populateTestWL(t, w, entries, nil)
@@ -745,7 +791,7 @@ func TestHead_ReadWAL(t *testing.T) {
// Verify samples and exemplar for series 10.
c, _, _, err := s10.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
- require.Equal(t, []sample{{100, 2, nil, nil}, {101, 5, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
+ require.Equal(t, []sample{{0, 100, 2, nil, nil}, {0, 101, 5, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
q, err := head.ExemplarQuerier(context.Background())
require.NoError(t, err)
@@ -758,14 +804,14 @@ func TestHead_ReadWAL(t *testing.T) {
// Verify samples for series 50
c, _, _, err = s50.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
- require.Equal(t, []sample{{101, 6, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
+ require.Equal(t, []sample{{0, 101, 6, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
// Verify records for series 100 and its duplicate, series 101.
// The samples before the new series record should be discarded since a duplicate record
// is only possible when old samples were compacted.
c, _, _, err = s100.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
- require.Equal(t, []sample{{101, 7, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
+ require.Equal(t, []sample{{0, 101, 7, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
q, err = head.ExemplarQuerier(context.Background())
require.NoError(t, err)
@@ -841,8 +887,8 @@ func TestHead_WALMultiRef(t *testing.T) {
// The samples before the new ref should be discarded since Head truncation
// happens only after compacting the Head.
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {
- sample{1700, 3, nil, nil},
- sample{2000, 4, nil, nil},
+ sample{0, 1700, 3, nil, nil},
+ sample{0, 2000, 4, nil, nil},
}}, series)
}
@@ -1056,9 +1102,6 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
h, w := newTestHead(t, 1000, compression.None, false)
- t.Cleanup(func() {
- require.NoError(t, h.Close())
- })
populateTestWL(t, w, tc.walEntries, nil)
first, _, err := wlog.Segments(w.Dir())
@@ -1134,9 +1177,6 @@ func TestHead_KeepSeriesInWALCheckpoint(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
- t.Cleanup(func() {
- require.NoError(t, h.Close())
- })
if tc.prepare != nil {
tc.prepare(t, h)
@@ -1152,7 +1192,6 @@ func TestHead_KeepSeriesInWALCheckpoint(t *testing.T) {
func TestHead_ActiveAppenders(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- defer head.Close()
require.NoError(t, head.Init(0))
@@ -1185,7 +1224,6 @@ func TestHead_ActiveAppenders(t *testing.T) {
func TestHead_RaceBetweenSeriesCreationAndGC(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
const totalSeries = 100_000
@@ -1228,7 +1266,6 @@ func TestHead_CanGarbagecollectSeriesCreatedWithoutSamples(t *testing.T) {
t.Run(op, func(t *testing.T) {
chunkRange := time.Hour.Milliseconds()
head, _ := newTestHead(t, chunkRange, compression.None, true)
- t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
@@ -1267,7 +1304,6 @@ func TestHead_UnknownWALRecord(t *testing.T) {
head, w := newTestHead(t, 1000, compression.None, false)
w.Log([]byte{255, 42})
require.NoError(t, head.Init(0))
- require.NoError(t, head.Close())
}
// BenchmarkHead_Truncate is quite heavy, so consider running it with
@@ -1277,9 +1313,6 @@ func BenchmarkHead_Truncate(b *testing.B) {
prepare := func(b *testing.B, churn int) *Head {
h, _ := newTestHead(b, 1000, compression.None, false)
- b.Cleanup(func() {
- require.NoError(b, h.Close())
- })
h.initTime(0)
@@ -1346,9 +1379,6 @@ func BenchmarkHead_Truncate(b *testing.B) {
func TestHead_Truncate(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
h.initTime(0)
@@ -1671,9 +1701,6 @@ func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
},
}
head, w := newTestHead(t, 1000, compress, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
populateTestWL(t, w, entries, nil)
@@ -1818,9 +1845,6 @@ func TestHeadDeleteSimple(t *testing.T) {
func TestDeleteUntilCurMax(t *testing.T) {
hb, _ := newTestHead(t, 1000000, compression.None, false)
- defer func() {
- require.NoError(t, hb.Close())
- }()
numSamples := int64(10)
app := hb.Appender(context.Background())
@@ -1859,7 +1883,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
it = exps.Iterator(nil)
resSamples, err := storage.ExpandSamples(it, newSample)
require.NoError(t, err)
- require.Equal(t, []chunks.Sample{sample{11, 1, nil, nil}}, resSamples)
+ require.Equal(t, []chunks.Sample{sample{0, 11, 1, nil, nil}}, resSamples)
for res.Next() {
}
require.NoError(t, res.Err())
@@ -1963,9 +1987,6 @@ func TestDelete_e2e(t *testing.T) {
}
hb, _ := newTestHead(t, 100000, compression.None, false)
- defer func() {
- require.NoError(t, hb.Close())
- }()
app := hb.Appender(context.Background())
for _, l := range lbls {
@@ -1976,7 +1997,7 @@ func TestDelete_e2e(t *testing.T) {
v := rand.Float64()
_, err := app.Append(0, ls, ts, v)
require.NoError(t, err)
- series = append(series, sample{ts, v, nil, nil})
+ series = append(series, sample{0, ts, v, nil, nil})
ts += rand.Int63n(timeInterval) + 1
}
seriesMap[labels.New(l...).String()] = series
@@ -2331,9 +2352,6 @@ func TestGCChunkAccess(t *testing.T) {
// Put a chunk, select it. GC it and then access it.
const chunkRange = 1000
h, _ := newTestHead(t, chunkRange, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
cOpts := chunkOpts{
chunkDiskMapper: h.chunkDiskMapper,
@@ -2390,9 +2408,6 @@ func TestGCSeriesAccess(t *testing.T) {
// Put a series, select it. GC it and then access it.
const chunkRange = 1000
h, _ := newTestHead(t, chunkRange, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
cOpts := chunkOpts{
chunkDiskMapper: h.chunkDiskMapper,
@@ -2449,9 +2464,6 @@ func TestGCSeriesAccess(t *testing.T) {
func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
h.initTime(0)
@@ -2479,9 +2491,6 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
h.initTime(0)
@@ -2512,9 +2521,6 @@ func TestHead_LogRollback(t *testing.T) {
for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
h, w := newTestHead(t, 1000, compress, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
app := h.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("a", "b"), 1, 2)
@@ -2534,9 +2540,6 @@ func TestHead_LogRollback(t *testing.T) {
func TestHead_ReturnsSortedLabelValues(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
h.initTime(0)
@@ -2807,9 +2810,6 @@ func TestHeadReadWriterRepair(t *testing.T) {
func TestNewWalSegmentOnTruncate(t *testing.T) {
h, wal := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
add := func(ts int64) {
app := h.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("a", "b"), ts, 0)
@@ -2837,9 +2837,6 @@ func TestNewWalSegmentOnTruncate(t *testing.T) {
func TestAddDuplicateLabelName(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
add := func(labels labels.Labels, labelName string) {
app := h.Appender(context.Background())
@@ -3035,9 +3032,6 @@ func TestIsolationRollback(t *testing.T) {
// Rollback after a failed append and test if the low watermark has progressed anyway.
hb, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, hb.Close())
- }()
app := hb.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
@@ -3066,9 +3060,6 @@ func TestIsolationLowWatermarkMonotonous(t *testing.T) {
}
hb, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, hb.Close())
- }()
app1 := hb.Appender(context.Background())
_, err := app1.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
@@ -3103,9 +3094,6 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) {
}
h, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
h.initTime(0)
@@ -3135,9 +3123,6 @@ func TestIsolationWithoutAdd(t *testing.T) {
}
hb, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, hb.Close())
- }()
app := hb.Appender(context.Background())
require.NoError(t, app.Commit())
@@ -3257,9 +3242,6 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario, opti
func testHeadSeriesChunkRace(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
require.NoError(t, h.Init(0))
app := h.Appender(context.Background())
@@ -3292,9 +3274,6 @@ func testHeadSeriesChunkRace(t *testing.T) {
func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
const (
firstSeriesTimestamp int64 = 100
@@ -3353,7 +3332,6 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
func TestHeadLabelValuesWithMatchers(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- t.Cleanup(func() { require.NoError(t, head.Close()) })
ctx := context.Background()
@@ -3429,9 +3407,6 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) {
func TestHeadLabelNamesWithMatchers(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
app := head.Appender(context.Background())
for i := range 100 {
@@ -3499,9 +3474,6 @@ func TestHeadShardedPostings(t *testing.T) {
headOpts := newTestHeadDefaultOptions(1000, false)
headOpts.EnableSharding = true
head, _ := newTestHeadWithOptions(t, compression.None, headOpts)
- defer func() {
- require.NoError(t, head.Close())
- }()
ctx := context.Background()
@@ -3562,9 +3534,6 @@ func TestHeadShardedPostings(t *testing.T) {
func TestErrReuseAppender(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
app := head.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("test", "test"), 0, 0)
@@ -3625,8 +3594,6 @@ func TestHeadMintAfterTruncation(t *testing.T) {
require.NoError(t, head.Truncate(7500))
require.Equal(t, int64(7500), head.MinTime())
require.Equal(t, int64(7500), head.minValidTime.Load())
-
- require.NoError(t, head.Close())
}
func TestHeadExemplars(t *testing.T) {
@@ -3648,13 +3615,11 @@ func TestHeadExemplars(t *testing.T) {
})
require.NoError(t, err)
require.NoError(t, app.Commit())
- require.NoError(t, head.Close())
}
func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) {
chunkRange := int64(2000)
head, _ := newTestHead(b, chunkRange, compression.None, false)
- b.Cleanup(func() { require.NoError(b, head.Close()) })
ctx := context.Background()
@@ -3838,7 +3803,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
ref, err = app.Append(ref, labels.FromStrings("a", "b"), ts, float64(i))
require.NoError(t, err)
maxt = ts
- expSamples = append(expSamples, sample{ts, float64(i), nil, nil})
+ expSamples = append(expSamples, sample{0, ts, float64(i), nil, nil})
}
require.NoError(t, app.Commit())
@@ -3945,17 +3910,35 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) {
}
for _, c := range cases {
t.Run(fmt.Sprintf("mint=%d,maxt=%d,shouldWait=%t", c.mint, c.maxt, c.shouldWait), func(t *testing.T) {
+ // checkWaiting verifies WaitForPendingReadersInTimeRange behavior using synctest
+ // for deterministic time control. The function should block while an overlapping
+ // querier is open and return immediately when there's no overlap.
checkWaiting := func(cl io.Closer) {
- var waitOver atomic.Bool
- go func() {
- db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
- waitOver.Store(true)
- }()
- <-time.After(550 * time.Millisecond)
- require.Equal(t, !c.shouldWait, waitOver.Load())
- require.NoError(t, cl.Close())
- <-time.After(550 * time.Millisecond)
- require.True(t, waitOver.Load())
+ synctest.Test(t, func(t *testing.T) {
+ var waitOver atomic.Bool
+ go func() {
+ db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
+ waitOver.Store(true)
+ }()
+
+ // Wait for goroutine to either complete (no overlap) or block on Sleep (overlap).
+ synctest.Wait()
+
+ if c.shouldWait {
+ require.False(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should block while overlapping querier is open")
+ require.NoError(t, cl.Close())
+ // Advance fake time past the 500ms poll interval, then let goroutine process.
+ time.Sleep(time.Second)
+ synctest.Wait()
+ require.True(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should complete after querier is closed")
+ } else {
+ require.True(t, waitOver.Load(),
+ "WaitForPendingReadersInTimeRange should return immediately when no overlap")
+ require.NoError(t, cl.Close())
+ }
+ })
}
q, err := db.Querier(c.mint, c.maxt)
@@ -4100,9 +4083,6 @@ func TestAppendHistogram(t *testing.T) {
for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} {
t.Run(strconv.Itoa(numHistograms), func(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- t.Cleanup(func() {
- require.NoError(t, head.Close())
- })
require.NoError(t, head.Init(0))
ingestTs := int64(0)
@@ -4205,7 +4185,8 @@ func TestAppendHistogram(t *testing.T) {
func TestHistogramInWALAndMmapChunk(t *testing.T) {
head, _ := newTestHead(t, 3000, compression.None, false)
t.Cleanup(func() {
- require.NoError(t, head.Close())
+ // Captures head by reference, so it closes the final head after restarts.
+ _ = head.Close()
})
require.NoError(t, head.Init(0))
@@ -4352,9 +4333,10 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
}
// Restart head.
+ walDir := head.wal.Dir()
require.NoError(t, head.Close())
startHead := func() {
- w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ w, err := wlog.NewSize(nil, nil, walDir, 32768, compression.None)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
@@ -4503,17 +4485,17 @@ func TestChunkSnapshot(t *testing.T) {
// 240 samples should m-map at least 1 chunk.
for ts := int64(1); ts <= 240; ts++ {
val := rand.Float64()
- expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
+ expSeries[lblStr] = append(expSeries[lblStr], sample{0, ts, val, nil, nil})
ref, err := app.Append(0, lbls, ts, val)
require.NoError(t, err)
hist := histograms[int(ts)]
- expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil})
+ expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{0, ts, 0, hist, nil})
_, err = app.AppendHistogram(0, lblsHist, ts, hist, nil)
require.NoError(t, err)
floatHist := floatHistogram[int(ts)]
- expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist})
+ expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{0, ts, 0, nil, floatHist})
_, err = app.AppendHistogram(0, lblsFloatHist, ts, nil, floatHist)
require.NoError(t, err)
@@ -4577,17 +4559,17 @@ func TestChunkSnapshot(t *testing.T) {
// 240 samples should m-map at least 1 chunk.
for ts := int64(241); ts <= 480; ts++ {
val := rand.Float64()
- expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
+ expSeries[lblStr] = append(expSeries[lblStr], sample{0, ts, val, nil, nil})
ref, err := app.Append(0, lbls, ts, val)
require.NoError(t, err)
hist := histograms[int(ts)]
- expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil})
+ expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{0, ts, 0, hist, nil})
_, err = app.AppendHistogram(0, lblsHist, ts, hist, nil)
require.NoError(t, err)
floatHist := floatHistogram[int(ts)]
- expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist})
+ expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{0, ts, 0, nil, floatHist})
_, err = app.AppendHistogram(0, lblsFloatHist, ts, nil, floatHist)
require.NoError(t, err)
@@ -5680,9 +5662,6 @@ func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) {
func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
require.NoError(t, h.Init(0))
@@ -5727,6 +5706,9 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
require.NoError(t, err)
h, err = NewHead(nil, nil, wal, nil, h.opts, nil)
require.NoError(t, err)
+ t.Cleanup(func() {
+ _ = h.Close()
+ })
require.NoError(t, h.Init(0))
series, created, err = h.getOrCreate(seriesLabels.Hash(), seriesLabels, false)
@@ -6367,9 +6349,6 @@ func TestCuttingNewHeadChunks(t *testing.T) {
for testName, tc := range testCases {
t.Run(testName, func(t *testing.T) {
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
a := h.Appender(context.Background())
@@ -6435,9 +6414,6 @@ func TestHeadDetectsDuplicateSampleAtSizeLimit(t *testing.T) {
baseTS := int64(1695209650)
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
a := h.Appender(context.Background())
var err error
@@ -6502,9 +6478,6 @@ func TestWALSampleAndExemplarOrder(t *testing.T) {
for testName, tc := range testcases {
t.Run(testName, func(t *testing.T) {
h, w := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
app := h.Appender(context.Background())
ref, err := tc.appendF(app, 10)
@@ -6552,7 +6525,6 @@ func TestHeadCompactionWhileAppendAndCommitExemplar(t *testing.T) {
require.NoError(t, err)
h.Truncate(10)
app.Commit()
- h.Close()
}
func labelsWithHashCollision() (labels.Labels, labels.Labels) {
@@ -6614,7 +6586,7 @@ func TestStripeSeries_gc(t *testing.T) {
s, ms1, ms2 := stripeSeriesWithCollidingSeries(t)
hash := ms1.lset.Hash()
- s.gc(0, 0, nil)
+ s.gc(0, 0)
// Verify that we can get neither ms1 nor ms2 after gc-ing corresponding series
got := s.getByHash(hash, ms1.lset)
@@ -6648,7 +6620,6 @@ func TestPostingsCardinalityStats(t *testing.T) {
func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- t.Cleanup(func() { head.Close() })
ls := labels.FromStrings(labels.MetricName, "test")
@@ -6872,9 +6843,6 @@ func TestHeadAppender_AppendST(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
a := h.Appender(context.Background())
lbls := labels.FromStrings("foo", "bar")
for _, sample := range tc.appendableSamples {
@@ -6950,10 +6918,6 @@ func TestHeadAppender_AppendHistogramSTZeroSample(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
- defer func() {
- require.NoError(t, h.Close())
- }()
-
lbls := labels.FromStrings("foo", "bar")
var ref storage.SeriesRef
@@ -6979,9 +6943,6 @@ func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) {
// would return true which is incorrect. This test verifies that we short-circuit
// the check when the head has not yet had any samples added.
head, _ := newTestHead(t, 1, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
require.False(t, head.compactable())
}
@@ -7021,9 +6982,6 @@ func TestHeadAppendHistogramAndCommitConcurrency(t *testing.T) {
func testHeadAppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(storage.Appender, int) error) {
head, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
wg := sync.WaitGroup{}
wg.Add(2)
@@ -7057,7 +7015,8 @@ func testHeadAppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(sto
func TestHead_NumStaleSeries(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() {
- require.NoError(t, head.Close())
+ // Captures head by reference, so it closes the final head after restarts.
+ _ = head.Close()
})
require.NoError(t, head.Init(0))
@@ -7228,9 +7187,6 @@ func TestHistogramStalenessConversionMetrics(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
- defer func() {
- require.NoError(t, head.Close())
- }()
lbls := labels.FromStrings("name", tc.name)
diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go
index bbcad9d855..0581b9306e 100644
--- a/tsdb/head_wal.go
+++ b/tsdb/head_wal.go
@@ -37,7 +37,6 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -308,7 +307,21 @@ Outer:
}
h.wlReplaySamplesPool.Put(v)
case []tombstones.Stone:
+ // Tombstone records will be fairly rare, so not trying to optimise the allocations here.
+ deleteSeriesShards := make([][]chunks.HeadSeriesRef, concurrency)
for _, s := range v {
+ if len(s.Intervals) == 1 && s.Intervals[0].Mint == math.MinInt64 && s.Intervals[0].Maxt == math.MaxInt64 {
+ // This series was fully deleted at this point. This record is only done for stale series at the moment.
+ mod := uint64(s.Ref) % uint64(concurrency)
+ deleteSeriesShards[mod] = append(deleteSeriesShards[mod], chunks.HeadSeriesRef(s.Ref))
+
+ // If the series is with a different reference, try deleting that.
+ if r, ok := multiRef[chunks.HeadSeriesRef(s.Ref)]; ok {
+ mod := uint64(r) % uint64(concurrency)
+ deleteSeriesShards[mod] = append(deleteSeriesShards[mod], r)
+ }
+ continue
+ }
for _, itv := range s.Intervals {
if itv.Maxt < h.minValidTime.Load() {
continue
@@ -326,6 +339,14 @@ Outer:
h.tombstones.AddInterval(s.Ref, itv)
}
}
+
+ for i := range concurrency {
+ if len(deleteSeriesShards[i]) > 0 {
+ processors[i].input <- walSubsetProcessorInputItem{deletedSeriesRefs: deleteSeriesShards[i]}
+ deleteSeriesShards[i] = nil
+ }
+ }
+
h.wlReplaytStonesPool.Put(v)
case []record.RefExemplar:
for _, e := range v {
@@ -558,10 +579,11 @@ type walSubsetProcessor struct {
}
type walSubsetProcessorInputItem struct {
- samples []record.RefSample
- histogramSamples []histogramRecord
- existingSeries *memSeries
- walSeriesRef chunks.HeadSeriesRef
+ samples []record.RefSample
+ histogramSamples []histogramRecord
+ existingSeries *memSeries
+ walSeriesRef chunks.HeadSeriesRef
+ deletedSeriesRefs []chunks.HeadSeriesRef
}
func (wp *walSubsetProcessor) setup() {
@@ -712,6 +734,10 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
case wp.histogramsOutput <- in.histogramSamples:
default:
}
+
+ if len(in.deletedSeriesRefs) > 0 {
+ h.deleteSeriesByID(in.deletedSeriesRefs)
+ }
}
h.updateMinMaxTime(mint, maxt)
@@ -1509,7 +1535,7 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error {
return err
}
- errs := tsdb_errors.NewMulti()
+ var errs []error
for _, fi := range files {
if !strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
continue
@@ -1532,11 +1558,11 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error {
if idx < maxIndex || (idx == maxIndex && offset < maxOffset) {
if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
- errs.Add(err)
+ errs = append(errs, err)
}
}
}
- return errs.Err()
+ return errors.Join(errs...)
}
// loadChunkSnapshot replays the chunk snapshot and restores the Head state from it. If there was any error returned,
@@ -1724,14 +1750,14 @@ Outer:
}
close(errChan)
- merr := tsdb_errors.NewMulti()
+ var errs []error
if loopErr != nil {
- merr.Add(fmt.Errorf("decode loop: %w", loopErr))
+ errs = append(errs, fmt.Errorf("decode loop: %w", loopErr))
}
for err := range errChan {
- merr.Add(fmt.Errorf("record processing: %w", err))
+ errs = append(errs, fmt.Errorf("record processing: %w", err))
}
- if err := merr.Err(); err != nil {
+ if err := errors.Join(errs...); err != nil {
return -1, -1, nil, err
}
diff --git a/tsdb/index/index.go b/tsdb/index/index.go
index 493264b87f..9b907bb7a7 100644
--- a/tsdb/index/index.go
+++ b/tsdb/index/index.go
@@ -101,6 +101,9 @@ var ErrPostingsOffsetTableTooLarge = errors.New("length size exceeds 4 bytes")
// ErrIndexExceeds64GiB is returned when the index file would exceed the 64GiB limit.
var ErrIndexExceeds64GiB = errors.New("exceeding max size of 64GiB")
+// ErrSymbolTableTooLarge is returned when the symbol table size exceeds 4 bytes (4GiB limit).
+var ErrSymbolTableTooLarge = fmt.Errorf("symbol table size exceeds %d bytes", uint32(math.MaxUint32))
+
// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// before.
@@ -550,7 +553,7 @@ func (w *Writer) finishSymbols() error {
symbolTableSize := w.f.pos - w.toc.Symbols - 4
// The symbol table's part is 4 bytes. So the total symbol table size must be less than or equal to 2^32-1
if symbolTableSize > math.MaxUint32 {
- return fmt.Errorf("symbol table size exceeds %d bytes: %d", uint32(math.MaxUint32), symbolTableSize)
+ return fmt.Errorf("%w: %d", ErrSymbolTableTooLarge, symbolTableSize)
}
// Write out the length and symbol count.
diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go
index 31b93f850d..c0bf213c45 100644
--- a/tsdb/index/postings.go
+++ b/tsdb/index/postings.go
@@ -956,7 +956,7 @@ func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int,
}
if p.At() == h.at() {
indexes = append(indexes, h.popIndex())
- } else if err := h.next(); err != nil {
+ } else if err := h.seekHead(p.At()); err != nil {
return nil, err
}
}
@@ -999,20 +999,18 @@ func (h *postingsWithIndexHeap) popIndex() int {
// at provides the storage.SeriesRef where root Postings is pointing at this moment.
func (h postingsWithIndexHeap) at() storage.SeriesRef { return h[0].p.At() }
-// next performs the Postings.Next() operation on the root of the heap, performing the related operation on the heap
-// and conveniently returning the result of calling Postings.Err() if the result of calling Next() was false.
-// If Next() succeeds, heap is fixed to move the root to its new position, according to its Postings.At() value.
-// If Next() returns fails and there's no error reported by Postings.Err(), then root is marked as removed and heap is fixed.
-func (h *postingsWithIndexHeap) next() error {
+// seekHead performs the Postings.Seek() operation on the root of the heap.
+// If the root is exhausted or fails, it is removed from the heap.
+func (h *postingsWithIndexHeap) seekHead(val storage.SeriesRef) error {
pi := (*h)[0]
- next := pi.p.Next()
+ next := pi.p.Seek(val)
if next {
heap.Fix(h, 0)
return nil
}
if err := pi.p.Err(); err != nil {
- return fmt.Errorf("postings %d: %w", pi.index, err)
+ return fmt.Errorf("seek postings %d: %w", pi.index, err)
}
h.popIndex()
return nil
diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go
index 77b43f76ab..5c67a2da6d 100644
--- a/tsdb/index/postings_test.go
+++ b/tsdb/index/postings_test.go
@@ -1192,7 +1192,7 @@ func (p *postingsFailingAfterNthCall) Err() error {
}
func TestPostingsWithIndexHeap(t *testing.T) {
- t.Run("iterate", func(t *testing.T) {
+ t.Run("seekHead", func(t *testing.T) {
h := postingsWithIndexHeap{
{index: 0, p: NewListPostings([]storage.SeriesRef{10, 20, 30})},
{index: 1, p: NewListPostings([]storage.SeriesRef{1, 5})},
@@ -1205,7 +1205,7 @@ func TestPostingsWithIndexHeap(t *testing.T) {
for _, expected := range []storage.SeriesRef{1, 5, 10, 20, 25, 30, 50} {
require.Equal(t, expected, h.at())
- require.NoError(t, h.next())
+ require.NoError(t, h.seekHead(h.at()+1))
}
require.True(t, h.empty())
})
@@ -1223,7 +1223,7 @@ func TestPostingsWithIndexHeap(t *testing.T) {
for _, expected := range []storage.SeriesRef{1, 5, 10, 20} {
require.Equal(t, expected, h.at())
- require.NoError(t, h.next())
+ require.NoError(t, h.seekHead(h.at()+1))
}
require.Equal(t, storage.SeriesRef(25), h.at())
node := heap.Pop(&h).(postingsWithIndex)
diff --git a/tsdb/label_values_bench_test.go b/tsdb/label_values_bench_test.go
new file mode 100644
index 0000000000..1e55cf80c0
--- /dev/null
+++ b/tsdb/label_values_bench_test.go
@@ -0,0 +1,86 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+ "context"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/tsdb/wlog"
+)
+
+// BenchmarkLabelValues_SlowPath benchmarks the performance of LabelValues when the matcher
+// is far ahead of the candidate posting list. This reproduces the performance regression
+// described in #14551 where dense candidates caused O(N) iteration instead of O(log N) seeking.
+func BenchmarkLabelValues_SlowPath(b *testing.B) {
+ // Create a head with some data.
+ opts := DefaultHeadOptions()
+ opts.ChunkDirRoot = b.TempDir()
+ h, err := NewHead(nil, nil, nil, nil, opts, nil)
+ require.NoError(b, err)
+ defer h.Close()
+
+ app := h.Appender(context.Background())
+ // 1. Create a large number of series for a "candidate" label (e.g. "job").
+ // We want these to NOT match the target matcher, but be candidates for a different label.
+ // We use "job=api" and "instance=..."
+ // We want the interaction to be:
+ // LabelValues("instance", "job"="api")
+ // "job"="api" will have 1 series at the END.
+ // "instance" will have 100k series.
+
+ // Actually, let's stick to the reproduction case:
+ // distinct values for "val1".
+ // "b"="1" matcher.
+
+ // Create 100k series with the same label value ("common") but without the matcher label.
+ // This results in a single large posting list for that value, simulating a dense candidate.
+ for i := range 100000 {
+ _, err := app.Append(0, labels.FromStrings("val1", "common", "extra", strconv.Itoa(i)), time.Now().UnixMilli(), 1)
+ require.NoError(b, err)
+ }
+
+ // Create 1 series that matches the label "b=1", with a series ID greater than all previous ones.
+ // This forces the intersection to skip over all 100k previous candidates.
+ _, err = app.Append(0, labels.FromStrings("val1", "common", "b", "1"), time.Now().UnixMilli(), 1)
+ require.NoError(b, err)
+
+ require.NoError(b, app.Commit())
+
+ ctx := context.Background()
+ matcher := labels.MustNewMatcher(labels.MatchEqual, "b", "1")
+
+ // Use the correct method to access label values.
+ idx, err := h.Index()
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for b.Loop() {
+ // "val1"="common" has 100k+1 postings.
+ // "b=1" has 1 posting (the last one).
+ vals, err := idx.LabelValues(ctx, "val1", nil, matcher)
+ require.NoError(b, err)
+ require.Equal(b, []string{"common"}, vals)
+ }
+}
+
+// Ensure wlog/wal needed for NewHead.
+var _ = wlog.WL{}
diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go
index c6ae924372..f9746c4c61 100644
--- a/tsdb/ooo_head.go
+++ b/tsdb/ooo_head.go
@@ -40,7 +40,8 @@ func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histog
// try to append at the end first if the new timestamp is higher than the
// last known timestamp.
if len(o.samples) == 0 || t > o.samples[len(o.samples)-1].t {
- o.samples = append(o.samples, sample{t, v, h, fh})
+ // TODO(krajorama): pass ST.
+ o.samples = append(o.samples, sample{0, t, v, h, fh})
return true
}
@@ -49,7 +50,8 @@ func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histog
if i >= len(o.samples) {
// none found. append it at the end
- o.samples = append(o.samples, sample{t, v, h, fh})
+ // TODO(krajorama): pass ST.
+ o.samples = append(o.samples, sample{0, t, v, h, fh})
return true
}
@@ -61,7 +63,8 @@ func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histog
// Expand length by 1 to make room. use a zero sample, we will overwrite it anyway.
o.samples = append(o.samples, sample{})
copy(o.samples[i+1:], o.samples[i:])
- o.samples[i] = sample{t, v, h, fh}
+ // TODO(krajorama): pass ST.
+ o.samples[i] = sample{0, t, v, h, fh}
return true
}
@@ -125,7 +128,8 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
}
switch encoding {
case chunkenc.EncXOR:
- app.Append(s.t, s.f)
+ // TODO(krajorama): pass ST.
+ app.Append(0, s.t, s.f)
case chunkenc.EncHistogram:
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
prevHApp, _ := prevApp.(*chunkenc.HistogramAppender)
@@ -133,7 +137,8 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
newChunk chunkenc.Chunk
recoded bool
)
- newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, s.t, s.h, false)
+ // TODO(krajorama): pass ST.
+ newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, 0, s.t, s.h, false)
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
@@ -148,7 +153,8 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
newChunk chunkenc.Chunk
recoded bool
)
- newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, s.t, s.fh, false)
+ // TODO(krajorama): pass ST.
+ newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, 0, s.t, s.fh, false)
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go
index 4ecaa51fec..f58ee3aada 100644
--- a/tsdb/ooo_head_read_test.go
+++ b/tsdb/ooo_head_read_test.go
@@ -301,9 +301,6 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
for _, headChunk := range []bool{false, true} {
t.Run(fmt.Sprintf("name=%s, permutation=%d, headChunk=%t", tc.name, perm, headChunk), func(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, true)
- defer func() {
- require.NoError(t, h.Close())
- }()
require.NoError(t, h.Init(0))
s1, _, _ := h.getOrCreate(s1ID, s1Lset, false)
@@ -389,7 +386,6 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenario) {
chunkRange := int64(2000)
head, _ := newTestHead(t, chunkRange, compression.None, true)
- t.Cleanup(func() { require.NoError(t, head.Close()) })
ctx := context.Background()
diff --git a/tsdb/querier.go b/tsdb/querier.go
index 4a487aa568..ac7a14e1b3 100644
--- a/tsdb/querier.go
+++ b/tsdb/querier.go
@@ -27,7 +27,6 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
@@ -92,13 +91,13 @@ func (q *blockBaseQuerier) Close() error {
return errors.New("block querier already closed")
}
- errs := tsdb_errors.NewMulti(
+ errs := []error{
q.index.Close(),
q.chunks.Close(),
q.tombstones.Close(),
- )
+ }
q.closed = true
- return errs.Err()
+ return errors.Join(errs...)
}
type blockQuerier struct {
@@ -788,6 +787,11 @@ func (p *populateWithDelSeriesIterator) AtT() int64 {
return p.curr.AtT()
}
+// AtST TODO(krajorama): test AtST() when chunks support it.
+func (p *populateWithDelSeriesIterator) AtST() int64 {
+ return p.curr.AtST()
+}
+
func (p *populateWithDelSeriesIterator) Err() error {
if err := p.populateWithDelGenericSeriesIterator.Err(); err != nil {
return err
@@ -862,6 +866,7 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
// populateCurrForSingleChunk sets the fields within p.currMetaWithChunk. This
// should be called if the samples in p.currDelIter only form one chunk.
+// TODO(krajorama): test ST when chunks support it.
func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
valueType := p.currDelIter.Next()
if valueType == chunkenc.ValNone {
@@ -877,7 +882,7 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
var (
newChunk chunkenc.Chunk
app chunkenc.Appender
- t int64
+ st, t int64
err error
)
switch valueType {
@@ -893,7 +898,8 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
}
var h *histogram.Histogram
t, h = p.currDelIter.AtHistogram(nil)
- _, _, app, err = app.AppendHistogram(nil, t, h, true)
+ st = p.currDelIter.AtST()
+ _, _, app, err = app.AppendHistogram(nil, st, t, h, true)
if err != nil {
break
}
@@ -910,7 +916,8 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
}
var v float64
t, v = p.currDelIter.At()
- app.Append(t, v)
+ st = p.currDelIter.AtST()
+ app.Append(st, t, v)
}
case chunkenc.ValFloatHistogram:
newChunk = chunkenc.NewFloatHistogramChunk()
@@ -924,7 +931,8 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
}
var h *histogram.FloatHistogram
t, h = p.currDelIter.AtFloatHistogram(nil)
- _, _, app, err = app.AppendFloatHistogram(nil, t, h, true)
+ st = p.currDelIter.AtST()
+ _, _, app, err = app.AppendFloatHistogram(nil, st, t, h, true)
if err != nil {
break
}
@@ -950,6 +958,7 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
// populateChunksFromIterable reads the samples from currDelIter to create
// chunks for chunksFromIterable. It also sets p.currMetaWithChunk to the first
// chunk.
+// TODO(krajorama): test ST when chunks support it.
func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool {
p.chunksFromIterable = p.chunksFromIterable[:0]
p.chunksFromIterableIdx = -1
@@ -965,7 +974,7 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool {
var (
// t is the timestamp for the current sample.
- t int64
+ st, t int64
cmint int64
cmaxt int64
@@ -1004,23 +1013,26 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool {
{
var v float64
t, v = p.currDelIter.At()
- app.Append(t, v)
+ st = p.currDelIter.AtST()
+ app.Append(st, t, v)
}
case chunkenc.ValHistogram:
{
var v *histogram.Histogram
t, v = p.currDelIter.AtHistogram(nil)
+ st = p.currDelIter.AtST()
// No need to set prevApp as AppendHistogram will set the
// counter reset header for the appender that's returned.
- newChunk, recoded, app, err = app.AppendHistogram(nil, t, v, false)
+ newChunk, recoded, app, err = app.AppendHistogram(nil, st, t, v, false)
}
case chunkenc.ValFloatHistogram:
{
var v *histogram.FloatHistogram
t, v = p.currDelIter.AtFloatHistogram(nil)
+ st = p.currDelIter.AtST()
// No need to set prevApp as AppendHistogram will set the
// counter reset header for the appender that's returned.
- newChunk, recoded, app, err = app.AppendFloatHistogram(nil, t, v, false)
+ newChunk, recoded, app, err = app.AppendFloatHistogram(nil, st, t, v, false)
}
}
@@ -1202,6 +1214,11 @@ func (it *DeletedIterator) AtT() int64 {
return it.Iter.AtT()
}
+// AtST TODO(krajorama): test AtST() when chunks support it.
+func (it *DeletedIterator) AtST() int64 {
+ return it.Iter.AtST()
+}
+
func (it *DeletedIterator) Seek(t int64) chunkenc.ValueType {
if it.Iter.Err() != nil {
return chunkenc.ValNone
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index 6933aa617a..4387635959 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -141,7 +141,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe
app, _ := chunk.Appender()
for _, smpl := range chk {
require.NotNil(t, smpl.fh, "chunk can only contain one type of sample")
- _, _, _, err := app.AppendFloatHistogram(nil, smpl.t, smpl.fh, true)
+ _, _, _, err := app.AppendFloatHistogram(nil, 0, smpl.t, smpl.fh, true)
require.NoError(t, err, "chunk should be appendable")
}
chkReader[chunkRef] = chunk
@@ -150,7 +150,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe
app, _ := chunk.Appender()
for _, smpl := range chk {
require.NotNil(t, smpl.h, "chunk can only contain one type of sample")
- _, _, _, err := app.AppendHistogram(nil, smpl.t, smpl.h, true)
+ _, _, _, err := app.AppendHistogram(nil, 0, smpl.t, smpl.h, true)
require.NoError(t, err, "chunk should be appendable")
}
chkReader[chunkRef] = chunk
@@ -160,7 +160,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe
for _, smpl := range chk {
require.Nil(t, smpl.h, "chunk can only contain one type of sample")
require.Nil(t, smpl.fh, "chunk can only contain one type of sample")
- app.Append(smpl.t, smpl.f)
+ app.Append(0, smpl.t, smpl.f)
}
chkReader[chunkRef] = chunk
}
@@ -318,24 +318,24 @@ func TestBlockQuerier(t *testing.T) {
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}, sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 1, 1, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}, sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("b", "b"),
- []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+ []chunks.Sample{sample{0, 1, 3, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 6, nil, nil}, sample{0, 5, 1, nil, nil}, sample{0, 6, 7, nil, nil}, sample{0, 7, 2, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}}, []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 1, 1, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}}, []chunks.Sample{sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
- []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+ []chunks.Sample{sample{0, 1, 3, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 6, nil, nil}}, []chunks.Sample{sample{0, 5, 1, nil, nil}, sample{0, 6, 7, nil, nil}, sample{0, 7, 2, nil, nil}},
),
}),
},
@@ -345,18 +345,18 @@ func TestBlockQuerier(t *testing.T) {
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+ []chunks.Sample{sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}, sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}, sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+ []chunks.Sample{sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}}, []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}}, []chunks.Sample{sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
},
@@ -369,20 +369,20 @@ func TestBlockQuerier(t *testing.T) {
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}, sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 1, 1, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}, sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}},
- []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}},
+ []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
- []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 1, 1, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}},
+ []chunks.Sample{sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
},
@@ -395,18 +395,18 @@ func TestBlockQuerier(t *testing.T) {
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
},
@@ -454,24 +454,24 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}, sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 1, 1, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}, sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("b", "b"),
- []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+ []chunks.Sample{sample{0, 1, 3, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 6, nil, nil}, sample{0, 5, 1, nil, nil}, sample{0, 6, 7, nil, nil}, sample{0, 7, 2, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}, sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 1, 1, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}, sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
- []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+ []chunks.Sample{sample{0, 1, 3, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 6, nil, nil}, sample{0, 5, 1, nil, nil}, sample{0, 6, 7, nil, nil}, sample{0, 7, 2, nil, nil}},
),
}),
},
@@ -481,18 +481,18 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+ []chunks.Sample{sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}, sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}, sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+ []chunks.Sample{sample{0, 2, 3, nil, nil}, sample{0, 3, 4, nil, nil}, sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}, sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
},
@@ -537,18 +537,18 @@ func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+ []chunks.Sample{sample{0, 3, 4, nil, nil}, sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 3, 3, nil, nil}, sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+ []chunks.Sample{sample{0, 3, 4, nil, nil}}, []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+ []chunks.Sample{sample{0, 3, 3, nil, nil}}, []chunks.Sample{sample{0, 5, 3, nil, nil}, sample{0, 6, 6, nil, nil}},
),
}),
}
@@ -574,22 +574,22 @@ var testData = []seriesSamples{
{
lset: map[string]string{"a": "a"},
chunks: [][]sample{
- {{1, 2, nil, nil}, {2, 3, nil, nil}, {3, 4, nil, nil}},
- {{5, 2, nil, nil}, {6, 3, nil, nil}, {7, 4, nil, nil}},
+ {{0, 1, 2, nil, nil}, {0, 2, 3, nil, nil}, {0, 3, 4, nil, nil}},
+ {{0, 5, 2, nil, nil}, {0, 6, 3, nil, nil}, {0, 7, 4, nil, nil}},
},
},
{
lset: map[string]string{"a": "a", "b": "b"},
chunks: [][]sample{
- {{1, 1, nil, nil}, {2, 2, nil, nil}, {3, 3, nil, nil}},
- {{5, 3, nil, nil}, {6, 6, nil, nil}},
+ {{0, 1, 1, nil, nil}, {0, 2, 2, nil, nil}, {0, 3, 3, nil, nil}},
+ {{0, 5, 3, nil, nil}, {0, 6, 6, nil, nil}},
},
},
{
lset: map[string]string{"b": "b"},
chunks: [][]sample{
- {{1, 3, nil, nil}, {2, 2, nil, nil}, {3, 6, nil, nil}},
- {{5, 1, nil, nil}, {6, 7, nil, nil}, {7, 2, nil, nil}},
+ {{0, 1, 3, nil, nil}, {0, 2, 2, nil, nil}, {0, 3, 6, nil, nil}},
+ {{0, 5, 1, nil, nil}, {0, 6, 7, nil, nil}, {0, 7, 2, nil, nil}},
},
},
}
@@ -636,24 +636,24 @@ func TestBlockQuerierDelete(t *testing.T) {
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{5, 3, nil, nil}},
+ []chunks.Sample{sample{0, 5, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("b", "b"),
- []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}},
+ []chunks.Sample{sample{0, 1, 3, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 6, nil, nil}, sample{0, 5, 1, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+ []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}, sample{0, 7, 4, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{5, 3, nil, nil}},
+ []chunks.Sample{sample{0, 5, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
- []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}},
+ []chunks.Sample{sample{0, 1, 3, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 6, nil, nil}}, []chunks.Sample{sample{0, 5, 1, nil, nil}},
),
}),
},
@@ -663,18 +663,18 @@ func TestBlockQuerierDelete(t *testing.T) {
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+ []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{5, 3, nil, nil}},
+ []chunks.Sample{sample{0, 5, 3, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
- []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+ []chunks.Sample{sample{0, 5, 2, nil, nil}, sample{0, 6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
- []chunks.Sample{sample{5, 3, nil, nil}},
+ []chunks.Sample{sample{0, 5, 3, nil, nil}},
),
}),
},
@@ -790,6 +790,10 @@ func (it *mockSampleIterator) AtT() int64 {
return it.s[it.idx].T()
}
+func (it *mockSampleIterator) AtST() int64 {
+ return it.s[it.idx].ST()
+}
+
func (it *mockSampleIterator) Next() chunkenc.ValueType {
if it.idx < len(it.s)-1 {
it.idx++
@@ -871,15 +875,15 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "one chunk",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
},
expected: []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
@@ -887,19 +891,19 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "two full chunks",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
expected: []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}, sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}},
@@ -907,23 +911,23 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "three full chunks",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
- {sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
+ {sample{0, 10, 22, nil, nil}, sample{0, 203, 3493, nil, nil}},
},
expected: []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}, sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}, sample{0, 10, 22, nil, nil}, sample{0, 203, 3493, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
+ sample{0, 10, 22, nil, nil}, sample{0, 203, 3493, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}},
@@ -939,8 +943,8 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "two chunks and seek beyond chunks",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
seek: 10,
@@ -949,27 +953,27 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "two chunks and seek on middle of first chunk",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
seek: 2,
seekSuccess: true,
expected: []chunks.Sample{
- sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}, sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
},
},
{
name: "two chunks and seek before first chunk",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
seek: -32,
seekSuccess: true,
expected: []chunks.Sample{
- sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}, sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
},
},
// Deletion / Trim cases.
@@ -981,20 +985,20 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "two chunks with trimmed first and last samples from edge chunks",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 9, Maxt: math.MaxInt64}),
expected: []chunks.Sample{
- sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
+ sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}, sample{0, 7, 89, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
+ sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 89, nil, nil},
+ sample{0, 7, 89, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}, {7, 7}},
@@ -1002,20 +1006,20 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "two chunks with trimmed middle sample of first chunk",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
intervals: tombstones.Intervals{{Mint: 2, Maxt: 3}},
expected: []chunks.Sample{
- sample{1, 2, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 6, 1, nil, nil}, sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 2, nil, nil}, sample{6, 1, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 6, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}},
@@ -1023,20 +1027,20 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "two chunks with deletion across two chunks",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
intervals: tombstones.Intervals{{Mint: 6, Maxt: 7}},
expected: []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 9, 8, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{9, 8, nil, nil},
+ sample{0, 9, 8, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 3}, {9, 9}},
@@ -1044,17 +1048,17 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "two chunks with first chunk deleted",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 6}},
expected: []chunks.Sample{
- sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
+ sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{7, 9}},
@@ -1063,22 +1067,22 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "two chunks with trimmed first and last samples from edge chunks, seek from middle of first chunk",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 9, Maxt: math.MaxInt64}),
seek: 3,
seekSuccess: true,
expected: []chunks.Sample{
- sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
+ sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}, sample{0, 7, 89, nil, nil},
},
},
{
name: "one chunk where all samples are trimmed",
samples: [][]chunks.Sample{
- {sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
- {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
+ {sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
+ {sample{0, 7, 89, nil, nil}, sample{0, 9, 8, nil, nil}},
},
intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 3}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64}),
@@ -1089,24 +1093,24 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one histogram chunk",
samples: [][]chunks.Sample{
{
- sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
},
expected: []chunks.Sample{
- sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
- sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
- sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
- sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
+ sample{0, 3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
+ sample{0, 6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
- sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
- sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
- sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
+ sample{0, 3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
+ sample{0, 6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
@@ -1115,21 +1119,21 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one histogram chunk intersect with earlier deletion interval",
samples: [][]chunks.Sample{
{
- sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
expected: []chunks.Sample{
- sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
- sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
+ sample{0, 3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
+ sample{0, 6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
- sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
+ sample{0, 3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
+ sample{0, 6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
@@ -1138,23 +1142,23 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one histogram chunk intersect with later deletion interval",
samples: [][]chunks.Sample{
{
- sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
},
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
expected: []chunks.Sample{
- sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
- sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
- sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
+ sample{0, 3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
- sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
- sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
+ sample{0, 3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
@@ -1163,24 +1167,24 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one float histogram chunk",
samples: [][]chunks.Sample{
{
- sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
},
expected: []chunks.Sample{
- sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
- sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
- sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
+ sample{0, 3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
+ sample{0, 6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
- sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
- sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
+ sample{0, 3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
+ sample{0, 6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
@@ -1189,21 +1193,21 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one float histogram chunk intersect with earlier deletion interval",
samples: [][]chunks.Sample{
{
- sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
expected: []chunks.Sample{
- sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
- sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
+ sample{0, 3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
+ sample{0, 6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
- sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
+ sample{0, 3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
+ sample{0, 6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
@@ -1212,23 +1216,23 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one float histogram chunk intersect with later deletion interval",
samples: [][]chunks.Sample{
{
- sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
},
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
expected: []chunks.Sample{
- sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
- sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
+ sample{0, 3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
- sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
+ sample{0, 3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
@@ -1237,24 +1241,24 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one gauge histogram chunk",
samples: [][]chunks.Sample{
{
- sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
},
expected: []chunks.Sample{
- sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
@@ -1263,21 +1267,21 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one gauge histogram chunk intersect with earlier deletion interval",
samples: [][]chunks.Sample{
{
- sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
expected: []chunks.Sample{
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
@@ -1286,23 +1290,23 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one gauge histogram chunk intersect with later deletion interval",
samples: [][]chunks.Sample{
{
- sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
- sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
},
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
expected: []chunks.Sample{
- sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
- sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
- sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
+ sample{0, 1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
+ sample{0, 2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
+ sample{0, 3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
@@ -1311,24 +1315,24 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one gauge float histogram",
samples: [][]chunks.Sample{
{
- sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
},
expected: []chunks.Sample{
- sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
@@ -1337,21 +1341,21 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one gauge float histogram chunk intersect with earlier deletion interval",
samples: [][]chunks.Sample{
{
- sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
expected: []chunks.Sample{
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
@@ -1360,23 +1364,23 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "one gauge float histogram chunk intersect with later deletion interval",
samples: [][]chunks.Sample{
{
- sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
- sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
},
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
expected: []chunks.Sample{
- sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
- sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
- sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
+ sample{0, 1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
+ sample{0, 2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
+ sample{0, 3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
@@ -1384,31 +1388,31 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
{
name: "three full mixed chunks",
samples: [][]chunks.Sample{
- {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}},
{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
- sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
+ sample{0, 9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
},
{
- sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
- sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
+ sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
},
expected: []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil}, sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{0, 9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{0, 10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
+ sample{0, 1, 2, nil, nil}, sample{0, 2, 3, nil, nil}, sample{0, 3, 5, nil, nil}, sample{0, 6, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
- sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
+ sample{0, 9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
- sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
+ sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}},
@@ -1417,30 +1421,30 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "three full mixed chunks in different order",
samples: [][]chunks.Sample{
{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
- sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
+ sample{0, 9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
},
- {sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
+ {sample{0, 11, 2, nil, nil}, sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil}, sample{0, 16, 1, nil, nil}},
{
- sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
- sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
+ sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
},
expected: []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{0, 9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{0, 11, 2, nil, nil}, sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil}, sample{0, 16, 1, nil, nil}, sample{0, 100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
- sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
+ sample{0, 9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil},
+ sample{0, 11, 2, nil, nil}, sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil}, sample{0, 16, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
- sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
+ sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{7, 9}, {11, 16}, {100, 203}},
@@ -1449,29 +1453,29 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "three full mixed chunks in different order intersect with deletion interval",
samples: [][]chunks.Sample{
{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
- sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
+ sample{0, 9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
},
- {sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
+ {sample{0, 11, 2, nil, nil}, sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil}, sample{0, 16, 1, nil, nil}},
{
- sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
- sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
+ sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
},
intervals: tombstones.Intervals{{Mint: 8, Maxt: 11}, {Mint: 15, Maxt: 150}},
expected: []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil}, sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{12, 3, nil, nil}, sample{13, 5, nil, nil},
+ sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{7, 7}, {12, 13}, {203, 203}},
@@ -1480,30 +1484,30 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "three full mixed chunks overlapping",
samples: [][]chunks.Sample{
{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
- sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
+ sample{0, 12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
},
- {sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
+ {sample{0, 11, 2, nil, nil}, sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil}, sample{0, 16, 1, nil, nil}},
{
- sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
- sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
+ sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
},
expected: []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{0, 12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{0, 11, 2, nil, nil}, sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil}, sample{0, 16, 1, nil, nil}, sample{0, 10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
- sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
+ sample{0, 12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil},
+ sample{0, 11, 2, nil, nil}, sample{0, 12, 3, nil, nil}, sample{0, 13, 5, nil, nil}, sample{0, 16, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
- sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
+ sample{0, 10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
+ sample{0, 203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{7, 12}, {11, 16}, {10, 203}},
@@ -1512,56 +1516,56 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "int histogram iterables with counter resets",
samples: [][]chunks.Sample{
{
- sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
- sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 8, 0, tsdbutil.GenerateTestHistogram(9), nil},
// Counter reset should be detected when chunks are created from the iterable.
- sample{12, 0, tsdbutil.GenerateTestHistogram(5), nil},
- sample{15, 0, tsdbutil.GenerateTestHistogram(6), nil},
- sample{16, 0, tsdbutil.GenerateTestHistogram(7), nil},
+ sample{0, 12, 0, tsdbutil.GenerateTestHistogram(5), nil},
+ sample{0, 15, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 16, 0, tsdbutil.GenerateTestHistogram(7), nil},
// Counter reset should be detected when chunks are created from the iterable.
- sample{17, 0, tsdbutil.GenerateTestHistogram(5), nil},
+ sample{0, 17, 0, tsdbutil.GenerateTestHistogram(5), nil},
},
{
- sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
- sample{19, 0, tsdbutil.GenerateTestHistogram(7), nil},
+ sample{0, 18, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 19, 0, tsdbutil.GenerateTestHistogram(7), nil},
// Counter reset should be detected when chunks are created from the iterable.
- sample{20, 0, tsdbutil.GenerateTestHistogram(5), nil},
- sample{21, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 20, 0, tsdbutil.GenerateTestHistogram(5), nil},
+ sample{0, 21, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
},
expected: []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
- sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
- sample{12, 0, tsdbutil.GenerateTestHistogram(5), nil},
- sample{15, 0, tsdbutil.GenerateTestHistogram(6), nil},
- sample{16, 0, tsdbutil.GenerateTestHistogram(7), nil},
- sample{17, 0, tsdbutil.GenerateTestHistogram(5), nil},
- sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
- sample{19, 0, tsdbutil.GenerateTestHistogram(7), nil},
- sample{20, 0, tsdbutil.GenerateTestHistogram(5), nil},
- sample{21, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 8, 0, tsdbutil.GenerateTestHistogram(9), nil},
+ sample{0, 12, 0, tsdbutil.GenerateTestHistogram(5), nil},
+ sample{0, 15, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 16, 0, tsdbutil.GenerateTestHistogram(7), nil},
+ sample{0, 17, 0, tsdbutil.GenerateTestHistogram(5), nil},
+ sample{0, 18, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 19, 0, tsdbutil.GenerateTestHistogram(7), nil},
+ sample{0, 20, 0, tsdbutil.GenerateTestHistogram(5), nil},
+ sample{0, 21, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
- sample{8, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(9)), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 8, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(9)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{12, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
- sample{15, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
- sample{16, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
+ sample{0, 12, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
+ sample{0, 15, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
+ sample{0, 16, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{17, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
+ sample{0, 17, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
- sample{19, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
+ sample{0, 18, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 19, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{20, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
- sample{21, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
+ sample{0, 20, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
+ sample{0, 21, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{
@@ -1581,56 +1585,56 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "float histogram iterables with counter resets",
samples: [][]chunks.Sample{
{
- sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
- sample{8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)},
+ sample{0, 7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
+ sample{0, 8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)},
// Counter reset should be detected when chunks are created from the iterable.
- sample{12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
- sample{15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
- sample{16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
+ sample{0, 12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
+ sample{0, 15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
// Counter reset should be detected when chunks are created from the iterable.
- sample{17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
+ sample{0, 17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
},
{
- sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
- sample{19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
+ sample{0, 18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
// Counter reset should be detected when chunks are created from the iterable.
- sample{20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
- sample{21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
+ sample{0, 21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
},
expected: []chunks.Sample{
- sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
- sample{8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)},
- sample{12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
- sample{15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
- sample{16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
- sample{17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
- sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
- sample{19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
- sample{20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
- sample{21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
+ sample{0, 8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)},
+ sample{0, 12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
+ sample{0, 15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
+ sample{0, 17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
+ sample{0, 18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
+ sample{0, 20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
+ sample{0, 21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
- sample{8, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(9))},
+ sample{0, 7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
+ sample{0, 8, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(9))},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{12, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
- sample{15, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
- sample{16, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))},
+ sample{0, 12, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
+ sample{0, 15, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
+ sample{0, 16, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{17, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
+ sample{0, 17, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
- sample{19, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))},
+ sample{0, 18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
+ sample{0, 19, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{20, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
- sample{21, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
+ sample{0, 20, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
+ sample{0, 21, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
}),
},
expectedMinMaxTimes: []minMaxTimes{
@@ -1650,61 +1654,61 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
name: "iterables with mixed encodings and counter resets",
samples: [][]chunks.Sample{
{
- sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
- sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
- sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
- sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
- sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
- sample{12, 13, nil, nil},
- sample{13, 14, nil, nil},
- sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 8, 0, tsdbutil.GenerateTestHistogram(9), nil},
+ sample{0, 9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
+ sample{0, 10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
+ sample{0, 11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
+ sample{0, 12, 13, nil, nil},
+ sample{0, 13, 14, nil, nil},
+ sample{0, 14, 0, tsdbutil.GenerateTestHistogram(8), nil},
// Counter reset should be detected when chunks are created from the iterable.
- sample{15, 0, tsdbutil.GenerateTestHistogram(7), nil},
+ sample{0, 15, 0, tsdbutil.GenerateTestHistogram(7), nil},
},
{
- sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
- sample{19, 45, nil, nil},
+ sample{0, 18, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 19, 45, nil, nil},
},
},
expected: []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
- sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
- sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
- sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
- sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
- sample{12, 13, nil, nil},
- sample{13, 14, nil, nil},
- sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
- sample{15, 0, tsdbutil.GenerateTestHistogram(7), nil},
- sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
- sample{19, 45, nil, nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 8, 0, tsdbutil.GenerateTestHistogram(9), nil},
+ sample{0, 9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
+ sample{0, 10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
+ sample{0, 11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
+ sample{0, 12, 13, nil, nil},
+ sample{0, 13, 14, nil, nil},
+ sample{0, 14, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 15, 0, tsdbutil.GenerateTestHistogram(7), nil},
+ sample{0, 18, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 19, 45, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
- sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
- sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
+ sample{0, 7, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 8, 0, tsdbutil.GenerateTestHistogram(9), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
- sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
- sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
+ sample{0, 9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
+ sample{0, 10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
+ sample{0, 11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{12, 13, nil, nil},
- sample{13, 14, nil, nil},
+ sample{0, 12, 13, nil, nil},
+ sample{0, 13, 14, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
+ sample{0, 14, 0, tsdbutil.GenerateTestHistogram(8), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{15, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
+ sample{0, 15, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
+ sample{0, 18, 0, tsdbutil.GenerateTestHistogram(6), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
- sample{19, 45, nil, nil},
+ sample{0, 19, 45, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{
@@ -1845,8 +1849,8 @@ func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
valType: chunkenc.ValFloat,
chks: [][]chunks.Sample{
{},
- {sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
- {sample{4, 4, nil, nil}, sample{5, 5, nil, nil}},
+ {sample{0, 1, 1, nil, nil}, sample{0, 2, 2, nil, nil}, sample{0, 3, 3, nil, nil}},
+ {sample{0, 4, 4, nil, nil}, sample{0, 5, 5, nil, nil}},
},
},
{
@@ -1854,8 +1858,8 @@ func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
valType: chunkenc.ValHistogram,
chks: [][]chunks.Sample{
{},
- {sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}},
- {sample{4, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(5), nil}},
+ {sample{0, 1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{0, 2, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{0, 3, 0, tsdbutil.GenerateTestHistogram(3), nil}},
+ {sample{0, 4, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{0, 5, 0, tsdbutil.GenerateTestHistogram(5), nil}},
},
},
{
@@ -1863,8 +1867,8 @@ func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
valType: chunkenc.ValFloatHistogram,
chks: [][]chunks.Sample{
{},
- {sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}},
- {sample{4, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}},
+ {sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{0, 2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{0, 3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}},
+ {sample{0, 4, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{0, 5, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}},
},
},
}
@@ -1898,7 +1902,7 @@ func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
valType: chunkenc.ValFloat,
chks: [][]chunks.Sample{
{},
- {sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
+ {sample{0, 1, 2, nil, nil}, sample{0, 3, 4, nil, nil}, sample{0, 5, 6, nil, nil}, sample{0, 7, 8, nil, nil}},
{},
},
},
@@ -1907,7 +1911,7 @@ func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
valType: chunkenc.ValHistogram,
chks: [][]chunks.Sample{
{},
- {sample{1, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}},
+ {sample{0, 1, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{0, 3, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{0, 5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{0, 7, 0, tsdbutil.GenerateTestHistogram(8), nil}},
{},
},
},
@@ -1916,7 +1920,7 @@ func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
valType: chunkenc.ValFloatHistogram,
chks: [][]chunks.Sample{
{},
- {sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
+ {sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{0, 3, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{0, 5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{0, 7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
{},
},
},
@@ -1948,21 +1952,21 @@ func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) {
name: "float",
valType: chunkenc.ValFloat,
chks: [][]chunks.Sample{
- {sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}},
+ {sample{0, 1, 6, nil, nil}, sample{0, 5, 6, nil, nil}, sample{0, 6, 8, nil, nil}},
},
},
{
name: "histogram",
valType: chunkenc.ValHistogram,
chks: [][]chunks.Sample{
- {sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{6, 0, tsdbutil.GenerateTestHistogram(8), nil}},
+ {sample{0, 1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{0, 5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{0, 6, 0, tsdbutil.GenerateTestHistogram(8), nil}},
},
},
{
name: "float histogram",
valType: chunkenc.ValFloatHistogram,
chks: [][]chunks.Sample{
- {sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
+ {sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{0, 5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{0, 6, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
},
},
}
@@ -1991,21 +1995,21 @@ func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) {
name: "float",
valType: chunkenc.ValFloat,
chks: [][]chunks.Sample{
- {sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
+ {sample{0, 1, 6, nil, nil}, sample{0, 5, 6, nil, nil}, sample{0, 7, 8, nil, nil}},
},
},
{
name: "histogram",
valType: chunkenc.ValHistogram,
chks: [][]chunks.Sample{
- {sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}},
+ {sample{0, 1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{0, 5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{0, 7, 0, tsdbutil.GenerateTestHistogram(8), nil}},
},
},
{
name: "float histogram",
valType: chunkenc.ValFloatHistogram,
chks: [][]chunks.Sample{
- {sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
+ {sample{0, 1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{0, 5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{0, 7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
},
},
}
@@ -2096,7 +2100,7 @@ func TestDeletedIterator(t *testing.T) {
for i := range 1000 {
act[i].t = int64(i)
act[i].f = rand.Float64()
- app.Append(act[i].t, act[i].f)
+ app.Append(0, act[i].t, act[i].f)
}
cases := []struct {
@@ -2156,7 +2160,7 @@ func TestDeletedIterator_WithSeek(t *testing.T) {
for i := range 1000 {
act[i].t = int64(i)
act[i].f = float64(i)
- app.Append(act[i].t, act[i].f)
+ app.Append(0, act[i].t, act[i].f)
}
cases := []struct {
diff --git a/tsdb/repair.go b/tsdb/repair.go
index 0d9d449a40..4ef69c80ed 100644
--- a/tsdb/repair.go
+++ b/tsdb/repair.go
@@ -15,13 +15,13 @@ package tsdb
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
)
@@ -82,20 +82,22 @@ func repairBadIndexVersion(logger *slog.Logger, dir string) error {
// Set the 5th byte to 2 to indicate the correct file format version.
if _, err := repl.WriteAt([]byte{2}, 4); err != nil {
- errs := tsdb_errors.NewMulti(
- fmt.Errorf("rewrite of index.repaired for block dir: %v: %w", d, err))
- if err := repl.Close(); err != nil {
- errs.Add(fmt.Errorf("close: %w", err))
+ errs := []error{
+ fmt.Errorf("rewrite of index.repaired for block dir: %v: %w", d, err),
}
- return errs.Err()
+ if err := repl.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("close: %w", err))
+ }
+ return errors.Join(errs...)
}
if err := repl.Sync(); err != nil {
- errs := tsdb_errors.NewMulti(
- fmt.Errorf("sync of index.repaired for block dir: %v: %w", d, err))
- if err := repl.Close(); err != nil {
- errs.Add(fmt.Errorf("close: %w", err))
+ errs := []error{
+ fmt.Errorf("sync of index.repaired for block dir: %v: %w", d, err),
}
- return errs.Err()
+ if err := repl.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("close: %w", err))
+ }
+ return errors.Join(errs...)
}
if err := repl.Close(); err != nil {
return fmt.Errorf("close repaired index for block dir: %v: %w", d, err)
diff --git a/tsdb/tsdbutil/remove_tmp_dirs.go b/tsdb/tsdbutil/remove_tmp_dirs.go
new file mode 100644
index 0000000000..a95db3159e
--- /dev/null
+++ b/tsdb/tsdbutil/remove_tmp_dirs.go
@@ -0,0 +1,45 @@
+// Copyright 2018 The Prometheus Authors
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdbutil
+
+import (
+ "io/fs"
+ "log/slog"
+ "os"
+ "path/filepath"
+)
+
+// RemoveTmpDirs attempts to remove directories in the specified directory which match the isTmpDir predicate.
+// Errors encountered during reading the directory that other than non-existence are returned. All other errors
+// encountered during removal of tmp directories are logged but do not cause early termination.
+func RemoveTmpDirs(l *slog.Logger, dir string, isTmpDir func(fi fs.DirEntry) bool) error {
+ files, err := os.ReadDir(dir)
+ if os.IsNotExist(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ for _, f := range files {
+ if isTmpDir(f) {
+ if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil {
+ l.Error("failed to delete tmp dir", "dir", filepath.Join(dir, f.Name()), "err", err)
+ continue
+ }
+ l.Info("Found and deleted tmp dir", "dir", filepath.Join(dir, f.Name()))
+ }
+ }
+ return nil
+}
diff --git a/tsdb/tsdbutil/remove_tmp_dirs_test.go b/tsdb/tsdbutil/remove_tmp_dirs_test.go
new file mode 100644
index 0000000000..4ab282d3b3
--- /dev/null
+++ b/tsdb/tsdbutil/remove_tmp_dirs_test.go
@@ -0,0 +1,124 @@
+// Copyright 2018 The Prometheus Authors
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdbutil
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRemoveTmpDirs(t *testing.T) {
+ tests := []struct {
+ name string
+ isTmpDir func(fi fs.DirEntry) bool
+ setup func(t *testing.T, dir string)
+ expectedDirs []string // Directories that should remain after cleanup
+ }{
+ {
+ name: "remove directories with tmp prefix",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasPrefix(fi.Name(), "tmp")
+ },
+ setup: func(t *testing.T, dir string) {
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "tmpdir1"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "tmpdir2"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "normaldir"), 0o755))
+ },
+ expectedDirs: []string{"normaldir"},
+ },
+ {
+ name: "remove directories with specific suffix",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasSuffix(fi.Name(), ".tmp")
+ },
+ setup: func(t *testing.T, dir string) {
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "data.tmp"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "cache.tmp"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "permanent"), 0o755))
+ },
+ expectedDirs: []string{"permanent"},
+ },
+ {
+ name: "no temporary directories to remove",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasPrefix(fi.Name(), "tmp")
+ },
+ setup: func(t *testing.T, dir string) {
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "normaldir1"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "normaldir2"), 0o755))
+ },
+ expectedDirs: []string{"normaldir1", "normaldir2"},
+ },
+ {
+ name: "empty directory",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasPrefix(fi.Name(), "tmp")
+ },
+ setup: func(_ *testing.T, _ string) {}, // No setup needed - directory is empty
+ expectedDirs: []string{},
+ },
+ {
+ name: "directory with files only (no directories)",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasPrefix(fi.Name(), "tmp")
+ },
+ setup: func(t *testing.T, dir string) {
+ require.NoError(t, os.WriteFile(filepath.Join(dir, "tmpfile1.txt"), []byte("test"), 0o644))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, "tmpfile2.txt"), []byte("test"), 0o644))
+ },
+ expectedDirs: []string{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ testDir := t.TempDir()
+
+ if tt.setup != nil {
+ tt.setup(t, testDir)
+ }
+
+ require.NoError(t, RemoveTmpDirs(promslog.NewNopLogger(), testDir, tt.isTmpDir))
+
+ entries, err := os.ReadDir(testDir)
+ require.NoError(t, err)
+
+ // Get actual remaining directories
+ var actualDirs []string
+ for _, entry := range entries {
+ if entry.IsDir() {
+ actualDirs = append(actualDirs, entry.Name())
+ }
+ }
+
+ require.ElementsMatch(t, tt.expectedDirs, actualDirs, "Remaining directories don't match expected")
+ })
+ }
+}
+
+func TestRemoveTmpDirs_NonExistentDirectory(t *testing.T) {
+ testDir := t.TempDir()
+ nonExistent := filepath.Join(testDir, "does_not_exist")
+
+ require.NoError(t, RemoveTmpDirs(promslog.NewNopLogger(), nonExistent, func(_ fs.DirEntry) bool {
+ return true
+ }))
+}
diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go
index 6742141fbc..3a4e194fec 100644
--- a/tsdb/wlog/checkpoint.go
+++ b/tsdb/wlog/checkpoint.go
@@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"log/slog"
"math"
"os"
@@ -31,6 +32,7 @@ import (
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
)
// CheckpointStats returns stats about a created checkpoint.
@@ -80,8 +82,16 @@ func DeleteCheckpoints(dir string, maxIndex int) error {
return errors.Join(errs...)
}
-// CheckpointPrefix is the prefix used for checkpoint files.
-const CheckpointPrefix = "checkpoint."
+// checkpointTempFileSuffix is the suffix used when creating temporary checkpoint files.
+const checkpointTempFileSuffix = ".tmp"
+
+// DeleteTempCheckpoints deletes all temporary checkpoint directories in the given directory.
+func DeleteTempCheckpoints(logger *slog.Logger, dir string) error {
+ if err := tsdbutil.RemoveTmpDirs(logger, dir, isTempDir); err != nil {
+ return fmt.Errorf("remove previous temporary checkpoint dirs: %w", err)
+ }
+ return nil
+}
// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
// It includes the most recent checkpoint if it exists.
@@ -123,13 +133,13 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
defer sgmReader.Close()
}
- cpdir := checkpointDir(w.Dir(), to)
- cpdirtmp := cpdir + ".tmp"
-
- if err := os.RemoveAll(cpdirtmp); err != nil {
- return nil, fmt.Errorf("remove previous temporary checkpoint dir: %w", err)
+ if err := DeleteTempCheckpoints(logger, w.Dir()); err != nil {
+ return nil, err
}
+ cpdir := checkpointDir(w.Dir(), to)
+ cpdirtmp := cpdir + checkpointTempFileSuffix
+
if err := os.MkdirAll(cpdirtmp, 0o777); err != nil {
return nil, fmt.Errorf("create checkpoint dir: %w", err)
}
@@ -394,8 +404,11 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
return stats, nil
}
+// checkpointPrefix is the prefix used for checkpoint files.
+const checkpointPrefix = "checkpoint."
+
func checkpointDir(dir string, i int) string {
- return filepath.Join(dir, fmt.Sprintf(CheckpointPrefix+"%08d", i))
+ return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i))
}
type checkpointRef struct {
@@ -411,13 +424,13 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
for i := range files {
fi := files[i]
- if !strings.HasPrefix(fi.Name(), CheckpointPrefix) {
+ if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
continue
}
if !fi.IsDir() {
return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name())
}
- idx, err := strconv.Atoi(fi.Name()[len(CheckpointPrefix):])
+ idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
if err != nil {
continue
}
@@ -431,3 +444,7 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
return refs, nil
}
+
+func isTempDir(fi fs.DirEntry) bool {
+ return strings.HasPrefix(fi.Name(), checkpointPrefix) && strings.HasSuffix(fi.Name(), checkpointTempFileSuffix)
+}
diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go
index 97ca2e768d..a348239ec7 100644
--- a/tsdb/wlog/checkpoint_test.go
+++ b/tsdb/wlog/checkpoint_test.go
@@ -417,3 +417,81 @@ func TestCheckpointNoTmpFolderAfterError(t *testing.T) {
})
require.NoError(t, err)
}
+
+func TestCheckpointDeletesTemporaryCheckpoints(t *testing.T) {
+ dir := t.TempDir()
+
+ // Create one tmp checkpoint directory
+ require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00001000.tmp"), 0o777))
+
+ w, err := New(nil, nil, dir, compression.None)
+ require.NoError(t, err)
+ defer w.Close()
+
+ _, err = Checkpoint(promslog.NewNopLogger(), w, 0, 1000, func(_ chunks.HeadSeriesRef) bool { return true }, 1000)
+ require.NoError(t, err)
+
+ files, err := os.ReadDir(dir)
+ require.NoError(t, err)
+
+ var actualDirectories []string
+ for _, f := range files {
+ if !f.IsDir() {
+ continue
+ }
+ actualDirectories = append(actualDirectories, f.Name())
+ }
+ require.Equal(t, []string{"checkpoint.00001000"}, actualDirectories)
+}
+
+func TestDeleteTempCheckpoints(t *testing.T) {
+ testCases := []struct {
+ name string
+ checkpointDirectoriesToCreate []string
+ expectedDirectories []string
+ }{
+ {
+ name: "no tmp checkpoints",
+ checkpointDirectoriesToCreate: nil,
+ expectedDirectories: nil,
+ },
+ {
+ name: "one tmp checkpoint",
+ checkpointDirectoriesToCreate: []string{"checkpoint.00001000.tmp"},
+ expectedDirectories: nil,
+ },
+ {
+ name: "many tmp checkpoints",
+ checkpointDirectoriesToCreate: []string{"checkpoint.00000001.tmp", "checkpoint.00001000.tmp", "checkpoint.00002000.tmp"},
+ expectedDirectories: nil,
+ },
+ {
+ name: "mix of tmp and regular checkpoints",
+ checkpointDirectoriesToCreate: []string{"checkpoint.00000001", "checkpoint.00000001.tmp", "checkpoint.00001000.tmp", "checkpoint.00002000"},
+ expectedDirectories: []string{"checkpoint.00000001", "checkpoint.00002000"},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ dir := t.TempDir()
+ for _, fn := range tc.checkpointDirectoriesToCreate {
+ require.NoError(t, os.MkdirAll(filepath.Join(dir, fn), 0o777))
+ }
+
+ require.NoError(t, DeleteTempCheckpoints(promslog.NewNopLogger(), dir))
+
+ files, err := os.ReadDir(dir)
+ require.NoError(t, err)
+
+ var actualDirectories []string
+ for _, f := range files {
+ if !f.IsDir() {
+ continue
+ }
+ actualDirectories = append(actualDirectories, f.Name())
+ }
+ require.Equal(t, tc.expectedDirectories, actualDirectories)
+ })
+ }
+}
diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go
index 971423e5cc..9381fe99b5 100644
--- a/tsdb/wlog/reader_test.go
+++ b/tsdb/wlog/reader_test.go
@@ -550,7 +550,7 @@ func TestReaderData(t *testing.T) {
}
}
-// closeAll closes all given closers while recording error in MultiError.
+// closeAll closes all given closers while recording all errors.
func closeAll(cs []io.Closer) error {
var errs []error
for _, c := range cs {
diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go
index a68b2ba4fc..550b9fcdc5 100644
--- a/util/annotations/annotations.go
+++ b/util/annotations/annotations.go
@@ -16,7 +16,7 @@ package annotations
import (
"errors"
"fmt"
- "maps"
+ "time"
"github.com/prometheus/common/model"
@@ -43,12 +43,18 @@ func (a *Annotations) Add(err error) Annotations {
if *a == nil {
*a = Annotations{}
}
+ if prevErr, exists := (*a)[err.Error()]; exists {
+ var anErr annoError
+ if errors.As(err, &anErr) {
+ err = anErr.Merge(prevErr)
+ }
+ }
(*a)[err.Error()] = err
return *a
}
-// Merge adds the contents of the second annotation to the first, modifying
-// the first in-place, and returns the merged first Annotation for convenience.
+// Merge adds the contents of the second set of Annotations to the first, modifying
+// the first in-place, and returns the merged first Annotations for convenience.
func (a *Annotations) Merge(aa Annotations) Annotations {
if *a == nil {
if aa == nil {
@@ -56,7 +62,15 @@ func (a *Annotations) Merge(aa Annotations) Annotations {
}
*a = Annotations{}
}
- maps.Copy((*a), aa)
+ for key, val := range aa {
+ if prevVal, exists := (*a)[key]; exists {
+ var anErr annoError
+ if errors.As(val, &anErr) {
+ val = anErr.Merge(prevVal)
+ }
+ }
+ (*a)[key] = val
+ }
return *a
}
@@ -81,10 +95,9 @@ func (a Annotations) AsStrings(query string, maxWarnings, maxInfos int) (warning
warnSkipped := 0
infoSkipped := 0
for _, err := range a {
- var anErr annoErr
+ var anErr annoError
if errors.As(err, &anErr) {
- anErr.Query = query
- err = anErr
+ anErr.SetQuery(query)
}
switch {
case errors.Is(err, PromQLInfo):
@@ -157,23 +170,48 @@ var (
MismatchedCustomBucketsHistogramsInfo = fmt.Errorf("%w: mismatched custom buckets were reconciled during", PromQLInfo)
)
+// annoError extends the standard error interface to provide additional functionality
+// for PromQL annotations, allowing them to be merged with other similar errors.
+type annoError interface {
+ error
+ // Necessary so we can use errors.Is() to disambiguate between warning and info.
+ Unwrap() error
+ // Necessary when we want to show position info. Also, this is only called at the end when we call
+ // AsStrings(), so before that we deduplicate based on the raw error string when query is empty,
+ // and the full error string with details will only be shown in the end when query is set.
+ SetQuery(string)
+ // We can define custom merge functions to merge individual annotations of the same type if they have
+ // the same raw error string.
+ Merge(error) error
+}
+
type annoErr struct {
PositionRange posrange.PositionRange
Err error
Query string
}
-func (e annoErr) Error() string {
+func (e *annoErr) Error() string {
if e.Query == "" {
return e.Err.Error()
}
return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0))
}
-func (e annoErr) Unwrap() error {
+func (e *annoErr) Unwrap() error {
return e.Err
}
+func (e *annoErr) SetQuery(query string) {
+ e.Query = query
+}
+
+// We do not merge generic annotations, instead we just ignore the provided error
+// and return the original.
+func (e *annoErr) Merge(_ error) error {
+ return e
+}
+
func maybeAddMetricName(anno error, metricName string) error {
if metricName == "" {
return anno
@@ -184,7 +222,7 @@ func maybeAddMetricName(anno error, metricName string) error {
// NewInvalidQuantileWarning is used when the user specifies an invalid quantile
// value, i.e. a float that is outside the range [0, 1] or NaN.
func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w, got %g", InvalidQuantileWarning, q),
}
@@ -193,7 +231,7 @@ func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
// NewInvalidRatioWarning is used when the user specifies an invalid ratio
// value, i.e. a float that is outside the range [-1, 1] or NaN.
func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w, got %g, capping to %g", InvalidRatioWarning, q, to),
}
@@ -203,7 +241,7 @@ func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
// of a classic histogram.
func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error {
anno := maybeAddMetricName(fmt.Errorf("%w of %q", BadBucketLabelWarning, label), metricName)
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: anno,
}
@@ -213,7 +251,7 @@ func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRan
// float samples and histogram samples for functions that do not support mixed
// samples.
func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w metric name %q", MixedFloatsHistogramsWarning, metricName),
}
@@ -222,7 +260,7 @@ func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRan
// NewMixedFloatsHistogramsAggWarning is used when the queried series includes both
// float samples and histogram samples in an aggregation.
func NewMixedFloatsHistogramsAggWarning(pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w aggregation", MixedFloatsHistogramsWarning),
}
@@ -231,7 +269,7 @@ func NewMixedFloatsHistogramsAggWarning(pos posrange.PositionRange) error {
// NewMixedClassicNativeHistogramsWarning is used when the queried series includes
// both classic and native histograms.
func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(MixedClassicNativeHistogramsWarning, metricName),
}
@@ -240,7 +278,7 @@ func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.Posi
// NewNativeHistogramNotCounterWarning is used when histogramRate is called
// with isCounter set to true on a gauge histogram.
func NewNativeHistogramNotCounterWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", NativeHistogramNotCounterWarning, metricName),
}
@@ -249,7 +287,7 @@ func NewNativeHistogramNotCounterWarning(metricName string, pos posrange.Positio
// NewNativeHistogramNotGaugeWarning is used when histogramRate is called
// with isCounter set to false on a counter histogram.
func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", NativeHistogramNotGaugeWarning, metricName),
}
@@ -258,7 +296,7 @@ func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionR
// NewMixedExponentialCustomHistogramsWarning is used when the queried series includes
// histograms with both exponential and custom buckets schemas.
func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", MixedExponentialCustomHistogramsWarning, metricName),
}
@@ -267,7 +305,7 @@ func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.
// NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not
// have the suffixes _total, _sum, _count, or _bucket.
func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName),
}
@@ -276,25 +314,84 @@ func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) er
// NewPossibleNonCounterLabelInfo is used when a named counter metric with only float samples does not
// have the __type__ label set to "counter".
func NewPossibleNonCounterLabelInfo(metricName, typeLabel string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w, got %q: %q", PossibleNonCounterLabelInfo, typeLabel, metricName),
}
}
+type histogramQuantileForcedMonotonicityErr struct {
+ PositionRange posrange.PositionRange
+ Err error
+ Query string
+ minTs, maxTs int64
+ minBucket, maxBucket, maxDiff float64
+ count int
+}
+
+func (e *histogramQuantileForcedMonotonicityErr) Error() string {
+ if e.Query == "" {
+ return e.Err.Error()
+ }
+ startTime := time.Unix(e.minTs/1000, 0).UTC().Format(time.RFC3339)
+ endTime := time.Unix(e.maxTs/1000, 0).UTC().Format(time.RFC3339)
+ return fmt.Sprintf("%s, from buckets %g to %g, with a max diff of %.2g, over %d samples from %s to %s (%s)", e.Err, e.minBucket, e.maxBucket, e.maxDiff, e.count+1, startTime, endTime, e.PositionRange.StartPosInput(e.Query, 0))
+}
+
+func (e *histogramQuantileForcedMonotonicityErr) Unwrap() error {
+ return e.Err
+}
+
+func (e *histogramQuantileForcedMonotonicityErr) SetQuery(query string) {
+ e.Query = query
+}
+
+func (e *histogramQuantileForcedMonotonicityErr) Merge(other error) error {
+ o := &histogramQuantileForcedMonotonicityErr{}
+ ok := errors.As(other, &o)
+ if !ok {
+ return e
+ }
+ if e.Err.Error() != o.Err.Error() {
+ return e
+ }
+ if e.minTs < o.minTs {
+ o.minTs = e.minTs
+ }
+ if e.maxTs > o.maxTs {
+ o.maxTs = e.maxTs
+ }
+ if e.minBucket < o.minBucket {
+ o.minBucket = e.minBucket
+ }
+ if e.maxBucket > o.maxBucket {
+ o.maxBucket = e.maxBucket
+ }
+ if e.maxDiff > o.maxDiff {
+ o.maxDiff = e.maxDiff
+ }
+ o.count += e.count + 1
+ return o
+}
+
// NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to
// histogram_quantile needs to be forced to be monotonic.
-func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange, ts int64, minBucket, maxBucket, maxDiff float64) error {
+ return &histogramQuantileForcedMonotonicityErr{
PositionRange: pos,
Err: maybeAddMetricName(HistogramQuantileForcedMonotonicityInfo, metricName),
+ minTs: ts,
+ maxTs: ts,
+ minBucket: minBucket,
+ maxBucket: maxBucket,
+ maxDiff: maxDiff,
}
}
// NewIncompatibleTypesInBinOpInfo is used if binary operators act on a
// combination of types that doesn't work and therefore returns no result.
func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q: %s %s %s", IncompatibleTypesInBinOpInfo, operator, lhsType, operator, rhsType),
}
@@ -303,7 +400,7 @@ func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posr
// NewHistogramIgnoredInAggregationInfo is used when a histogram is ignored by
// an aggregation operator that cannot handle histograms.
func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation),
}
@@ -312,7 +409,7 @@ func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.Posit
// NewHistogramIgnoredInMixedRangeInfo is used when a histogram is ignored
// in a range vector which contains mix of floats and histograms.
func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName),
}
@@ -321,28 +418,28 @@ func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.Positio
// NewIncompatibleBucketLayoutInBinOpWarning is used if binary operators act on a
// combination of two incompatible histograms.
func NewIncompatibleBucketLayoutInBinOpWarning(operator string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %s", IncompatibleBucketLayoutInBinOpWarning, operator),
}
}
func NewNativeHistogramQuantileNaNResultInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(NativeHistogramQuantileNaNResultInfo, metricName),
}
}
func NewNativeHistogramQuantileNaNSkewInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(NativeHistogramQuantileNaNSkewInfo, metricName),
}
}
func NewNativeHistogramFractionNaNsInfo(metricName string, pos posrange.PositionRange) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: maybeAddMetricName(NativeHistogramFractionNaNsInfo, metricName),
}
@@ -368,7 +465,7 @@ func (op HistogramOperation) String() string {
// NewHistogramCounterResetCollisionWarning is used when two counter histograms are added or subtracted where one has
// a CounterReset hint and the other has NotCounterReset.
func NewHistogramCounterResetCollisionWarning(pos posrange.PositionRange, operation HistogramOperation) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %s", HistogramCounterResetCollisionWarning, operation.String()),
}
@@ -377,7 +474,7 @@ func NewHistogramCounterResetCollisionWarning(pos posrange.PositionRange, operat
// NewMismatchedCustomBucketsHistogramsInfo is used when the queried series includes
// custom buckets histograms with mismatched custom bounds that cause reconciling.
func NewMismatchedCustomBucketsHistogramsInfo(pos posrange.PositionRange, operation HistogramOperation) error {
- return annoErr{
+ return &annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %s", MismatchedCustomBucketsHistogramsInfo, operation.String()),
}
diff --git a/util/annotations/annotations_test.go b/util/annotations/annotations_test.go
new file mode 100644
index 0000000000..39fb8e62f4
--- /dev/null
+++ b/util/annotations/annotations_test.go
@@ -0,0 +1,114 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package annotations
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/promql/parser/posrange"
+)
+
+func TestAnnotations_AsStrings(t *testing.T) {
+ var annos Annotations
+ pos := posrange.PositionRange{Start: 3, End: 8}
+
+ annos.Add(errors.New("this is a non-annotation error"))
+
+ annos.Add(NewInvalidRatioWarning(1.1, 100, pos))
+ annos.Add(NewInvalidRatioWarning(1.2, 123, pos))
+
+ annos.Add(newTestCustomWarning(1.5, pos, 12, 14))
+ annos.Add(newTestCustomWarning(1.5, pos, 10, 20))
+ annos.Add(newTestCustomWarning(1.5, pos, 5, 15))
+ annos.Add(newTestCustomWarning(1.5, pos, 12, 14))
+
+ annos.Add(NewHistogramIgnoredInAggregationInfo("sum", pos))
+
+ annos.Add(NewHistogramQuantileForcedMonotonicityInfo("series_1", pos, 1735084800000, 5, 50, 5.5))
+ annos.Add(NewHistogramQuantileForcedMonotonicityInfo("series_1", pos, 1703462400000, 10, 100, 10))
+ annos.Add(NewHistogramQuantileForcedMonotonicityInfo("series_1", pos, 1733011200000, 2.5, 75, 7.5))
+
+ warnings, infos := annos.AsStrings("lorem ipsum dolor sit amet", 0, 0)
+ require.ElementsMatch(t, warnings, []string{
+ "this is a non-annotation error",
+ "PromQL warning: ratio value should be between -1 and 1, got 1.1, capping to 100 (1:4)",
+ "PromQL warning: ratio value should be between -1 and 1, got 1.2, capping to 123 (1:4)",
+ "PromQL warning: custom value set to 1.5, 4 instances with smallest 5 and biggest 20 (1:4)",
+ })
+ require.ElementsMatch(t, infos, []string{
+ "PromQL info: ignored histogram in sum aggregation (1:4)",
+ `PromQL info: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name "series_1", from buckets 2.5 to 100, with a max diff of 10, over 3 samples from 2023-12-25T00:00:00Z to 2024-12-25T00:00:00Z (1:4)`,
+ })
+}
+
+type testCustomError struct {
+ PositionRange posrange.PositionRange
+ Err error
+ Query string
+ Min []float64
+ Max []float64
+ Count int
+}
+
+func (e *testCustomError) Error() string {
+ if e.Query == "" {
+ return e.Err.Error()
+ }
+ return fmt.Sprintf("%s, %d instances with smallest %g and biggest %g (%s)", e.Err, e.Count+1, e.Min[0], e.Max[0], e.PositionRange.StartPosInput(e.Query, 0))
+}
+
+func (e *testCustomError) Unwrap() error {
+ return e.Err
+}
+
+func (e *testCustomError) SetQuery(query string) {
+ e.Query = query
+}
+
+func (e *testCustomError) Merge(other error) error {
+ o := &testCustomError{}
+ ok := errors.As(other, &o)
+ if !ok {
+ return e
+ }
+ if e.Err.Error() != o.Err.Error() || len(e.Min) != len(o.Min) || len(e.Max) != len(o.Max) {
+ return e
+ }
+ for i, aMin := range e.Min {
+ if aMin < o.Min[i] {
+ o.Min[i] = aMin
+ }
+ }
+ for i, aMax := range e.Max {
+ if aMax > o.Max[i] {
+ o.Max[i] = aMax
+ }
+ }
+ o.Count += e.Count + 1
+ return o
+}
+
+func newTestCustomWarning(q float64, pos posrange.PositionRange, smallest, largest float64) error {
+ testCustomWarning := fmt.Errorf("%w: custom value set to", PromQLWarning)
+ return &testCustomError{
+ PositionRange: pos,
+ Err: fmt.Errorf("%w %g", testCustomWarning, q),
+ Min: []float64{smallest},
+ Max: []float64{largest},
+ }
+}
diff --git a/util/fuzzing/corpus.go b/util/fuzzing/corpus.go
index 52930b2669..025e4dfd7a 100644
--- a/util/fuzzing/corpus.go
+++ b/util/fuzzing/corpus.go
@@ -14,7 +14,6 @@
package fuzzing
import (
- "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
)
@@ -58,16 +57,6 @@ func GetCorpusForFuzzParseMetricSelector() []string {
// GetCorpusForFuzzParseExpr returns the seed corpus for FuzzParseExpr.
func GetCorpusForFuzzParseExpr() ([]string, error) {
- // Enable experimental features to parse all test expressions.
- parser.EnableExperimentalFunctions = true
- parser.ExperimentalDurationExpr = true
- parser.EnableExtendedRangeSelectors = true
- defer func() {
- parser.EnableExperimentalFunctions = false
- parser.ExperimentalDurationExpr = false
- parser.EnableExtendedRangeSelectors = false
- }()
-
// Get built-in test expressions.
builtInExprs, err := promqltest.GetBuiltInExprs()
if err != nil {
diff --git a/util/fuzzing/fuzz_test.go b/util/fuzzing/fuzz_test.go
index 8356fdad71..ec6d7c4e72 100644
--- a/util/fuzzing/fuzz_test.go
+++ b/util/fuzzing/fuzz_test.go
@@ -33,6 +33,8 @@ const (
// Use package-scope symbol table to avoid memory allocation on every fuzzing operation.
var symbolTable = labels.NewSymbolTable()
+var fuzzParser = parser.NewParser(parser.Options{})
+
// FuzzParseMetricText fuzzes the metric parser with "text/plain" content type.
//
// Note that this is not the parser for the text-based exposition-format; that
@@ -109,7 +111,7 @@ func FuzzParseMetricSelector(f *testing.F) {
if len(in) > maxInputSize {
t.Skip()
}
- _, err := parser.ParseMetricSelector(in)
+ _, err := fuzzParser.ParseMetricSelector(in)
// We don't care about errors, just that we don't panic.
_ = err
})
@@ -117,15 +119,6 @@ func FuzzParseMetricSelector(f *testing.F) {
// FuzzParseExpr fuzzes the expression parser.
func FuzzParseExpr(f *testing.F) {
- parser.EnableExperimentalFunctions = true
- parser.ExperimentalDurationExpr = true
- parser.EnableExtendedRangeSelectors = true
- f.Cleanup(func() {
- parser.EnableExperimentalFunctions = false
- parser.ExperimentalDurationExpr = false
- parser.EnableExtendedRangeSelectors = false
- })
-
// Add seed corpus from built-in test expressions
corpus, err := GetCorpusForFuzzParseExpr()
if err != nil {
@@ -139,11 +132,17 @@ func FuzzParseExpr(f *testing.F) {
f.Add(expr)
}
+ p := parser.NewParser(parser.Options{
+ EnableExperimentalFunctions: true,
+ ExperimentalDurationExpr: true,
+ EnableExtendedRangeSelectors: true,
+ EnableBinopFillModifiers: true,
+ })
f.Fuzz(func(t *testing.T, in string) {
if len(in) > maxInputSize {
t.Skip()
}
- _, err := parser.ParseExpr(in)
+ _, err := p.ParseExpr(in)
// We don't care about errors, just that we don't panic.
_ = err
})
diff --git a/util/kahansum/kahansum.go b/util/kahansum/kahansum.go
new file mode 100644
index 0000000000..d55defcb29
--- /dev/null
+++ b/util/kahansum/kahansum.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kahansum
+
+import "math"
+
+// Inc performs addition of two floating-point numbers using the Kahan summation algorithm.
+// We get incorrect results if this function is inlined; see https://github.com/prometheus/prometheus/issues/16714.
+//
+//go:noinline
+func Inc(inc, sum, c float64) (newSum, newC float64) {
+ t := sum + inc
+ switch {
+ case math.IsInf(t, 0):
+ c = 0
+
+ // Using Neumaier improvement, swap if next term larger than sum.
+ case math.Abs(sum) >= math.Abs(inc):
+ c += (sum - t) + inc
+ default:
+ c += (inc - t) + sum
+ }
+ return t, c
+}
+
+func Dec(dec, sum, c float64) (newSum, newC float64) {
+ return Inc(-dec, sum, c)
+}
diff --git a/util/strutil/strconv_test.go b/util/strutil/strconv_test.go
index b4b87ee816..362fa79a6a 100644
--- a/util/strutil/strconv_test.go
+++ b/util/strutil/strconv_test.go
@@ -36,6 +36,26 @@ var linkTests = []linkTest{
"/graph?g0.expr=sum%28incoming_http_requests_total%7Bsystem%3D%22trackmetadata%22%7D%29&g0.tab=0",
"/graph?g0.expr=sum%28incoming_http_requests_total%7Bsystem%3D%22trackmetadata%22%7D%29&g0.tab=1",
},
+ {
+ "up",
+ "/graph?g0.expr=up&g0.tab=0",
+ "/graph?g0.expr=up&g0.tab=1",
+ },
+ {
+ "rate(http_requests_total[5m])",
+ "/graph?g0.expr=rate%28http_requests_total%5B5m%5D%29&g0.tab=0",
+ "/graph?g0.expr=rate%28http_requests_total%5B5m%5D%29&g0.tab=1",
+ },
+ {
+ "",
+ "/graph?g0.expr=&g0.tab=0",
+ "/graph?g0.expr=&g0.tab=1",
+ },
+ {
+ "metric_name{label=\"value with spaces\"}",
+ "/graph?g0.expr=metric_name%7Blabel%3D%22value+with+spaces%22%7D&g0.tab=0",
+ "/graph?g0.expr=metric_name%7Blabel%3D%22value+with+spaces%22%7D&g0.tab=1",
+ },
}
func TestLink(t *testing.T) {
@@ -51,29 +71,158 @@ func TestLink(t *testing.T) {
}
func TestSanitizeLabelName(t *testing.T) {
- actual := SanitizeLabelName("fooClientLABEL")
- expected := "fooClientLABEL"
- require.Equal(t, expected, actual, "SanitizeLabelName failed for label (%s)", expected)
+ tests := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "valid label name",
+ input: "fooClientLABEL",
+ expected: "fooClientLABEL",
+ },
+ {
+ name: "label with special characters",
+ input: "barClient.LABEL$$##",
+ expected: "barClient_LABEL____",
+ },
+ {
+ name: "label starting with digit",
+ input: "123label",
+ expected: "123label",
+ },
+ {
+ name: "label with dashes",
+ input: "my-label-name",
+ expected: "my_label_name",
+ },
+ {
+ name: "label with spaces",
+ input: "my label name",
+ expected: "my_label_name",
+ },
+ {
+ name: "label with mixed case and numbers",
+ input: "Test123Label456",
+ expected: "Test123Label456",
+ },
+ {
+ name: "label with unicode characters",
+ input: "test-ñ-ü-label",
+ expected: "test_____label",
+ },
+ {
+ name: "empty string",
+ input: "",
+ expected: "",
+ },
+ {
+ name: "only underscores",
+ input: "___",
+ expected: "___",
+ },
+ {
+ name: "label with colons",
+ input: "namespace:metric_name",
+ expected: "namespace_metric_name",
+ },
+ }
- actual = SanitizeLabelName("barClient.LABEL$$##")
- expected = "barClient_LABEL____"
- require.Equal(t, expected, actual, "SanitizeLabelName failed for label (%s)", expected)
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := SanitizeLabelName(tt.input)
+ require.Equal(t, tt.expected, actual, "SanitizeLabelName(%q) = %q, want %q", tt.input, actual, tt.expected)
+ })
+ }
}
func TestSanitizeFullLabelName(t *testing.T) {
- actual := SanitizeFullLabelName("fooClientLABEL")
- expected := "fooClientLABEL"
- require.Equal(t, expected, actual, "SanitizeFullLabelName failed for label (%s)", expected)
+ tests := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "valid label name",
+ input: "fooClientLABEL",
+ expected: "fooClientLABEL",
+ },
+ {
+ name: "label with special characters",
+ input: "barClient.LABEL$$##",
+ expected: "barClient_LABEL____",
+ },
+ {
+ name: "label starting with digit",
+ input: "0zerothClient1LABEL",
+ expected: "_zerothClient1LABEL",
+ },
+ {
+ name: "empty string",
+ input: "",
+ expected: "_",
+ },
+ {
+ name: "label starting with multiple digits",
+ input: "123abc",
+ expected: "_23abc",
+ },
+ {
+ name: "label with dashes",
+ input: "my-label-name",
+ expected: "my_label_name",
+ },
+ {
+ name: "label with spaces",
+ input: "my label name",
+ expected: "my_label_name",
+ },
+ {
+ name: "label with numbers in middle",
+ input: "Test123Label456",
+ expected: "Test123Label456",
+ },
+ {
+ name: "single underscore",
+ input: "_",
+ expected: "_",
+ },
+ {
+ name: "label starting with underscore",
+ input: "_validLabel",
+ expected: "_validLabel",
+ },
+ {
+ name: "label with colons",
+ input: "namespace:metric_name",
+ expected: "namespace_metric_name",
+ },
+ {
+ name: "label with unicode characters",
+ input: "test-ñ-ü-label",
+ expected: "test_____label",
+ },
+ {
+ name: "only digits",
+ input: "12345",
+ expected: "_2345",
+ },
+ {
+ name: "label with mixed invalid characters at start",
+ input: "!@#test",
+ expected: "___test",
+ },
+ {
+ name: "label with consecutive digits at start",
+ input: "0123test",
+ expected: "_123test",
+ },
+ }
- actual = SanitizeFullLabelName("barClient.LABEL$$##")
- expected = "barClient_LABEL____"
- require.Equal(t, expected, actual, "SanitizeFullLabelName failed for label (%s)", expected)
-
- actual = SanitizeFullLabelName("0zerothClient1LABEL")
- expected = "_zerothClient1LABEL"
- require.Equal(t, expected, actual, "SanitizeFullLabelName failed for label (%s)", expected)
-
- actual = SanitizeFullLabelName("")
- expected = "_"
- require.Equal(t, expected, actual, "SanitizeFullLabelName failed for the empty label")
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := SanitizeFullLabelName(tt.input)
+ require.Equal(t, tt.expected, actual, "SanitizeFullLabelName(%q) = %q, want %q", tt.input, actual, tt.expected)
+ })
+ }
}
diff --git a/util/teststorage/appender.go b/util/teststorage/appender.go
index 058a09561c..f1d336c243 100644
--- a/util/teststorage/appender.go
+++ b/util/teststorage/appender.go
@@ -21,15 +21,20 @@ import (
"slices"
"strings"
"sync"
+ "testing"
+ "github.com/google/go-cmp/cmp"
"github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/testutil"
)
// Sample represents test, combined sample for mocking storage.AppenderV2.
@@ -65,13 +70,17 @@ func (s Sample) String() string {
// Print all value types on purpose, to catch bugs for appending multiple sample types at once.
h := ""
if s.H != nil {
- h = s.H.String()
+ h = " " + s.H.String()
}
fh := ""
if s.FH != nil {
- fh = s.FH.String()
+ fh = " " + s.FH.String()
}
- b.WriteString(fmt.Sprintf("%s %v%v%v st@%v t@%v\n", s.L.String(), s.V, h, fh, s.ST, s.T))
+ b.WriteString(fmt.Sprintf("%s %v%v%v st@%v t@%v", s.L.String(), s.V, h, fh, s.ST, s.T))
+ if len(s.ES) > 0 {
+ b.WriteString(fmt.Sprintf(" %v", s.ES))
+ }
+ b.WriteString("\n")
return b.String()
}
@@ -87,6 +96,88 @@ func (s Sample) Equals(other Sample) bool {
slices.EqualFunc(s.ES, other.ES, exemplar.Exemplar.Equals)
}
+// IsStale returns whether the sample represents a stale sample, according to
+// https://prometheus.io/docs/specs/native_histograms/#staleness-markers.
+func (s Sample) IsStale() bool {
+ switch {
+ case s.FH != nil:
+ return value.IsStaleNaN(s.FH.Sum)
+ case s.H != nil:
+ return value.IsStaleNaN(s.H.Sum)
+ default:
+ return value.IsStaleNaN(s.V)
+ }
+}
+
+var sampleComparer = cmp.Comparer(func(a, b Sample) bool {
+ return a.Equals(b)
+})
+
+// RequireEqual is a special require equal that correctly compare Prometheus structures.
+//
+// In comparison to testutil.RequireEqual, this function adds special logic for comparing []Samples.
+//
+// It also ignores ordering between consecutive stale samples to avoid false
+// negatives due to map iteration order in staleness tracking.
+func RequireEqual(t testing.TB, expected, got []Sample, msgAndArgs ...any) {
+ opts := []cmp.Option{sampleComparer}
+ expected = reorderExpectedForStaleness(expected, got)
+ testutil.RequireEqualWithOptions(t, expected, got, opts, msgAndArgs...)
+}
+
+// RequireNotEqual is the negation of RequireEqual.
+func RequireNotEqual(t testing.TB, expected, got []Sample, msgAndArgs ...any) {
+ t.Helper()
+
+ opts := []cmp.Option{cmp.Comparer(labels.Equal), sampleComparer}
+ expected = reorderExpectedForStaleness(expected, got)
+ if !cmp.Equal(expected, got, opts...) {
+ return
+ }
+ require.Fail(t, fmt.Sprintf("Equal, but expected not: \n"+
+ "a: %s\n"+
+ "b: %s", expected, got), msgAndArgs...)
+}
+
+func reorderExpectedForStaleness(expected, got []Sample) []Sample {
+ if len(expected) != len(got) || !includeStaleNaNs(expected) {
+ return expected
+ }
+ result := make([]Sample, len(expected))
+ copy(result, expected)
+
+ // Try to reorder only consecutive stale samples to avoid false negatives
+ // due to map iteration order in staleness tracking.
+ for i := range result {
+ if !result[i].IsStale() {
+ continue
+ }
+ if result[i].Equals(got[i]) {
+ continue
+ }
+ for j := i + 1; j < len(result); j++ {
+ if !result[j].IsStale() {
+ break
+ }
+ if result[j].Equals(got[i]) {
+ // Swap.
+ result[i], result[j] = result[j], result[i]
+ break
+ }
+ }
+ }
+ return result
+}
+
+func includeStaleNaNs(s []Sample) bool {
+ for _, e := range s {
+ if e.IsStale() {
+ return true
+ }
+ }
+ return false
+}
+
// Appendable is a storage.Appendable mock.
// It allows recording all samples that were added through the appender and injecting errors.
// Appendable will panic if more than one Appender is open.
@@ -94,6 +185,7 @@ type Appendable struct {
appendErrFn func(ls labels.Labels) error // If non-nil, inject appender error on every Append, AppendHistogram and ST zero calls.
appendExemplarsError error // If non-nil, inject exemplar error.
commitErr error // If non-nil, inject commit error.
+ skipRecording bool // If true, Appendable won't record samples, useful for benchmarks.
mtx sync.Mutex
openAppenders atomic.Int32 // Guard against multi-appender use.
@@ -104,7 +196,7 @@ type Appendable struct {
rolledbackSamples []Sample
// Optional chain (Appender will collect samples, then run next).
- next storage.Appendable
+ next compatAppendable
}
// NewAppendable returns mock Appendable.
@@ -112,8 +204,13 @@ func NewAppendable() *Appendable {
return &Appendable{}
}
-// Then chains another appender from the provided appendable for the Appender calls.
-func (a *Appendable) Then(appendable storage.Appendable) *Appendable {
+type compatAppendable interface {
+ storage.Appendable
+ storage.AppendableV2
+}
+
+// Then chains another appender from the provided Appendable for the Appender calls.
+func (a *Appendable) Then(appendable compatAppendable) *Appendable {
a.next = appendable
return a
}
@@ -126,10 +223,20 @@ func (a *Appendable) WithErrs(appendErrFn func(ls labels.Labels) error, appendEx
return a
}
+// SkipRecording enables or disables recording appended samples.
+// If skipped, Appendable allocs less, but Result*() methods will give always empty results. This is useful for benchmarking.
+func (a *Appendable) SkipRecording(skipRecording bool) *Appendable {
+ a.skipRecording = skipRecording
+ return a
+}
+
// PendingSamples returns pending samples (samples appended without commit).
func (a *Appendable) PendingSamples() []Sample {
a.mtx.Lock()
defer a.mtx.Unlock()
+ if len(a.pendingSamples) == 0 {
+ return nil
+ }
ret := make([]Sample, len(a.pendingSamples))
copy(ret, a.pendingSamples)
@@ -140,6 +247,9 @@ func (a *Appendable) PendingSamples() []Sample {
func (a *Appendable) ResultSamples() []Sample {
a.mtx.Lock()
defer a.mtx.Unlock()
+ if len(a.resultSamples) == 0 {
+ return nil
+ }
ret := make([]Sample, len(a.resultSamples))
copy(ret, a.resultSamples)
@@ -150,6 +260,9 @@ func (a *Appendable) ResultSamples() []Sample {
func (a *Appendable) RolledbackSamples() []Sample {
a.mtx.Lock()
defer a.mtx.Unlock()
+ if len(a.rolledbackSamples) == 0 {
+ return nil
+ }
ret := make([]Sample, len(a.rolledbackSamples))
copy(ret, a.rolledbackSamples)
@@ -205,28 +318,80 @@ func (a *Appendable) String() string {
var errClosedAppender = errors.New("appender was already committed/rolledback")
-type appender struct {
- err error
- next storage.Appender
+type baseAppender struct {
+ err error
- a *Appendable
+ nextTr storage.AppenderTransaction
+ a *Appendable
}
-func (a *appender) checkErr() error {
+func (a *baseAppender) checkErr() error {
a.a.mtx.Lock()
defer a.a.mtx.Unlock()
return a.err
}
+func (a *baseAppender) Commit() error {
+ if err := a.checkErr(); err != nil {
+ return err
+ }
+ defer a.a.openAppenders.Dec()
+
+ if a.a.commitErr != nil {
+ return a.a.commitErr
+ }
+
+ a.a.mtx.Lock()
+ if !a.a.skipRecording {
+ a.a.resultSamples = append(a.a.resultSamples, a.a.pendingSamples...)
+ a.a.pendingSamples = a.a.pendingSamples[:0]
+ }
+ a.err = errClosedAppender
+ a.a.mtx.Unlock()
+
+ if a.nextTr != nil {
+ return a.nextTr.Commit()
+ }
+ return nil
+}
+
+func (a *baseAppender) Rollback() error {
+ if err := a.checkErr(); err != nil {
+ return err
+ }
+ defer a.a.openAppenders.Dec()
+
+ a.a.mtx.Lock()
+ if !a.a.skipRecording {
+ a.a.rolledbackSamples = append(a.a.rolledbackSamples, a.a.pendingSamples...)
+ a.a.pendingSamples = a.a.pendingSamples[:0]
+ }
+ a.err = errClosedAppender
+ a.a.mtx.Unlock()
+
+ if a.nextTr != nil {
+ return a.nextTr.Rollback()
+ }
+ return nil
+}
+
+type appender struct {
+ baseAppender
+
+ next storage.Appender
+}
+
func (a *Appendable) Appender(ctx context.Context) storage.Appender {
- ret := &appender{a: a}
+ ret := &appender{baseAppender: baseAppender{a: a}}
if a.openAppenders.Inc() > 1 {
ret.err = errors.New("teststorage.Appendable.Appender() concurrent use is not supported; attempted opening new Appender() without Commit/Rollback of the previous one. Extend the implementation if concurrent mock is needed")
+ return ret
}
if a.next != nil {
- ret.next = a.next.Appender(ctx)
+ app := a.next.Appender(ctx)
+ ret.next, ret.nextTr = app, app
}
return ret
}
@@ -244,9 +409,11 @@ func (a *appender) Append(ref storage.SeriesRef, ls labels.Labels, t int64, v fl
}
}
- a.a.mtx.Lock()
- a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, V: v})
- a.a.mtx.Unlock()
+ if !a.a.skipRecording {
+ a.a.mtx.Lock()
+ a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, V: v})
+ a.a.mtx.Unlock()
+ }
if a.next != nil {
return a.next.Append(ref, ls, t, v)
@@ -263,8 +430,9 @@ func computeOrCheckRef(ref storage.SeriesRef, ls labels.Labels) (storage.SeriesR
}
if storage.SeriesRef(h) != ref {
- // Check for buggy ref while we at it.
- return 0, errors.New("teststorage.appender: found input ref not matching labels; potential bug in Appendable user")
+ // Check for buggy ref while we are at it. This only makes sense for cases without .Then*, because further appendable
+ // might have a different ref computation logic e.g. TSDB uses atomic increments.
+ return 0, errors.New("teststorage.appender: found input ref not matching labels; potential bug in Appendable usage")
}
return ref, nil
}
@@ -279,9 +447,11 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, t in
}
}
- a.a.mtx.Lock()
- a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, H: h, FH: fh})
- a.a.mtx.Unlock()
+ if !a.a.skipRecording {
+ a.a.mtx.Lock()
+ a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, H: h, FH: fh})
+ a.a.mtx.Unlock()
+ }
if a.next != nil {
return a.next.AppendHistogram(ref, ls, t, h, fh)
@@ -298,20 +468,25 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exem
return 0, a.a.appendExemplarsError
}
- a.a.mtx.Lock()
- // NOTE(bwplotka): Eventually exemplar has to be attached to a series and soon
- // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
- // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
- i := len(a.a.pendingSamples) - 1
- for ; i >= 0; i-- { // Attach exemplars to the last matching sample.
- if ref == storage.SeriesRef(a.a.pendingSamples[i].L.Hash()) {
- a.a.pendingSamples[i].ES = append(a.a.pendingSamples[i].ES, e)
- break
+ if !a.a.skipRecording {
+ var appended bool
+
+ a.a.mtx.Lock()
+ // NOTE(bwplotka): Eventually exemplar has to be attached to a series and soon
+ // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
+ // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
+ i := len(a.a.pendingSamples) - 1
+ for ; i >= 0; i-- { // Attach exemplars to the last matching sample.
+ if labels.Equal(l, a.a.pendingSamples[i].L) {
+ a.a.pendingSamples[i].ES = append(a.a.pendingSamples[i].ES, e)
+ appended = true
+ break
+ }
+ }
+ a.a.mtx.Unlock()
+ if !appended {
+ return 0, fmt.Errorf("teststorage.appender: exemplar appender without series; ref %v; l %v; exemplar: %v", ref, l, e)
}
- }
- a.a.mtx.Unlock()
- if i < 0 {
- return 0, fmt.Errorf("teststorage.appender: exemplar appender without series; ref %v; l %v; exemplar: %v", ref, l, e)
}
if a.next != nil {
@@ -336,20 +511,25 @@ func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m meta
return 0, err
}
- a.a.mtx.Lock()
- // NOTE(bwplotka): Eventually metadata has to be attached to a series and soon
- // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
- // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
- i := len(a.a.pendingSamples) - 1
- for ; i >= 0; i-- { // Attach metadata to the last matching sample.
- if ref == storage.SeriesRef(a.a.pendingSamples[i].L.Hash()) {
- a.a.pendingSamples[i].M = m
- break
+ if !a.a.skipRecording {
+ var updated bool
+
+ a.a.mtx.Lock()
+ // NOTE(bwplotka): Eventually metadata has to be attached to a series and soon
+ // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective
+ // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632
+ i := len(a.a.pendingSamples) - 1
+ for ; i >= 0; i-- { // Attach metadata to the last matching sample.
+ if labels.Equal(l, a.a.pendingSamples[i].L) {
+ a.a.pendingSamples[i].M = m
+ updated = true
+ break
+ }
+ }
+ a.a.mtx.Unlock()
+ if !updated {
+ return 0, fmt.Errorf("teststorage.appender: metadata update without series; ref %v; l %v; m: %v", ref, l, m)
}
- }
- a.a.mtx.Unlock()
- if i < 0 {
- return 0, fmt.Errorf("teststorage.appender: metadata update without series; ref %v; l %v; m: %v", ref, l, m)
}
if a.next != nil {
@@ -358,42 +538,79 @@ func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m meta
return computeOrCheckRef(ref, l)
}
-func (a *appender) Commit() error {
- if err := a.checkErr(); err != nil {
- return err
- }
- defer a.a.openAppenders.Dec()
+type appenderV2 struct {
+ baseAppender
- if a.a.commitErr != nil {
- return a.a.commitErr
- }
-
- a.a.mtx.Lock()
- a.a.resultSamples = append(a.a.resultSamples, a.a.pendingSamples...)
- a.a.pendingSamples = a.a.pendingSamples[:0]
- a.err = errClosedAppender
- a.a.mtx.Unlock()
-
- if a.a.next != nil {
- return a.next.Commit()
- }
- return nil
+ next storage.AppenderV2
}
-func (a *appender) Rollback() error {
- if err := a.checkErr(); err != nil {
- return err
+func (a *Appendable) AppenderV2(ctx context.Context) storage.AppenderV2 {
+ ret := &appenderV2{baseAppender: baseAppender{a: a}}
+ if a.openAppenders.Inc() > 1 {
+ ret.err = errors.New("teststorage.Appendable.AppenderV2() concurrent use is not supported; attempted opening new AppenderV2() without Commit/Rollback of the previous one. Extend the implementation if concurrent mock is needed")
+ return ret
}
- defer a.a.openAppenders.Dec()
-
- a.a.mtx.Lock()
- a.a.rolledbackSamples = append(a.a.rolledbackSamples, a.a.pendingSamples...)
- a.a.pendingSamples = a.a.pendingSamples[:0]
- a.err = errClosedAppender
- a.a.mtx.Unlock()
if a.next != nil {
- return a.next.Rollback()
+ app := a.next.AppenderV2(ctx)
+ ret.next, ret.nextTr = app, app
}
- return nil
+ return ret
+}
+
+func (a *appenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
+ if err := a.checkErr(); err != nil {
+ return 0, err
+ }
+
+ if a.a.appendErrFn != nil {
+ if err := a.a.appendErrFn(ls); err != nil {
+ return 0, err
+ }
+ }
+
+ var partialErr error
+ if !a.a.skipRecording {
+ var es []exemplar.Exemplar
+
+ if len(opts.Exemplars) > 0 {
+ if a.a.appendExemplarsError != nil {
+ var exErrs []error
+ for range opts.Exemplars {
+ exErrs = append(exErrs, a.a.appendExemplarsError)
+ }
+ if len(exErrs) > 0 {
+ partialErr = &storage.AppendPartialError{ExemplarErrors: exErrs}
+ }
+ } else {
+ // As per AppenderV2 interface, opts.Exemplar slice is unsafe for reuse.
+ es = make([]exemplar.Exemplar, len(opts.Exemplars))
+ copy(es, opts.Exemplars)
+ }
+ }
+
+ a.a.mtx.Lock()
+ a.a.pendingSamples = append(a.a.pendingSamples, Sample{
+ MF: opts.MetricFamilyName,
+ M: opts.Metadata,
+ L: ls,
+ ST: st, T: t,
+ V: v, H: h, FH: fh,
+ ES: es,
+ })
+ a.a.mtx.Unlock()
+ }
+
+ if a.next != nil {
+ ref, err = a.next.Append(ref, ls, st, t, v, h, fh, opts)
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ ref, err = computeOrCheckRef(ref, ls)
+ if err != nil {
+ return ref, err
+ }
+ }
+ return ref, partialErr
}
diff --git a/util/teststorage/appender_test.go b/util/teststorage/appender_test.go
index 8c2a825c3a..41260ba43f 100644
--- a/util/teststorage/appender_test.go
+++ b/util/teststorage/appender_test.go
@@ -15,80 +15,220 @@ package teststorage
import (
"errors"
- "fmt"
+ "math"
"testing"
- "github.com/google/go-cmp/cmp"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/testutil"
)
-// TestSample_RequireEqual ensures standard testutil.RequireEqual is enough for comparisons.
-// This is thanks to the fact metadata has now Equals method.
+func testAppendableV1(t *testing.T, appTest *Appendable, a storage.Appendable) {
+ for _, commit := range []bool{true, false} {
+ appTest.ResultReset()
+
+ app := a.Appender(t.Context())
+
+ ref1, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "test_metric1", "app", "v1"), 1, 2)
+ require.NoError(t, err)
+
+ h := tsdbutil.GenerateTestHistogram(0)
+ _, err = app.AppendHistogram(0, labels.FromStrings(model.MetricNameLabel, "test_metric2", "app", "v1"), 2, h, nil)
+ require.NoError(t, err)
+
+ fh := tsdbutil.GenerateTestFloatHistogram(0)
+ _, err = app.AppendHistogram(0, labels.FromStrings(model.MetricNameLabel, "test_metric3", "app", "v1"), 3, nil, fh)
+ require.NoError(t, err)
+
+ // Update meta of first series.
+ m1 := metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}
+ _, err = app.UpdateMetadata(ref1, labels.FromStrings(model.MetricNameLabel, "test_metric1", "app", "v1"), m1)
+ require.NoError(t, err)
+
+ // Add exemplars to the first series.
+ e1 := exemplar.Exemplar{Labels: labels.FromStrings(model.MetricNameLabel, "yolo"), HasTs: true, Ts: 1}
+ _, err = app.AppendExemplar(ref1, labels.FromStrings(model.MetricNameLabel, "test_metric1", "app", "v1"), e1)
+ require.NoError(t, err)
+
+ exp := []Sample{
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric1", "app", "v1"), M: m1, T: 1, V: 2, ES: []exemplar.Exemplar{e1}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "app", "v1"), T: 2, H: h},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric3", "app", "v1"), T: 3, FH: fh},
+ }
+ testutil.RequireEqual(t, exp, appTest.PendingSamples())
+ require.Nil(t, appTest.ResultSamples())
+ require.Nil(t, appTest.RolledbackSamples())
+
+ if commit {
+ require.NoError(t, app.Commit())
+ require.Nil(t, appTest.PendingSamples())
+ testutil.RequireEqual(t, exp, appTest.ResultSamples())
+ require.Nil(t, appTest.RolledbackSamples())
+ break
+ }
+
+ require.NoError(t, app.Rollback())
+ require.Nil(t, appTest.PendingSamples())
+ require.Nil(t, appTest.ResultSamples())
+ testutil.RequireEqual(t, exp, appTest.RolledbackSamples())
+ }
+}
+
+func testAppendableV2(t *testing.T, appTest *Appendable, a storage.AppendableV2) {
+ for _, commit := range []bool{true, false} {
+ appTest.ResultReset()
+
+ app := a.AppenderV2(t.Context())
+
+ m1 := metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}
+ e1 := exemplar.Exemplar{Labels: labels.FromStrings(model.MetricNameLabel, "yolo"), HasTs: true, Ts: 1}
+ _, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "test_metric1", "app", "v2"), -1, 1, 2, nil, nil, storage.AOptions{
+ MetricFamilyName: "test_metric1",
+ Metadata: m1,
+ Exemplars: []exemplar.Exemplar{e1},
+ })
+ require.NoError(t, err)
+
+ h := tsdbutil.GenerateTestHistogram(0)
+ _, err = app.Append(0, labels.FromStrings(model.MetricNameLabel, "test_metric2", "app", "v2"), -2, 2, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ fh := tsdbutil.GenerateTestFloatHistogram(0)
+ _, err = app.Append(0, labels.FromStrings(model.MetricNameLabel, "test_metric3", "app", "v2"), -3, 3, 0, nil, fh, storage.AOptions{})
+ require.NoError(t, err)
+
+ exp := []Sample{
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric1", "app", "v2"), MF: "test_metric1", M: m1, ST: -1, T: 1, V: 2, ES: []exemplar.Exemplar{e1}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "app", "v2"), ST: -2, T: 2, H: h},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric3", "app", "v2"), ST: -3, T: 3, FH: fh},
+ }
+ testutil.RequireEqual(t, exp, appTest.PendingSamples())
+ require.Nil(t, appTest.ResultSamples())
+ require.Nil(t, appTest.RolledbackSamples())
+
+ if commit {
+ require.NoError(t, app.Commit())
+ require.Nil(t, appTest.PendingSamples())
+ testutil.RequireEqual(t, exp, appTest.ResultSamples())
+ require.Nil(t, appTest.RolledbackSamples())
+ break
+ }
+
+ require.NoError(t, app.Rollback())
+ require.Nil(t, appTest.PendingSamples())
+ require.Nil(t, appTest.ResultSamples())
+ testutil.RequireEqual(t, exp, appTest.RolledbackSamples())
+ }
+}
+
+func TestAppendable(t *testing.T) {
+ appTest := NewAppendable()
+ testAppendableV1(t, appTest, appTest)
+ testAppendableV2(t, appTest, appTest)
+}
+
+func TestAppendable_Then(t *testing.T) {
+ nextAppTest := NewAppendable()
+ app := NewAppendable().Then(nextAppTest)
+
+ // Ensure next mock record all the appends when appending to app.
+ testAppendableV1(t, nextAppTest, app)
+ // Ensure next mock record all the appends when appending to app.
+ testAppendableV2(t, nextAppTest, app)
+}
+
+// TestSample_RequireEqual.
func TestSample_RequireEqual(t *testing.T) {
a := []Sample{
{},
- {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
- {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
- {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
}
- testutil.RequireEqual(t, a, a)
+ RequireEqual(t, a, a)
b1 := []Sample{
{},
- {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
- {L: labels.FromStrings("__name__", "test_metric2_diff", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123}, // test_metric2_diff is different.
- {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2_diff", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123}, // test_metric2_diff is different.
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
}
- requireNotEqual(t, a, b1)
+ RequireNotEqual(t, a, b1)
b2 := []Sample{
{},
- {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
- {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
- {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo2")}}}, // exemplar is different.
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo2")}}}, // exemplar is different.
}
- requireNotEqual(t, a, b2)
+ RequireNotEqual(t, a, b2)
b3 := []Sample{
{},
- {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
- {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123, T: 123}, // Timestamp is different.
- {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123, T: 123}, // Timestamp is different.
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
}
- requireNotEqual(t, a, b3)
+ RequireNotEqual(t, a, b3)
b4 := []Sample{
{},
- {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
- {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 456.456}, // Value is different.
- {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 456.456}, // Value is different.
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
}
- requireNotEqual(t, a, b4)
+ RequireNotEqual(t, a, b4)
b5 := []Sample{
{},
- {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter2", Unit: "metric", Help: "some help text"}}, // Different type.
- {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
- {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter2", Unit: "metric", Help: "some help text"}}, // Different type.
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
}
- requireNotEqual(t, a, b5)
-}
+ RequireNotEqual(t, a, b5)
-// TODO(bwplotka): While this mimick testutil.RequireEqual just making it negative, this does not literally test
-// testutil.RequireEqual. Either build test suita that mocks `testing.TB` or get rid of testutil.RequireEqual somehow.
-func requireNotEqual(t testing.TB, a, b any) {
- t.Helper()
- if !cmp.Equal(a, b, cmp.Comparer(labels.Equal)) {
- return
+ // NaN comparison.
+ a = []Sample{
+ {},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: math.Float64frombits(value.StaleNaN)},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
}
- require.Fail(t, fmt.Sprintf("Equal, but expected not: \n"+
- "a: %s\n"+
- "b: %s", a, b))
+ RequireEqual(t, a, a)
+
+ // NaN comparison with different order.
+ a = []Sample{
+ {},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric10", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: math.Float64frombits(value.StaleNaN)},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
+ }
+ b6 := []Sample{
+ {},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric10", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: math.Float64frombits(value.StaleNaN)},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
+ }
+ RequireEqual(t, a, b6)
+
+ // Not equal with NaNs.
+ b7 := []Sample{
+ {},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric10", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings(model.MetricNameLabel, "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge2", Unit: "", Help: "other help text"}, V: math.Float64frombits(value.StaleNaN)}, // metadata different
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings(model.MetricNameLabel, "yolo")}}},
+ }
+ RequireNotEqual(t, a, b7)
}
func TestConcurrentAppender_ReturnsErrAppender(t *testing.T) {
@@ -129,3 +269,145 @@ func TestConcurrentAppender_ReturnsErrAppender(t *testing.T) {
require.Error(t, app.Commit())
require.Error(t, app.Rollback())
}
+
+func TestConcurrentAppenderV2_ReturnsErrAppender(t *testing.T) {
+ a := NewAppendable()
+
+ // Non-concurrent multiple use if fine.
+ app := a.AppenderV2(t.Context())
+ require.Equal(t, int32(1), a.openAppenders.Load())
+ require.NoError(t, app.Commit())
+ // Repeated commit fails.
+ require.Error(t, app.Commit())
+
+ app = a.AppenderV2(t.Context())
+ require.NoError(t, app.Rollback())
+ // Commit after rollback fails.
+ require.Error(t, app.Commit())
+
+ a.WithErrs(
+ nil,
+ nil,
+ errors.New("commit err"),
+ )
+ app = a.AppenderV2(t.Context())
+ require.Error(t, app.Commit())
+
+ a.WithErrs(nil, nil, nil)
+ app = a.AppenderV2(t.Context())
+ require.NoError(t, app.Commit())
+ require.Equal(t, int32(0), a.openAppenders.Load())
+
+ // Concurrent use should return appender that errors.
+ _ = a.AppenderV2(t.Context())
+ app = a.AppenderV2(t.Context())
+ _, err := app.Append(0, labels.EmptyLabels(), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.Error(t, err)
+ require.Error(t, app.Commit())
+ require.Error(t, app.Rollback())
+}
+
+func TestReorderExpectedForStaleness(t *testing.T) {
+ testcases := []struct {
+ name string
+ inExpected []Sample
+ inGot []Sample
+ expected []Sample
+ }{
+ {
+ name: "no staleness markers",
+ inExpected: []Sample{
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "2"), T: 1, V: 2},
+ },
+ inGot: []Sample{
+ {L: labels.FromStrings("a", "2"), T: 1, V: 2},
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ },
+ },
+ {
+ name: "with staleness markers",
+ inExpected: []Sample{
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "2"), T: 2, V: 2},
+ {L: labels.FromStrings("a", "3"), T: 3, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings("a", "4"), T: 4, V: math.Float64frombits(value.StaleNaN)},
+ },
+ inGot: []Sample{
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "2"), T: 2, V: 2},
+ {L: labels.FromStrings("a", "3"), T: 3, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings("a", "4"), T: 4, V: math.Float64frombits(value.StaleNaN)},
+ },
+ },
+ {
+ name: "with staleness markers wrong order",
+ inExpected: []Sample{
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "2"), T: 2, V: 2},
+ {L: labels.FromStrings("a", "3"), T: 3, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings("a", "4"), T: 4, V: math.Float64frombits(value.StaleNaN)},
+ },
+ inGot: []Sample{
+ {L: labels.FromStrings("a", "2"), T: 2, V: 2},
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "4"), T: 4, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings("a", "3"), T: 3, V: math.Float64frombits(value.StaleNaN)},
+ },
+ expected: []Sample{
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "2"), T: 2, V: 2},
+ {L: labels.FromStrings("a", "4"), T: 4, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings("a", "3"), T: 3, V: math.Float64frombits(value.StaleNaN)},
+ },
+ },
+ {
+ name: "with staleness markers wrong order but not consecutive",
+ inExpected: []Sample{
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "3"), T: 3, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings("a", "2"), T: 2, V: 2},
+ {L: labels.FromStrings("a", "4"), T: 4, V: math.Float64frombits(value.StaleNaN)},
+ },
+ inGot: []Sample{
+ {L: labels.FromStrings("a", "2"), T: 2, V: 2},
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "4"), T: 4, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings("a", "3"), T: 3, V: math.Float64frombits(value.StaleNaN)},
+ },
+ expected: []Sample{
+ {L: labels.FromStrings("a", "1"), T: 1, V: 1},
+ {L: labels.FromStrings("a", "3"), T: 3, V: math.Float64frombits(value.StaleNaN)},
+ {L: labels.FromStrings("a", "2"), T: 2, V: 2},
+ {L: labels.FromStrings("a", "4"), T: 4, V: math.Float64frombits(value.StaleNaN)},
+ },
+ },
+ }
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.expected == nil {
+ tc.expected = tc.inExpected
+ }
+ RequireEqual(t, tc.expected, reorderExpectedForStaleness(tc.inExpected, tc.inGot))
+ })
+ }
+}
+
+func TestSampleIsStale(t *testing.T) {
+ s1 := Sample{V: 1}
+ require.False(t, s1.IsStale())
+ s2 := Sample{V: math.Float64frombits(value.StaleNaN)}
+ require.True(t, s2.IsStale())
+ h := tsdbutil.GenerateTestHistogram(0)
+ h1 := Sample{V: math.Float64frombits(value.StaleNaN), H: h}
+ require.False(t, h1.IsStale()) // Histogram takes precedence over V.
+ h.Sum = math.Float64frombits(value.StaleNaN)
+ h2 := Sample{V: 1, H: h}
+ require.True(t, h2.IsStale())
+ fh := tsdbutil.GenerateTestFloatHistogram(0)
+ fh1 := Sample{V: math.Float64frombits(value.StaleNaN), H: h, FH: fh}
+ require.False(t, fh1.IsStale()) // FloatHistogram takes precedence over all.
+ fh.Sum = math.Float64frombits(value.StaleNaN)
+ fh2 := Sample{V: 1, H: tsdbutil.GenerateTestHistogram(1), FH: fh}
+ require.True(t, fh2.IsStale())
+}
diff --git a/util/teststorage/storage.go b/util/teststorage/storage.go
index 17efdda77d..65c2f87e21 100644
--- a/util/teststorage/storage.go
+++ b/util/teststorage/storage.go
@@ -16,66 +16,66 @@ package teststorage
import (
"fmt"
"os"
+ "testing"
"time"
- "github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
- "github.com/prometheus/prometheus/model/exemplar"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
- "github.com/prometheus/prometheus/util/testutil"
)
+type Option func(opt *tsdb.Options)
+
// New returns a new TestStorage for testing purposes
// that removes all associated files on closing.
-func New(t testutil.T, outOfOrderTimeWindow ...int64) *TestStorage {
- stor, err := NewWithError(outOfOrderTimeWindow...)
+//
+// Caller does not need to close the TestStorage after use, it's deferred via t.Cleanup.
+func New(t testing.TB, o ...Option) *TestStorage {
+ s, err := NewWithError(o...)
require.NoError(t, err)
- return stor
+
+ t.Cleanup(func() {
+ _ = s.Close() // Ignore errors, as it could be a double close.
+ })
+ return s
}
// NewWithError returns a new TestStorage for user facing tests, which reports
// errors directly.
-func NewWithError(outOfOrderTimeWindow ...int64) (*TestStorage, error) {
- dir, err := os.MkdirTemp("", "test_storage")
- if err != nil {
- return nil, fmt.Errorf("opening test directory: %w", err)
- }
-
+//
+// It's a caller responsibility to close the TestStorage after use.
+func NewWithError(o ...Option) (*TestStorage, error) {
// Tests just load data for a series sequentially. Thus we
// need a long appendable window.
opts := tsdb.DefaultOptions()
opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
opts.RetentionDuration = 0
+ opts.OutOfOrderTimeWindow = 0
- // Set OutOfOrderTimeWindow if provided, otherwise use default (0)
- if len(outOfOrderTimeWindow) > 0 {
- opts.OutOfOrderTimeWindow = outOfOrderTimeWindow[0]
- } else {
- opts.OutOfOrderTimeWindow = 0 // Default value is zero
+ // Enable exemplars storage by default.
+ opts.EnableExemplarStorage = true
+ opts.MaxExemplars = 1e5
+
+ for _, opt := range o {
+ opt(opts)
+ }
+
+ dir, err := os.MkdirTemp("", "test_storage")
+ if err != nil {
+ return nil, fmt.Errorf("opening test directory: %w", err)
}
db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats())
if err != nil {
return nil, fmt.Errorf("opening test storage: %w", err)
}
- reg := prometheus.NewRegistry()
- eMetrics := tsdb.NewExemplarMetrics(reg)
-
- es, err := tsdb.NewCircularExemplarStorage(10, eMetrics, opts.OutOfOrderTimeWindow)
- if err != nil {
- return nil, fmt.Errorf("opening test exemplar storage: %w", err)
- }
- return &TestStorage{DB: db, exemplarStorage: es, dir: dir}, nil
+ return &TestStorage{DB: db, dir: dir}, nil
}
type TestStorage struct {
*tsdb.DB
- exemplarStorage tsdb.ExemplarStorage
- dir string
+ dir string
}
func (s TestStorage) Close() error {
@@ -84,15 +84,3 @@ func (s TestStorage) Close() error {
}
return os.RemoveAll(s.dir)
}
-
-func (s TestStorage) ExemplarAppender() storage.ExemplarAppender {
- return s
-}
-
-func (s TestStorage) ExemplarQueryable() storage.ExemplarQueryable {
- return s.exemplarStorage
-}
-
-func (s TestStorage) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
- return ref, s.exemplarStorage.AddExemplar(l, e)
-}
diff --git a/util/testutil/directory.go b/util/testutil/directory.go
index 706007d322..b65a3f4fa0 100644
--- a/util/testutil/directory.go
+++ b/util/testutil/directory.go
@@ -60,21 +60,12 @@ type (
// their interactions.
temporaryDirectory struct {
path string
- tester T
+ tester testing.TB
}
callbackCloser struct {
fn func()
}
-
- // T implements the needed methods of testing.TB so that we do not need
- // to actually import testing (which has the side effect of adding all
- // the test flags, which we do not want in non-test binaries even if
- // they make use of these utilities for some reason).
- T interface {
- Errorf(format string, args ...any)
- FailNow()
- }
)
func (nilCloser) Close() {
@@ -113,7 +104,7 @@ func (t temporaryDirectory) Path() string {
// NewTemporaryDirectory creates a new temporary directory for transient POSIX
// activities.
-func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) {
+func NewTemporaryDirectory(name string, t testing.TB) (handler TemporaryDirectory) {
var (
directory string
err error
diff --git a/web/api/testhelpers/api.go b/web/api/testhelpers/api.go
new file mode 100644
index 0000000000..07d7003b5c
--- /dev/null
+++ b/web/api/testhelpers/api.go
@@ -0,0 +1,244 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testhelpers provides utilities for testing the Prometheus HTTP API.
+// This file contains helper functions for creating test API instances and managing test lifecycles.
+package testhelpers
+
+import (
+ "context"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/promslog"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/promql/promqltest"
+ "github.com/prometheus/prometheus/rules"
+ "github.com/prometheus/prometheus/scrape"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb"
+ "github.com/prometheus/prometheus/util/notifications"
+)
+
+// RulesRetriever provides a list of active rules and alerts.
+type RulesRetriever interface {
+ RuleGroups() []*rules.Group
+ AlertingRules() []*rules.AlertingRule
+}
+
+// TargetRetriever provides the list of active/dropped targets to scrape or not.
+type TargetRetriever interface {
+ TargetsActive() map[string][]*scrape.Target
+ TargetsDropped() map[string][]*scrape.Target
+ TargetsDroppedCounts() map[string]int
+ ScrapePoolConfig(string) (*config.ScrapeConfig, error)
+}
+
+// ScrapePoolsRetriever provide the list of all scrape pools.
+type ScrapePoolsRetriever interface {
+ ScrapePools() []string
+}
+
+// AlertmanagerRetriever provides a list of all/dropped AlertManager URLs.
+type AlertmanagerRetriever interface {
+ Alertmanagers() []*url.URL
+ DroppedAlertmanagers() []*url.URL
+}
+
+// TSDBAdminStats provides TSDB admin statistics.
+type TSDBAdminStats interface {
+ CleanTombstones() error
+ Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error
+ Snapshot(dir string, withHead bool) error
+ Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
+ WALReplayStatus() (tsdb.WALReplayStatus, error)
+ BlockMetas() ([]tsdb.BlockMeta, error)
+}
+
+// APIConfig holds configuration for creating a test API instance.
+type APIConfig struct {
+ // Core dependencies.
+ QueryEngine *LazyLoader[promql.QueryEngine]
+ Queryable *LazyLoader[storage.SampleAndChunkQueryable]
+ ExemplarQueryable *LazyLoader[storage.ExemplarQueryable]
+
+ // Retrievers.
+ RulesRetriever *LazyLoader[RulesRetriever]
+ TargetRetriever *LazyLoader[TargetRetriever]
+ ScrapePoolsRetriever *LazyLoader[ScrapePoolsRetriever]
+ AlertmanagerRetriever *LazyLoader[AlertmanagerRetriever]
+
+ // Admin.
+ TSDBAdmin *LazyLoader[TSDBAdminStats]
+ DBDir string
+
+ // Optional overrides.
+ Config func() config.Config
+ FlagsMap map[string]string
+ Now func() time.Time
+}
+
+// APIWrapper wraps the API and provides a handler for testing.
+type APIWrapper struct {
+ Handler http.Handler
+}
+
+// PrometheusVersion contains build information about Prometheus.
+type PrometheusVersion struct {
+ Version string `json:"version"`
+ Revision string `json:"revision"`
+ Branch string `json:"branch"`
+ BuildUser string `json:"buildUser"`
+ BuildDate string `json:"buildDate"`
+ GoVersion string `json:"goVersion"`
+}
+
+// RuntimeInfo contains runtime information about Prometheus.
+type RuntimeInfo struct {
+ StartTime time.Time `json:"startTime"`
+ CWD string `json:"CWD"`
+ Hostname string `json:"hostname"`
+ ServerTime time.Time `json:"serverTime"`
+ ReloadConfigSuccess bool `json:"reloadConfigSuccess"`
+ LastConfigTime time.Time `json:"lastConfigTime"`
+ CorruptionCount int64 `json:"corruptionCount"`
+ GoroutineCount int `json:"goroutineCount"`
+ GOMAXPROCS int `json:"GOMAXPROCS"`
+ GOMEMLIMIT int64 `json:"GOMEMLIMIT"`
+ GOGC string `json:"GOGC"`
+ GODEBUG string `json:"GODEBUG"`
+ StorageRetention string `json:"storageRetention"`
+}
+
+// NewAPIParams holds all the parameters needed to create a v1.API instance.
+type NewAPIParams struct {
+ QueryEngine promql.QueryEngine
+ Queryable storage.SampleAndChunkQueryable
+ ExemplarQueryable storage.ExemplarQueryable
+ ScrapePoolsRetriever func(context.Context) ScrapePoolsRetriever
+ TargetRetriever func(context.Context) TargetRetriever
+ AlertmanagerRetriever func(context.Context) AlertmanagerRetriever
+ ConfigFunc func() config.Config
+ FlagsMap map[string]string
+ ReadyFunc func(http.HandlerFunc) http.HandlerFunc
+ TSDBAdmin TSDBAdminStats
+ DBDir string
+ Logger *slog.Logger
+ RulesRetriever func(context.Context) RulesRetriever
+ RuntimeInfoFunc func() (RuntimeInfo, error)
+ BuildInfo *PrometheusVersion
+ NotificationsGetter func() []notifications.Notification
+ NotificationsSub func() (<-chan notifications.Notification, func(), bool)
+ Gatherer prometheus.Gatherer
+ Registerer prometheus.Registerer
+}
+
+// PrepareAPI creates a NewAPIParams with sensible defaults for testing.
+func PrepareAPI(t *testing.T, cfg APIConfig) NewAPIParams {
+ t.Helper()
+
+ // Create defaults for unset lazy loaders.
+ if cfg.QueryEngine == nil {
+ cfg.QueryEngine = NewLazyLoader(func() promql.QueryEngine {
+ return promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{
+ Logger: nil,
+ Reg: nil,
+ MaxSamples: 10000,
+ Timeout: 100 * time.Second,
+ NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 },
+ EnableAtModifier: true,
+ EnableNegativeOffset: true,
+ EnablePerStepStats: true,
+ })
+ })
+ }
+
+ if cfg.Queryable == nil {
+ cfg.Queryable = NewLazyLoader(NewEmptyQueryable)
+ }
+
+ if cfg.ExemplarQueryable == nil {
+ cfg.ExemplarQueryable = NewLazyLoader(NewEmptyExemplarQueryable)
+ }
+
+ if cfg.RulesRetriever == nil {
+ cfg.RulesRetriever = NewLazyLoader(func() RulesRetriever {
+ return NewEmptyRulesRetriever()
+ })
+ }
+
+ if cfg.TargetRetriever == nil {
+ cfg.TargetRetriever = NewLazyLoader(func() TargetRetriever {
+ return NewEmptyTargetRetriever()
+ })
+ }
+
+ if cfg.ScrapePoolsRetriever == nil {
+ cfg.ScrapePoolsRetriever = NewLazyLoader(func() ScrapePoolsRetriever {
+ return NewEmptyScrapePoolsRetriever()
+ })
+ }
+
+ if cfg.AlertmanagerRetriever == nil {
+ cfg.AlertmanagerRetriever = NewLazyLoader(func() AlertmanagerRetriever {
+ return NewEmptyAlertmanagerRetriever()
+ })
+ }
+
+ if cfg.TSDBAdmin == nil {
+ cfg.TSDBAdmin = NewLazyLoader(func() TSDBAdminStats {
+ return NewEmptyTSDBAdminStats()
+ })
+ }
+
+ if cfg.Config == nil {
+ cfg.Config = func() config.Config { return config.Config{} }
+ }
+
+ if cfg.FlagsMap == nil {
+ cfg.FlagsMap = map[string]string{}
+ }
+
+ if cfg.DBDir == "" {
+ cfg.DBDir = t.TempDir()
+ }
+
+ return NewAPIParams{
+ QueryEngine: cfg.QueryEngine.Get(),
+ Queryable: cfg.Queryable.Get(),
+ ExemplarQueryable: cfg.ExemplarQueryable.Get(),
+ ScrapePoolsRetriever: func(context.Context) ScrapePoolsRetriever { return cfg.ScrapePoolsRetriever.Get() },
+ TargetRetriever: func(context.Context) TargetRetriever { return cfg.TargetRetriever.Get() },
+ AlertmanagerRetriever: func(context.Context) AlertmanagerRetriever { return cfg.AlertmanagerRetriever.Get() },
+ ConfigFunc: cfg.Config,
+ FlagsMap: cfg.FlagsMap,
+ ReadyFunc: func(f http.HandlerFunc) http.HandlerFunc { return f },
+ TSDBAdmin: cfg.TSDBAdmin.Get(),
+ DBDir: cfg.DBDir,
+ Logger: promslog.NewNopLogger(),
+ RulesRetriever: func(context.Context) RulesRetriever { return cfg.RulesRetriever.Get() },
+ RuntimeInfoFunc: func() (RuntimeInfo, error) { return RuntimeInfo{}, nil },
+ BuildInfo: &PrometheusVersion{},
+ NotificationsGetter: func() []notifications.Notification { return nil },
+ NotificationsSub: func() (<-chan notifications.Notification, func(), bool) { return nil, func() {}, false },
+ Gatherer: prometheus.NewRegistry(),
+ Registerer: prometheus.NewRegistry(),
+ }
+}
diff --git a/web/api/testhelpers/assertions.go b/web/api/testhelpers/assertions.go
new file mode 100644
index 0000000000..8a0a0d4a97
--- /dev/null
+++ b/web/api/testhelpers/assertions.go
@@ -0,0 +1,262 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides assertion helpers for validating API responses in tests.
+package testhelpers
+
+import (
+ "fmt"
+ "slices"
+ "strings"
+
+ "github.com/stretchr/testify/require"
+)
+
+// RequireSuccess asserts that the response has status "success" and returns the response for chaining.
+func (r *Response) RequireSuccess() *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+ require.Equal(r.t, "success", r.JSON["status"], "expected status to be 'success'")
+ return r
+}
+
+// RequireError asserts that the response has status "error" and returns the response for chaining.
+func (r *Response) RequireError() *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+ require.Equal(r.t, "error", r.JSON["status"], "expected status to be 'error'")
+ return r
+}
+
+// RequireStatusCode asserts that the response has the given HTTP status code and returns the response for chaining.
+func (r *Response) RequireStatusCode(expectedCode int) *Response {
+ r.t.Helper()
+ require.Equal(r.t, expectedCode, r.StatusCode, "unexpected HTTP status code")
+ return r
+}
+
+// RequireJSONPathExists asserts that a JSON path exists and returns the response for chaining.
+func (r *Response) RequireJSONPathExists(path string) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ return r
+}
+
+// RequireJSONPathNotExists asserts that a JSON path does not exist and returns the response for chaining.
+func (r *Response) RequireJSONPathNotExists(path string) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.Nil(r.t, value, "JSON path %q should not exist but was found", path)
+ return r
+}
+
+// RequireEquals asserts that a JSON path equals the expected value and returns the response for chaining.
+func (r *Response) RequireEquals(path string, expected any) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ require.Equal(r.t, expected, value, "JSON path %q has unexpected value", path)
+ return r
+}
+
+// RequireJSONArray asserts that a JSON path contains an array and returns the response for chaining.
+func (r *Response) RequireJSONArray(path string) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ _, ok := value.([]any)
+ require.True(r.t, ok, "JSON path %q is not an array", path)
+ return r
+}
+
+// RequireLenAtLeast asserts that a JSON path contains an array with at least minLen elements and returns the response for chaining.
+func (r *Response) RequireLenAtLeast(path string, minLen int) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ arr, ok := value.([]any)
+ require.True(r.t, ok, "JSON path %q is not an array", path)
+ require.GreaterOrEqual(r.t, len(arr), minLen, "JSON path %q has fewer than %d elements", path, minLen)
+ return r
+}
+
+// RequireArrayContains asserts that a JSON path contains an array with the expected element and returns the response for chaining.
+func (r *Response) RequireArrayContains(path string, expected any) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ arr, ok := value.([]any)
+ require.True(r.t, ok, "JSON path %q is not an array", path)
+
+ found := slices.Contains(arr, expected)
+ require.True(r.t, found, "JSON path %q does not contain expected value %v", path, expected)
+ return r
+}
+
+// RequireSome asserts that at least one element in an array satisfies the predicate and returns the response for chaining.
+func (r *Response) RequireSome(path string, predicate func(any) bool) *Response {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ arr, ok := value.([]any)
+ require.True(r.t, ok, "JSON path %q is not an array", path)
+
+ found := slices.ContainsFunc(arr, predicate)
+ require.True(r.t, found, "no element in JSON path %q satisfies the predicate", path)
+ return r
+}
+
+// getJSONPath extracts a value from a JSON object using a simple path notation.
+// Supports paths like "$.data", "$.data.groups", "$.data.groups[0]".
+func getJSONPath(data map[string]any, path string) any {
+ // Remove leading "$." if present.
+ path = strings.TrimPrefix(path, "$.")
+
+ if path == "" {
+ return data
+ }
+
+ parts := strings.Split(path, ".")
+ current := any(data)
+
+ for _, part := range parts {
+ // Handle array indexing (e.g., "groups[0]").
+ if strings.Contains(part, "[") {
+ // Not implementing array indexing for simplicity.
+ // Tests should use direct field access or RequireSome.
+ return nil
+ }
+
+ // Navigate to the next level.
+ m, ok := current.(map[string]any)
+ if !ok {
+ return nil
+ }
+ current = m[part]
+ }
+
+ return current
+}
+
+// RequireVectorResult is a convenience helper for checking vector query results.
+func (r *Response) RequireVectorResult() *Response {
+ r.t.Helper()
+ return r.RequireSuccess().RequireEquals("$.data.resultType", "vector")
+}
+
+// RequireMatrixResult is a convenience helper for checking matrix query results.
+func (r *Response) RequireMatrixResult() *Response {
+ r.t.Helper()
+ return r.RequireSuccess().RequireEquals("$.data.resultType", "matrix")
+}
+
+// RequireScalarResult is a convenience helper for checking scalar query results.
+func (r *Response) RequireScalarResult() *Response {
+ r.t.Helper()
+ return r.RequireSuccess().RequireEquals("$.data.resultType", "scalar")
+}
+
+// RequireRulesGroupNamed asserts that a rules response contains a group with the given name.
+func (r *Response) RequireRulesGroupNamed(name string) *Response {
+ r.t.Helper()
+ return r.RequireSuccess().RequireSome("$.data.groups", func(group any) bool {
+ if g, ok := group.(map[string]any); ok {
+ return g["name"] == name
+ }
+ return false
+ })
+}
+
+// RequireTargetCount asserts that a targets response contains at least n targets.
+func (r *Response) RequireTargetCount(minCount int) *Response {
+ r.t.Helper()
+ r.RequireSuccess()
+
+ // The targets endpoint returns activeTargets as an array of targets.
+ value := getJSONPath(r.JSON, "$.data.activeTargets")
+ require.NotNil(r.t, value, "JSON path $.data.activeTargets does not exist")
+
+ arr, ok := value.([]any)
+ require.True(r.t, ok, "$.data.activeTargets is not an array")
+ require.GreaterOrEqual(r.t, len(arr), minCount, "expected at least %d targets, got %d", minCount, len(arr))
+ return r
+}
+
+// DebugJSON is a helper for debugging JSON responses in tests.
+func (r *Response) DebugJSON() *Response {
+ r.t.Helper()
+ r.t.Logf("Response status code: %d", r.StatusCode)
+ r.t.Logf("Response body: %s", r.Body)
+ if r.JSON != nil {
+ r.t.Logf("Response JSON: %+v", r.JSON)
+ }
+ return r
+}
+
+// RequireContainsSubstring asserts that the response body contains the given substring.
+func (r *Response) RequireContainsSubstring(substring string) *Response {
+ r.t.Helper()
+ require.Contains(r.t, r.Body, substring, "response body does not contain expected substring")
+ return r
+}
+
+// RequireField asserts that a field exists at the given path and returns its value.
+// Note: This method cannot be chained further since it returns the field value, not the Response.
+func (r *Response) RequireField(path string) any {
+ r.t.Helper()
+ require.NotNil(r.t, r.JSON, "response body is not JSON")
+
+ value := getJSONPath(r.JSON, path)
+ require.NotNil(r.t, value, "JSON path %q does not exist", path)
+ return value
+}
+
+// RequireFieldType asserts that a field exists and has the expected type.
+func (r *Response) RequireFieldType(path, expectedType string) *Response {
+ r.t.Helper()
+ value := r.RequireField(path)
+
+ var actualType string
+ switch value.(type) {
+ case string:
+ actualType = "string"
+ case float64:
+ actualType = "number"
+ case bool:
+ actualType = "bool"
+ case []any:
+ actualType = "array"
+ case map[string]any:
+ actualType = "object"
+ default:
+ actualType = fmt.Sprintf("%T", value)
+ }
+
+ require.Equal(r.t, expectedType, actualType, "JSON path %q has unexpected type", path)
+ return r
+}
diff --git a/web/api/testhelpers/fixtures.go b/web/api/testhelpers/fixtures.go
new file mode 100644
index 0000000000..7bb0151dca
--- /dev/null
+++ b/web/api/testhelpers/fixtures.go
@@ -0,0 +1,180 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides test fixture data for API tests.
+package testhelpers
+
+import (
+ "time"
+
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/promql/parser"
+ "github.com/prometheus/prometheus/rules"
+ "github.com/prometheus/prometheus/storage"
+)
+
+var testParser = parser.NewParser(parser.Options{})
+
+// FixtureSeries creates a simple series with the "up" metric.
+func FixtureSeries() []storage.Series {
+ // Use timestamps relative to "now" so queries work.
+ now := time.Now().UnixMilli()
+ return []storage.Series{
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "localhost:9090"),
+ samples: []promql.FPoint{
+ {T: now - 120000, F: 1},
+ {T: now - 60000, F: 1},
+ {T: now, F: 1},
+ },
+ },
+ }
+}
+
+// FixtureMultipleSeries creates multiple series for testing.
+func FixtureMultipleSeries() []storage.Series {
+ // Use timestamps relative to "now" so queries work.
+ now := time.Now().UnixMilli()
+ return []storage.Series{
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "localhost:9090"),
+ samples: []promql.FPoint{
+ {T: now - 60000, F: 1},
+ {T: now, F: 1},
+ },
+ },
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", "up", "job", "node", "instance", "localhost:9100"),
+ samples: []promql.FPoint{
+ {T: now - 60000, F: 1},
+ {T: now, F: 0},
+ },
+ },
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", "http_requests_total", "job", "api", "instance", "localhost:8080"),
+ samples: []promql.FPoint{
+ {T: now - 60000, F: 100},
+ {T: now, F: 150},
+ },
+ },
+ }
+}
+
+// FixtureRuleGroups creates a simple set of rule groups for testing.
+func FixtureRuleGroups() []*rules.Group {
+ // Create a simple recording rule.
+ expr, _ := testParser.ParseExpr("up == 1")
+ recordingRule := rules.NewRecordingRule(
+ "job:up:sum",
+ expr,
+ labels.EmptyLabels(),
+ )
+
+ // Create a simple alerting rule.
+ alertExpr, _ := testParser.ParseExpr("up == 0")
+ alertingRule := rules.NewAlertingRule(
+ "InstanceDown",
+ alertExpr,
+ time.Minute,
+ 0,
+ labels.FromStrings("severity", "critical"),
+ labels.EmptyLabels(),
+ labels.EmptyLabels(),
+ "Instance {{ $labels.instance }} is down",
+ true,
+ nil,
+ )
+
+ // Create a rule group.
+ group := rules.NewGroup(rules.GroupOptions{
+ Name: "example",
+ File: "example.rules",
+ Interval: time.Minute,
+ Rules: []rules.Rule{
+ recordingRule,
+ alertingRule,
+ },
+ })
+
+ return []*rules.Group{group}
+}
+
+// FixtureEmptyRuleGroups returns an empty set of rule groups.
+func FixtureEmptyRuleGroups() []*rules.Group {
+ return []*rules.Group{}
+}
+
+// FixtureSingleSeries creates a single series for simple tests.
+func FixtureSingleSeries(metricName string, value float64) []storage.Series {
+ return []storage.Series{
+ &FakeSeries{
+ labels: labels.FromStrings("__name__", metricName),
+ samples: []promql.FPoint{
+ {T: 0, F: value},
+ },
+ },
+ }
+}
+
+// FixtureHistogramSeries creates a series with native histogram data.
+func FixtureHistogramSeries() []storage.Series {
+ // Use timestamps relative to "now" so queries work.
+ now := time.Now().UnixMilli()
+ return []storage.Series{
+ &FakeHistogramSeries{
+ labels: labels.FromStrings("__name__", "test_histogram", "job", "prometheus", "instance", "localhost:9090"),
+ histograms: []promql.HPoint{
+ {
+ T: now - 60000,
+ H: &histogram.FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.001,
+ ZeroCount: 5,
+ Count: 50,
+ Sum: 100,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 2},
+ {Offset: 1, Length: 2},
+ },
+ NegativeSpans: []histogram.Span{
+ {Offset: 0, Length: 1},
+ },
+ PositiveBuckets: []float64{5, 10, 8, 7},
+ NegativeBuckets: []float64{3},
+ },
+ },
+ {
+ T: now,
+ H: &histogram.FloatHistogram{
+ Schema: 2,
+ ZeroThreshold: 0.001,
+ ZeroCount: 8,
+ Count: 60,
+ Sum: 120,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 2},
+ {Offset: 1, Length: 2},
+ },
+ NegativeSpans: []histogram.Span{
+ {Offset: 0, Length: 1},
+ },
+ PositiveBuckets: []float64{6, 12, 10, 9},
+ NegativeBuckets: []float64{4},
+ },
+ },
+ },
+ },
+ }
+}
diff --git a/web/api/testhelpers/mocks.go b/web/api/testhelpers/mocks.go
new file mode 100644
index 0000000000..527febb727
--- /dev/null
+++ b/web/api/testhelpers/mocks.go
@@ -0,0 +1,534 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains mock implementations of API dependencies for testing.
+package testhelpers
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/rules"
+ "github.com/prometheus/prometheus/scrape"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb"
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
+ "github.com/prometheus/prometheus/tsdb/chunks"
+ "github.com/prometheus/prometheus/util/annotations"
+)
+
+// LazyLoader allows lazy initialization of mocks per test.
+type LazyLoader[T any] struct {
+ loader func() T
+ value *T
+}
+
+// NewLazyLoader creates a new LazyLoader with the given loader function.
+func NewLazyLoader[T any](loader func() T) *LazyLoader[T] {
+ return &LazyLoader[T]{loader: loader}
+}
+
+// Get returns the loaded value, initializing it if necessary.
+func (l *LazyLoader[T]) Get() T {
+ if l.value == nil {
+ v := l.loader()
+ l.value = &v
+ }
+ return *l.value
+}
+
+// FakeQueryable implements storage.SampleAndChunkQueryable with configurable behavior.
+type FakeQueryable struct {
+ series []storage.Series
+}
+
+func (f *FakeQueryable) Querier(_, _ int64) (storage.Querier, error) {
+ return &FakeQuerier{series: f.series}, nil
+}
+
+func (f *FakeQueryable) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) {
+ return &FakeChunkQuerier{series: f.series}, nil
+}
+
+// FakeQuerier implements storage.Querier.
+type FakeQuerier struct {
+ series []storage.Series
+}
+
+func (f *FakeQuerier) Select(_ context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
+ return &FakeSeriesSet{series: f.series, idx: -1}
+}
+
+func (f *FakeQuerier) LabelValues(_ context.Context, name string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ valuesMap := make(map[string]struct{})
+ for _, s := range f.series {
+ lbls := s.Labels()
+ if val := lbls.Get(name); val != "" {
+ valuesMap[val] = struct{}{}
+ }
+ }
+ values := make([]string, 0, len(valuesMap))
+ for v := range valuesMap {
+ values = append(values, v)
+ }
+ return values, nil, nil
+}
+
+func (f *FakeQuerier) LabelNames(_ context.Context, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ namesMap := make(map[string]struct{})
+ for _, s := range f.series {
+ lbls := s.Labels()
+ lbls.Range(func(l labels.Label) {
+ namesMap[l.Name] = struct{}{}
+ })
+ }
+ names := make([]string, 0, len(namesMap))
+ for n := range namesMap {
+ names = append(names, n)
+ }
+ return names, nil, nil
+}
+
+func (*FakeQuerier) Close() error {
+ return nil
+}
+
+// FakeChunkQuerier implements storage.ChunkQuerier.
+type FakeChunkQuerier struct {
+ series []storage.Series
+}
+
+func (f *FakeChunkQuerier) Select(_ context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.ChunkSeriesSet {
+ return &FakeChunkSeriesSet{series: f.series, idx: -1}
+}
+
+func (f *FakeChunkQuerier) LabelValues(_ context.Context, name string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ valuesMap := make(map[string]struct{})
+ for _, s := range f.series {
+ lbls := s.Labels()
+ if val := lbls.Get(name); val != "" {
+ valuesMap[val] = struct{}{}
+ }
+ }
+ values := make([]string, 0, len(valuesMap))
+ for v := range valuesMap {
+ values = append(values, v)
+ }
+ return values, nil, nil
+}
+
+func (f *FakeChunkQuerier) LabelNames(_ context.Context, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ namesMap := make(map[string]struct{})
+ for _, s := range f.series {
+ lbls := s.Labels()
+ lbls.Range(func(l labels.Label) {
+ namesMap[l.Name] = struct{}{}
+ })
+ }
+ names := make([]string, 0, len(namesMap))
+ for n := range namesMap {
+ names = append(names, n)
+ }
+ return names, nil, nil
+}
+
+func (*FakeChunkQuerier) Close() error {
+ return nil
+}
+
+// FakeSeriesSet implements storage.SeriesSet.
+type FakeSeriesSet struct {
+ series []storage.Series
+ idx int
+}
+
+func (f *FakeSeriesSet) Next() bool {
+ f.idx++
+ return f.idx < len(f.series)
+}
+
+func (f *FakeSeriesSet) At() storage.Series {
+ return f.series[f.idx]
+}
+
+func (*FakeSeriesSet) Err() error {
+ return nil
+}
+
+func (*FakeSeriesSet) Warnings() annotations.Annotations {
+ return nil
+}
+
+// FakeChunkSeriesSet implements storage.ChunkSeriesSet.
+type FakeChunkSeriesSet struct {
+ series []storage.Series
+ idx int
+}
+
+func (f *FakeChunkSeriesSet) Next() bool {
+ f.idx++
+ return f.idx < len(f.series)
+}
+
+func (f *FakeChunkSeriesSet) At() storage.ChunkSeries {
+ return &FakeChunkSeries{series: f.series[f.idx]}
+}
+
+func (*FakeChunkSeriesSet) Err() error {
+ return nil
+}
+
+func (*FakeChunkSeriesSet) Warnings() annotations.Annotations {
+ return nil
+}
+
+// FakeChunkSeries implements storage.ChunkSeries.
+type FakeChunkSeries struct {
+ series storage.Series
+}
+
+func (f *FakeChunkSeries) Labels() labels.Labels {
+ return f.series.Labels()
+}
+
+func (*FakeChunkSeries) Iterator(_ chunks.Iterator) chunks.Iterator {
+ return &FakeChunkSeriesIterator{}
+}
+
+// FakeChunkSeriesIterator implements chunks.Iterator.
+type FakeChunkSeriesIterator struct{}
+
+func (*FakeChunkSeriesIterator) Next() bool {
+ return false
+}
+
+func (*FakeChunkSeriesIterator) At() chunks.Meta {
+ return chunks.Meta{}
+}
+
+func (*FakeChunkSeriesIterator) Err() error {
+ return nil
+}
+
+// FakeSeries implements storage.Series.
+type FakeSeries struct {
+ labels labels.Labels
+ samples []promql.FPoint
+}
+
+func (f *FakeSeries) Labels() labels.Labels {
+ return f.labels
+}
+
+func (f *FakeSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
+ return &FakeSeriesIterator{samples: f.samples, idx: -1}
+}
+
+// FakeSeriesIterator implements chunkenc.Iterator.
+type FakeSeriesIterator struct {
+ samples []promql.FPoint
+ idx int
+}
+
+func (f *FakeSeriesIterator) Next() chunkenc.ValueType {
+ f.idx++
+ if f.idx < len(f.samples) {
+ return chunkenc.ValFloat
+ }
+ return chunkenc.ValNone
+}
+
+func (f *FakeSeriesIterator) Seek(t int64) chunkenc.ValueType {
+ for f.idx < len(f.samples)-1 {
+ f.idx++
+ if f.samples[f.idx].T >= t {
+ return chunkenc.ValFloat
+ }
+ }
+ return chunkenc.ValNone
+}
+
+func (f *FakeSeriesIterator) At() (int64, float64) {
+ s := f.samples[f.idx]
+ return s.T, s.F
+}
+
+func (*FakeSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+ panic("not implemented")
+}
+
+func (*FakeSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+ panic("not implemented")
+}
+
+func (f *FakeSeriesIterator) AtT() int64 {
+ return f.samples[f.idx].T
+}
+
+func (*FakeSeriesIterator) AtST() int64 {
+ return 0
+}
+
+func (*FakeSeriesIterator) Err() error {
+ return nil
+}
+
+// FakeHistogramSeries implements storage.Series for histogram data.
+type FakeHistogramSeries struct {
+ labels labels.Labels
+ histograms []promql.HPoint
+}
+
+func (f *FakeHistogramSeries) Labels() labels.Labels {
+ return f.labels
+}
+
+func (f *FakeHistogramSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
+ return &FakeHistogramSeriesIterator{histograms: f.histograms, idx: -1}
+}
+
+// FakeHistogramSeriesIterator implements chunkenc.Iterator for histogram data.
+type FakeHistogramSeriesIterator struct {
+ histograms []promql.HPoint
+ idx int
+}
+
+func (f *FakeHistogramSeriesIterator) Next() chunkenc.ValueType {
+ f.idx++
+ if f.idx < len(f.histograms) {
+ return chunkenc.ValFloatHistogram
+ }
+ return chunkenc.ValNone
+}
+
+func (f *FakeHistogramSeriesIterator) Seek(t int64) chunkenc.ValueType {
+ for f.idx < len(f.histograms)-1 {
+ f.idx++
+ if f.histograms[f.idx].T >= t {
+ return chunkenc.ValFloatHistogram
+ }
+ }
+ return chunkenc.ValNone
+}
+
+func (*FakeHistogramSeriesIterator) At() (int64, float64) {
+ panic("not a float value")
+}
+
+func (*FakeHistogramSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+ panic("not implemented")
+}
+
+func (f *FakeHistogramSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+ h := f.histograms[f.idx]
+ return h.T, h.H
+}
+
+func (f *FakeHistogramSeriesIterator) AtT() int64 {
+ return f.histograms[f.idx].T
+}
+
+func (*FakeHistogramSeriesIterator) AtST() int64 {
+ return 0
+}
+
+func (*FakeHistogramSeriesIterator) Err() error {
+ return nil
+}
+
+// FakeExemplarQueryable implements storage.ExemplarQueryable.
+type FakeExemplarQueryable struct{}
+
+func (*FakeExemplarQueryable) ExemplarQuerier(_ context.Context) (storage.ExemplarQuerier, error) {
+ return &FakeExemplarQuerier{}, nil
+}
+
+// FakeExemplarQuerier implements storage.ExemplarQuerier.
+type FakeExemplarQuerier struct{}
+
+func (*FakeExemplarQuerier) Select(_, _ int64, _ ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
+ return nil, nil
+}
+
+// FakeRulesRetriever implements v1.RulesRetriever.
+type FakeRulesRetriever struct {
+ groups []*rules.Group
+}
+
+func (f *FakeRulesRetriever) RuleGroups() []*rules.Group {
+ return f.groups
+}
+
+func (f *FakeRulesRetriever) AlertingRules() []*rules.AlertingRule {
+ var alertingRules []*rules.AlertingRule
+ for _, g := range f.groups {
+ for _, r := range g.Rules() {
+ if ar, ok := r.(*rules.AlertingRule); ok {
+ alertingRules = append(alertingRules, ar)
+ }
+ }
+ }
+ return alertingRules
+}
+
+// FakeTargetRetriever implements v1.TargetRetriever.
+type FakeTargetRetriever struct {
+ active map[string][]*scrape.Target
+ dropped map[string][]*scrape.Target
+ droppedCounts map[string]int
+ scrapeConfig map[string]*config.ScrapeConfig
+}
+
+func (f *FakeTargetRetriever) TargetsActive() map[string][]*scrape.Target {
+ if f.active == nil {
+ return make(map[string][]*scrape.Target)
+ }
+ return f.active
+}
+
+func (f *FakeTargetRetriever) TargetsDropped() map[string][]*scrape.Target {
+ if f.dropped == nil {
+ return make(map[string][]*scrape.Target)
+ }
+ return f.dropped
+}
+
+func (f *FakeTargetRetriever) TargetsDroppedCounts() map[string]int {
+ if f.droppedCounts == nil {
+ return make(map[string]int)
+ }
+ return f.droppedCounts
+}
+
+func (f *FakeTargetRetriever) ScrapePoolConfig(name string) (*config.ScrapeConfig, error) {
+ if f.scrapeConfig == nil {
+ return nil, nil
+ }
+ return f.scrapeConfig[name], nil
+}
+
+// FakeScrapePoolsRetriever implements v1.ScrapePoolsRetriever.
+type FakeScrapePoolsRetriever struct {
+ pools []string
+}
+
+func (f *FakeScrapePoolsRetriever) ScrapePools() []string {
+ if f.pools == nil {
+ return []string{}
+ }
+ return f.pools
+}
+
+// FakeAlertmanagerRetriever implements v1.AlertmanagerRetriever.
+type FakeAlertmanagerRetriever struct{}
+
+func (*FakeAlertmanagerRetriever) Alertmanagers() []*url.URL {
+ return nil
+}
+
+func (*FakeAlertmanagerRetriever) DroppedAlertmanagers() []*url.URL {
+ return nil
+}
+
+// FakeTSDBAdminStats implements v1.TSDBAdminStats.
+type FakeTSDBAdminStats struct{}
+
+func (*FakeTSDBAdminStats) CleanTombstones() error {
+ return nil
+}
+
+func (*FakeTSDBAdminStats) Delete(_ context.Context, _, _ int64, _ ...*labels.Matcher) error {
+ return nil
+}
+
+func (*FakeTSDBAdminStats) Snapshot(_ string, _ bool) error {
+ return nil
+}
+
+func (*FakeTSDBAdminStats) Stats(_ string, _ int) (*tsdb.Stats, error) {
+ return &tsdb.Stats{}, nil
+}
+
+func (*FakeTSDBAdminStats) WALReplayStatus() (tsdb.WALReplayStatus, error) {
+ return tsdb.WALReplayStatus{}, nil
+}
+
+func (*FakeTSDBAdminStats) BlockMetas() ([]tsdb.BlockMeta, error) {
+ return []tsdb.BlockMeta{}, nil
+}
+
+// NewEmptyQueryable returns a queryable with no series.
+func NewEmptyQueryable() storage.SampleAndChunkQueryable {
+ return &FakeQueryable{series: []storage.Series{}}
+}
+
+// NewQueryableWithSeries returns a queryable with the given series.
+func NewQueryableWithSeries(series []storage.Series) storage.SampleAndChunkQueryable {
+ return &FakeQueryable{series: series}
+}
+
+// TSDBNotReadyQueryable implements storage.SampleAndChunkQueryable that returns tsdb.ErrNotReady.
+type TSDBNotReadyQueryable struct{}
+
+func (*TSDBNotReadyQueryable) Querier(_, _ int64) (storage.Querier, error) {
+ return nil, tsdb.ErrNotReady
+}
+
+func (*TSDBNotReadyQueryable) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) {
+ return nil, tsdb.ErrNotReady
+}
+
+// NewTSDBNotReadyQueryable returns a queryable that always returns tsdb.ErrNotReady.
+func NewTSDBNotReadyQueryable() storage.SampleAndChunkQueryable {
+ return &TSDBNotReadyQueryable{}
+}
+
+// NewEmptyExemplarQueryable returns an exemplar queryable with no exemplars.
+func NewEmptyExemplarQueryable() storage.ExemplarQueryable {
+ return &FakeExemplarQueryable{}
+}
+
+// NewEmptyRulesRetriever returns a rules retriever with no rules.
+func NewEmptyRulesRetriever() *FakeRulesRetriever {
+ return &FakeRulesRetriever{groups: []*rules.Group{}}
+}
+
+// NewRulesRetrieverWithGroups returns a rules retriever with the given groups.
+func NewRulesRetrieverWithGroups(groups []*rules.Group) *FakeRulesRetriever {
+ return &FakeRulesRetriever{groups: groups}
+}
+
+// NewEmptyTargetRetriever returns a target retriever with no targets.
+func NewEmptyTargetRetriever() *FakeTargetRetriever {
+ return &FakeTargetRetriever{}
+}
+
+// NewEmptyScrapePoolsRetriever returns a scrape pools retriever with no pools.
+func NewEmptyScrapePoolsRetriever() *FakeScrapePoolsRetriever {
+ return &FakeScrapePoolsRetriever{pools: []string{}}
+}
+
+// NewEmptyAlertmanagerRetriever returns an alertmanager retriever with no alertmanagers.
+func NewEmptyAlertmanagerRetriever() *FakeAlertmanagerRetriever {
+ return &FakeAlertmanagerRetriever{}
+}
+
+// NewEmptyTSDBAdminStats returns a TSDB admin stats with no-op implementations.
+func NewEmptyTSDBAdminStats() *FakeTSDBAdminStats {
+ return &FakeTSDBAdminStats{}
+}
diff --git a/web/api/testhelpers/openapi.go b/web/api/testhelpers/openapi.go
new file mode 100644
index 0000000000..d2e88943d2
--- /dev/null
+++ b/web/api/testhelpers/openapi.go
@@ -0,0 +1,204 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides OpenAPI-specific test utilities for validating spec compliance.
+package testhelpers
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/pb33f/libopenapi"
+ validator "github.com/pb33f/libopenapi-validator"
+ valerrors "github.com/pb33f/libopenapi-validator/errors"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ openAPIValidator31 validator.Validator
+ openAPIValidator32 validator.Validator
+ openAPIValidatorOnce sync.Once
+ openAPIValidatorErr error
+)
+
+// loadOpenAPIValidators loads and caches both OpenAPI 3.1 and 3.2 validators from golden files.
+func loadOpenAPIValidators() (v31, v32 validator.Validator, err error) {
+ openAPIValidatorOnce.Do(func() {
+ // Load OpenAPI 3.1 validator.
+ goldenPath31 := filepath.Join("testdata", "openapi_3.1_golden.yaml")
+ specBytes31, err := os.ReadFile(goldenPath31)
+ if err != nil {
+ openAPIValidatorErr = fmt.Errorf("failed to read OpenAPI 3.1 spec from %s: %w", goldenPath31, err)
+ return
+ }
+
+ doc31, err := libopenapi.NewDocument(specBytes31)
+ if err != nil {
+ openAPIValidatorErr = fmt.Errorf("failed to parse OpenAPI 3.1 document: %w", err)
+ return
+ }
+
+ v31, errs := validator.NewValidator(doc31)
+ if len(errs) > 0 {
+ openAPIValidatorErr = fmt.Errorf("failed to create OpenAPI 3.1 validator: %v", errs)
+ return
+ }
+
+ openAPIValidator31 = v31
+
+ // Load OpenAPI 3.2 validator.
+ goldenPath32 := filepath.Join("testdata", "openapi_3.2_golden.yaml")
+ specBytes32, err := os.ReadFile(goldenPath32)
+ if err != nil {
+ openAPIValidatorErr = fmt.Errorf("failed to read OpenAPI 3.2 spec from %s: %w", goldenPath32, err)
+ return
+ }
+
+ doc32, err := libopenapi.NewDocument(specBytes32)
+ if err != nil {
+ openAPIValidatorErr = fmt.Errorf("failed to parse OpenAPI 3.2 document: %w", err)
+ return
+ }
+
+ v32, errs := validator.NewValidator(doc32)
+ if len(errs) > 0 {
+ openAPIValidatorErr = fmt.Errorf("failed to create OpenAPI 3.2 validator: %v", errs)
+ return
+ }
+
+ openAPIValidator32 = v32
+ })
+
+ if openAPIValidatorErr != nil {
+ return nil, nil, openAPIValidatorErr
+ }
+
+ return openAPIValidator31, openAPIValidator32, nil
+}
+
+// ValidateOpenAPI validates the request and response against both OpenAPI 3.1 and 3.2 specifications.
+// This ensures API endpoints are compatible with both OpenAPI versions.
+// Returns the response for chaining.
+func (r *Response) ValidateOpenAPI() *Response {
+ r.t.Helper()
+
+ // Load both validators (cached after first call).
+ v31, v32, err := loadOpenAPIValidators()
+ require.NoError(r.t, err, "failed to load OpenAPI validators")
+
+ // Validate against OpenAPI 3.1 spec.
+ if r.request != nil {
+ r.validateRequestWithVersion(v31, "3.1")
+ }
+ r.validateResponseWithVersion(v31, "3.1")
+
+ // Validate against OpenAPI 3.2 spec.
+ if r.request != nil {
+ r.validateRequestWithVersion(v32, "3.2")
+ }
+ r.validateResponseWithVersion(v32, "3.2")
+
+ return r
+}
+
+// validateRequestWithVersion validates the HTTP request against a specific OpenAPI version's spec.
+func (r *Response) validateRequestWithVersion(v validator.Validator, version string) {
+ r.t.Helper()
+
+ // Create a validation request from the original request.
+ validationReq := &http.Request{
+ Method: r.request.Method,
+ URL: r.request.URL,
+ Header: r.request.Header,
+ Body: io.NopCloser(bytes.NewReader(r.requestBody)),
+ }
+
+ // Validate the request.
+ valid, errors := v.ValidateHttpRequest(validationReq)
+ if !valid {
+ // Check if the error is because the path doesn't exist in this version.
+ // Some endpoints (like /notifications/live) only exist in 3.2, not 3.1.
+ if isPathNotFoundError(errors) && version == "3.1" && strings.Contains(r.request.URL.Path, "/notifications/live") {
+ // Expected: /notifications/live is only in OpenAPI 3.2.
+ return
+ }
+
+ var errorMessages []string
+ for _, e := range errors {
+ errorMessages = append(errorMessages, e.Error())
+ }
+ require.Fail(r.t, fmt.Sprintf("OpenAPI %s request validation failed", version),
+ "Request to %s %s failed OpenAPI %s validation:\n%v",
+ r.request.Method, r.request.URL.Path, version, errorMessages)
+ }
+}
+
+// validateResponseWithVersion validates the HTTP response against a specific OpenAPI version's spec.
+func (r *Response) validateResponseWithVersion(v validator.Validator, version string) {
+ r.t.Helper()
+
+ // Create a validation request (needed for response validation context).
+ validationReq := &http.Request{
+ Method: r.request.Method,
+ URL: r.request.URL,
+ Header: r.request.Header,
+ }
+
+ // Create a response for validation.
+ validationResp := &http.Response{
+ StatusCode: r.StatusCode,
+ Header: r.responseHeader,
+ Body: io.NopCloser(bytes.NewReader([]byte(r.Body))),
+ Request: validationReq,
+ }
+
+ // Validate the response.
+ valid, errors := v.ValidateHttpResponse(validationReq, validationResp)
+ if !valid {
+ // Check if the error is because the path doesn't exist in this version.
+ // Some endpoints (like /notifications/live) only exist in 3.2, not 3.1.
+ if isPathNotFoundError(errors) && version == "3.1" && strings.Contains(r.request.URL.Path, "/notifications/live") {
+ // Expected: /notifications/live is only in OpenAPI 3.2.
+ return
+ }
+
+ var errorMessages []string
+ for _, e := range errors {
+ errorMessages = append(errorMessages, e.Error())
+ }
+ require.Fail(r.t, fmt.Sprintf("OpenAPI %s response validation failed", version),
+ "Response from %s %s (status %d) failed OpenAPI %s validation:\n%v",
+ r.request.Method, r.request.URL.Path, r.StatusCode, version, errorMessages)
+ }
+}
+
+// isPathNotFoundError checks if the validation errors indicate a path was not found in the spec.
+func isPathNotFoundError(errors []*valerrors.ValidationError) bool {
+ for _, err := range errors {
+ errStr := err.Error()
+ // Check for common "path not found" error messages from libopenapi-validator.
+ if strings.Contains(errStr, "path") && (strings.Contains(errStr, "not found") || strings.Contains(errStr, "does not exist")) {
+ return true
+ }
+ if strings.Contains(errStr, "GET /notifications/live") || strings.Contains(errStr, "/notifications/live not found") {
+ return true
+ }
+ }
+ return false
+}
diff --git a/web/api/testhelpers/request.go b/web/api/testhelpers/request.go
new file mode 100644
index 0000000000..81650e4c49
--- /dev/null
+++ b/web/api/testhelpers/request.go
@@ -0,0 +1,145 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides HTTP request builders for testing API endpoints.
+package testhelpers
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+ "testing"
+)
+
+// Response wraps an HTTP response with parsed JSON data.
+// It supports method chaining for assertions.
+//
+// Example usage:
+//
+// testhelpers.GET(t, api, "/api/v1/query", "query", "up").
+// ValidateOpenAPI().
+// RequireSuccess().
+// RequireEquals("$.data.resultType", "vector").
+// RequireLenAtLeast("$.data.result", 1)
+//
+// testhelpers.POST(t, api, "/api/v1/query", "query", "up").
+// ValidateOpenAPI().
+// RequireSuccess().
+// RequireArrayContains("$.data.result", expectedValue)
+type Response struct {
+ StatusCode int
+ Body string
+ JSON map[string]any
+ t *testing.T
+ request *http.Request
+ requestBody []byte
+ responseHeader http.Header
+}
+
+// GET sends a GET request to the API and returns a Response with parsed JSON.
+// queryParams should be pairs of key-value strings.
+func GET(t *testing.T, api *APIWrapper, path string, queryParams ...string) *Response {
+ t.Helper()
+
+ if len(queryParams)%2 != 0 {
+ t.Fatal("queryParams must be key-value pairs")
+ }
+
+ // Build query string.
+ values := url.Values{}
+ for i := 0; i < len(queryParams); i += 2 {
+ values.Add(queryParams[i], queryParams[i+1])
+ }
+
+ fullPath := path
+ if len(values) > 0 {
+ fullPath = path + "?" + values.Encode()
+ }
+
+ req := httptest.NewRequest(http.MethodGet, fullPath, nil)
+ return executeRequest(t, api, req)
+}
+
+// POST sends a POST request to the API with the given body and returns a Response with parsed JSON.
+// bodyParams should be pairs of key-value strings for form data.
+func POST(t *testing.T, api *APIWrapper, path string, bodyParams ...string) *Response {
+ t.Helper()
+
+ if len(bodyParams)%2 != 0 {
+ t.Fatal("bodyParams must be key-value pairs")
+ }
+
+ // Build form data.
+ values := url.Values{}
+ for i := 0; i < len(bodyParams); i += 2 {
+ values.Add(bodyParams[i], bodyParams[i+1])
+ }
+
+ req := httptest.NewRequest(http.MethodPost, path, strings.NewReader(values.Encode()))
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ return executeRequest(t, api, req)
+}
+
+// executeRequest executes an HTTP request and parses the response as JSON.
+func executeRequest(t *testing.T, api *APIWrapper, req *http.Request) *Response {
+ t.Helper()
+
+ // Capture the request body for validation.
+ var requestBody []byte
+ if req.Body != nil {
+ var err error
+ requestBody, err = io.ReadAll(req.Body)
+ if err != nil {
+ t.Fatalf("failed to read request body: %v", err)
+ }
+ // Restore the body for the actual request.
+ req.Body = io.NopCloser(strings.NewReader(string(requestBody)))
+ }
+
+ recorder := httptest.NewRecorder()
+ api.Handler.ServeHTTP(recorder, req)
+
+ result := recorder.Result()
+ defer result.Body.Close()
+
+ bodyBytes, err := io.ReadAll(result.Body)
+ if err != nil {
+ t.Fatalf("failed to read response body: %v", err)
+ }
+
+ resp := &Response{
+ StatusCode: result.StatusCode,
+ Body: string(bodyBytes),
+ t: t,
+ request: req,
+ requestBody: requestBody,
+ responseHeader: result.Header,
+ }
+
+ // Try to parse as JSON.
+ if result.Header.Get("Content-Type") == "application/json" || strings.Contains(result.Header.Get("Content-Type"), "application/json") {
+ var jsonData map[string]any
+ if err := json.Unmarshal(bodyBytes, &jsonData); err != nil {
+ // If JSON parsing fails, leave JSON as nil.
+ // This allows tests to handle non-JSON responses.
+ resp.JSON = nil
+ } else {
+ resp.JSON = jsonData
+ }
+ }
+
+ return resp
+}
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index f32fee19f8..6e61fd19c6 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -258,13 +258,16 @@ type API struct {
codecs []Codec
featureRegistry features.Collector
+ openAPIBuilder *OpenAPIBuilder
+
+ parser parser.Parser
}
// NewAPI returns an initialized API type.
func NewAPI(
qe promql.QueryEngine,
q storage.SampleAndChunkQueryable,
- ap storage.Appendable,
+ ap storage.Appendable, apV2 storage.AppendableV2,
eq storage.ExemplarQueryable,
spsr func(context.Context) ScrapePoolsRetriever,
tr func(context.Context) TargetRetriever,
@@ -299,6 +302,8 @@ func NewAPI(
appendMetadata bool,
overrideErrorCode OverrideErrorCode,
featureRegistry features.Collector,
+ openAPIOptions OpenAPIOptions,
+ promqlParser parser.Parser,
) *API {
a := &API{
QueryEngine: qe,
@@ -329,17 +334,23 @@ func NewAPI(
notificationsSub: notificationsSub,
overrideErrorCode: overrideErrorCode,
featureRegistry: featureRegistry,
+ openAPIBuilder: NewOpenAPIBuilder(openAPIOptions, logger),
+ parser: promqlParser,
remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
}
+ if a.parser == nil {
+ a.parser = parser.NewParser(parser.Options{})
+ }
+
a.InstallCodec(JSONCodec{})
if statsRenderer != nil {
a.statsRenderer = statsRenderer
}
- if ap == nil && (rwEnabled || otlpEnabled) {
+ if (ap == nil || apV2 == nil) && (rwEnabled || otlpEnabled) {
panic("remote write or otlp write enabled, but no appender passed in.")
}
@@ -347,13 +358,11 @@ func NewAPI(
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, stZeroIngestionEnabled, enableTypeAndUnitLabels, appendMetadata)
}
if otlpEnabled {
- a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{
+ a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, apV2, configFunc, remote.OTLPOptions{
ConvertDelta: otlpDeltaToCumulative,
NativeDelta: otlpNativeDeltaIngestion,
LookbackDelta: lookbackDelta,
- IngestSTZeroSample: stZeroIngestionEnabled,
EnableTypeAndUnitLabels: enableTypeAndUnitLabels,
- AppendMetadata: appendMetadata,
})
}
@@ -400,7 +409,7 @@ func (api *API) Register(r *route.Router) {
w.WriteHeader(http.StatusNoContent)
})
return api.ready(httputil.CompressionHandler{
- Handler: hf,
+ Handler: api.openAPIBuilder.WrapHandler(hf),
}.ServeHTTP)
}
@@ -469,6 +478,9 @@ func (api *API) Register(r *route.Router) {
r.Put("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries))
r.Put("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones))
r.Put("/admin/tsdb/snapshot", wrapAgent(api.snapshot))
+
+ // OpenAPI endpoint.
+ r.Get("/openapi.yaml", api.ready(api.openAPIBuilder.ServeOpenAPI))
}
type QueryData struct {
@@ -556,8 +568,8 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
}, nil, warnings, qry.Close}
}
-func (*API) formatQuery(r *http.Request) (result apiFuncResult) {
- expr, err := parser.ParseExpr(r.FormValue("query"))
+func (api *API) formatQuery(r *http.Request) (result apiFuncResult) {
+ expr, err := api.parser.ParseExpr(r.FormValue("query"))
if err != nil {
return invalidParamError(err, "query")
}
@@ -565,8 +577,8 @@ func (*API) formatQuery(r *http.Request) (result apiFuncResult) {
return apiFuncResult{expr.Pretty(0), nil, nil, nil}
}
-func (*API) parseQuery(r *http.Request) apiFuncResult {
- expr, err := parser.ParseExpr(r.FormValue("query"))
+func (api *API) parseQuery(r *http.Request) apiFuncResult {
+ expr, err := api.parser.ParseExpr(r.FormValue("query"))
if err != nil {
return invalidParamError(err, "query")
}
@@ -695,7 +707,7 @@ func (api *API) queryExemplars(r *http.Request) apiFuncResult {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
- expr, err := parser.ParseExpr(r.FormValue("query"))
+ expr, err := api.parser.ParseExpr(r.FormValue("query"))
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
@@ -758,7 +770,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
return invalidParamError(err, "end")
}
- matcherSets, err := parseMatchersParam(r.Form["match[]"])
+ matcherSets, err := api.parseMatchersParam(r.Form["match[]"])
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
@@ -846,7 +858,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
return invalidParamError(err, "end")
}
- matcherSets, err := parseMatchersParam(r.Form["match[]"])
+ matcherSets, err := api.parseMatchersParam(r.Form["match[]"])
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
@@ -965,7 +977,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
return invalidParamError(err, "end")
}
- matcherSets, err := parseMatchersParam(r.Form["match[]"])
+ matcherSets, err := api.parseMatchersParam(r.Form["match[]"])
if err != nil {
return invalidParamError(err, "match[]")
}
@@ -1260,7 +1272,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult {
var matchers []*labels.Matcher
var err error
if matchTarget != "" {
- matchers, err = parser.ParseMetricSelector(matchTarget)
+ matchers, err = api.parser.ParseMetricSelector(matchTarget)
if err != nil {
return invalidParamError(err, "match_target")
}
@@ -1346,13 +1358,19 @@ func (api *API) targetRelabelSteps(r *http.Request) apiFuncResult {
rules := scrapeConfig.RelabelConfigs
steps := make([]RelabelStep, len(rules))
+ lb := labels.NewBuilder(lbls)
+ keep := true
for i, rule := range rules {
- outLabels, keep := relabel.Process(lbls, rules[:i+1]...)
- steps[i] = RelabelStep{
- Rule: rule,
- Output: outLabels,
- Keep: keep,
+ if keep {
+ keep = relabel.ProcessBuilder(lb, rule)
}
+
+ outLabels := labels.EmptyLabels()
+ if keep {
+ outLabels = lb.Labels()
+ }
+
+ steps[i] = RelabelStep{Rule: rule, Output: outLabels, Keep: keep}
}
return apiFuncResult{&RelabelStepsResponse{Steps: steps}, nil, nil, nil}
@@ -1573,7 +1591,7 @@ func (api *API) rules(r *http.Request) apiFuncResult {
rgSet := queryFormToSet(r.Form["rule_group[]"])
fSet := queryFormToSet(r.Form["file[]"])
- matcherSets, err := parseMatchersParam(r.Form["match[]"])
+ matcherSets, err := api.parseMatchersParam(r.Form["match[]"])
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
@@ -2026,7 +2044,7 @@ func (api *API) deleteSeries(r *http.Request) apiFuncResult {
}
for _, s := range r.Form["match[]"] {
- matchers, err := parser.ParseMetricSelector(s)
+ matchers, err := api.parser.ParseMetricSelector(s)
if err != nil {
return invalidParamError(err, "match[]")
}
@@ -2235,8 +2253,8 @@ func parseDuration(s string) (time.Duration, error) {
return 0, fmt.Errorf("cannot parse %q to a valid duration", s)
}
-func parseMatchersParam(matchers []string) ([][]*labels.Matcher, error) {
- matcherSets, err := parser.ParseMetricSelectors(matchers)
+func (api *API) parseMatchersParam(matchers []string) ([][]*labels.Matcher, error) {
+ matcherSets, err := api.parser.ParseMetricSelectors(matchers)
if err != nil {
return nil, err
}
diff --git a/web/api/v1/api_scenarios_test.go b/web/api/v1/api_scenarios_test.go
new file mode 100644
index 0000000000..5bdccf08d5
--- /dev/null
+++ b/web/api/v1/api_scenarios_test.go
@@ -0,0 +1,510 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/web/api/testhelpers"
+)
+
+// TODO: Generate automated tests from OpenAPI spec to validate API responses.
+
+// TestAPIEmpty tests the API with no metrics and no rules.
+func TestAPIEmpty(t *testing.T) {
+ // Create an API with empty defaults (no series, no rules).
+ api := newTestAPI(t, testhelpers.APIConfig{})
+
+ t.Run("GET /api/v1/labels returns success with empty array", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/labels").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data")
+ })
+
+ t.Run("GET /api/v1/query?query=up returns success (empty result ok)", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "up").
+ ValidateOpenAPI().
+ RequireSuccess().
+ RequireEquals("$.data.resultType", "vector")
+ })
+
+ t.Run("GET /api/v1/query_range?query=up returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query_range",
+ "query", "up",
+ "start", "0",
+ "end", "100",
+ "step", "10").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "matrix")
+ })
+
+ t.Run("GET /api/v1/series returns success with empty result", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/series",
+ "match[]", "up",
+ "start", "0",
+ "end", "100").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data")
+ })
+
+ t.Run("GET /api/v1/label/__name__/values returns success with empty array", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/label/__name__/values").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data")
+ })
+
+ t.Run("GET /api/v1/targets returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/targets").
+ RequireSuccess().
+ RequireJSONPathExists("$.data.activeTargets")
+ })
+
+ t.Run("GET /api/v1/rules returns success with empty groups", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/rules").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.groups")
+ })
+
+ t.Run("GET /api/v1/alerts returns success with empty alerts", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/alerts").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.alerts")
+ })
+
+ t.Run("GET /api/v1/alertmanagers returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/alertmanagers").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.activeAlertmanagers")
+ })
+
+ t.Run("GET /api/v1/metadata returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/metadata").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data")
+ })
+
+ t.Run("GET /api/v1/status/config returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/status/config").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.yaml")
+ })
+
+ t.Run("GET /api/v1/status/flags returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/status/flags").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data")
+ })
+
+ t.Run("GET /api/v1/status/runtimeinfo returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/status/runtimeinfo").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data")
+ })
+
+ t.Run("GET /api/v1/status/buildinfo returns success", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/status/buildinfo").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data")
+ })
+
+ t.Run("POST /api/v1/query with form data returns success", func(t *testing.T) {
+ testhelpers.POST(t, api, "/api/v1/query", "query", "up").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector")
+ })
+}
+
+// TestAPIWithSeries tests the API with metrics/series data.
+func TestAPIWithSeries(t *testing.T) {
+ // Create an API with sample series data.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ Queryable: testhelpers.NewLazyLoader(func() storage.SampleAndChunkQueryable {
+ return testhelpers.NewQueryableWithSeries(testhelpers.FixtureMultipleSeries())
+ }),
+ })
+
+ t.Run("GET /api/v1/query returns vector with >= 1 sample", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "up").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1)
+ })
+
+ t.Run("GET /api/v1/query_range returns matrix result type", func(t *testing.T) {
+ // Use relative timestamps to match our fixtures.
+ now := time.Now().Unix()
+ testhelpers.GET(t, api, "/api/v1/query_range",
+ "query", "up",
+ "start", strconv.FormatInt(now-120, 10),
+ "end", strconv.FormatInt(now, 10),
+ "step", "60").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "matrix")
+ // Note: Result may be empty if timestamps don't align perfectly with samples.
+ })
+
+ t.Run("GET /api/v1/labels returns non-empty array", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/labels").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data").
+ RequireLenAtLeast("$.data", 1)
+ })
+
+ t.Run("GET /api/v1/label/__name__/values contains expected metric names", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/label/__name__/values").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireArrayContains("$.data", "up").
+ RequireArrayContains("$.data", "http_requests_total")
+ })
+
+ t.Run("GET /api/v1/label/job/values contains expected jobs", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/label/job/values").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data").
+ RequireArrayContains("$.data", "prometheus").
+ RequireArrayContains("$.data", "node").
+ RequireArrayContains("$.data", "api")
+ })
+
+ t.Run("GET /api/v1/series with match returns results", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/series",
+ "match[]", "up",
+ "start", "0",
+ "end", "120").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data").
+ RequireLenAtLeast("$.data", 1)
+ })
+
+ t.Run("GET /api/v1/query with specific job returns filtered results", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", `up{job="prometheus"}`).
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1)
+ })
+
+ t.Run("GET /api/v1/query with aggregation returns result", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "sum(up)").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector")
+ })
+
+ t.Run("POST /api/v1/query returns vector with data", func(t *testing.T) {
+ testhelpers.POST(t, api, "/api/v1/query", "query", "up").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1)
+ })
+}
+
+// TestAPIWithRules tests the API with rules configured.
+func TestAPIWithRules(t *testing.T) {
+ // Create an API with rule groups.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ RulesRetriever: testhelpers.NewLazyLoader(func() testhelpers.RulesRetriever {
+ return testhelpers.NewRulesRetrieverWithGroups(testhelpers.FixtureRuleGroups())
+ }),
+ })
+
+ t.Run("GET /api/v1/rules returns groups with rules", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/rules").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.groups").
+ RequireLenAtLeast("$.data.groups", 1).
+ RequireSome("$.data.groups", func(group any) bool {
+ if g, ok := group.(map[string]any); ok {
+ return g["name"] == "example"
+ }
+ return false
+ }).
+ RequireSome("$.data.groups", func(group any) bool {
+ if g, ok := group.(map[string]any); ok {
+ if g["name"] == "example" {
+ // Check that the group has rules.
+ if rules, ok := g["rules"].([]any); ok {
+ return len(rules) > 0
+ }
+ }
+ }
+ return false
+ })
+ })
+
+ t.Run("GET /api/v1/alerts returns alerts array", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/alerts").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.alerts").
+ RequireJSONArray("$.data.alerts")
+ })
+
+ t.Run("GET /api/v1/rules with rule_name filter", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/rules", "rule_name[]", "InstanceDown").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONPathExists("$.data.groups")
+ })
+}
+
+// TestAPITSDBNotReady tests the API when TSDB is not ready (e.g., during WAL replay).
+// TSDB not ready errors are converted to errorUnavailable by setUnavailStatusOnTSDBNotReady,
+// which returns HTTP 500 Internal Server Error (the default for errorUnavailable).
+func TestAPITSDBNotReady(t *testing.T) {
+ // Create an API with a queryable that returns tsdb.ErrNotReady.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ Queryable: testhelpers.NewLazyLoader(testhelpers.NewTSDBNotReadyQueryable),
+ })
+
+ t.Run("GET /api/v1/query returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "up").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("POST /api/v1/query returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.POST(t, api, "/api/v1/query", "query", "up").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("GET /api/v1/query_range returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query_range",
+ "query", "up",
+ "start", "0",
+ "end", "100",
+ "step", "10").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("GET /api/v1/series returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/series",
+ "match[]", "up",
+ "start", "0",
+ "end", "100").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("GET /api/v1/labels returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/labels").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+
+ t.Run("GET /api/v1/label/{name}/values returns 500 when TSDB not ready", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/label/__name__/values").
+ RequireStatusCode(500).
+ ValidateOpenAPI().
+ RequireError()
+ })
+}
+
+// TestAPIWithNativeHistograms tests the API with native histogram data.
+func TestAPIWithNativeHistograms(t *testing.T) {
+ // Create an API with histogram series data.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ Queryable: testhelpers.NewLazyLoader(func() storage.SampleAndChunkQueryable {
+ return testhelpers.NewQueryableWithSeries(testhelpers.FixtureHistogramSeries())
+ }),
+ })
+
+ t.Run("GET /api/v1/query returns vector with native histogram", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", "test_histogram").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1).
+ RequireSome("$.data.result", func(item any) bool {
+ sample, ok := item.(map[string]any)
+ if !ok {
+ return false
+ }
+ // Check that the sample has a histogram field (not a value field).
+ _, hasHistogram := sample["histogram"]
+ return hasHistogram
+ })
+ })
+
+ t.Run("POST /api/v1/query returns vector with native histogram", func(t *testing.T) {
+ testhelpers.POST(t, api, "/api/v1/query", "query", "test_histogram").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1).
+ RequireSome("$.data.result", func(item any) bool {
+ sample, ok := item.(map[string]any)
+ if !ok {
+ return false
+ }
+ // Check that the sample has a histogram field (not a value field).
+ _, hasHistogram := sample["histogram"]
+ return hasHistogram
+ })
+ })
+
+ t.Run("GET /api/v1/query_range returns matrix with native histogram", func(t *testing.T) {
+ // Use relative timestamps to match our fixtures.
+ now := time.Now().Unix()
+ testhelpers.GET(t, api, "/api/v1/query_range",
+ "query", "test_histogram",
+ "start", strconv.FormatInt(now-120, 10),
+ "end", strconv.FormatInt(now, 10),
+ "step", "60").
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "matrix")
+ })
+
+ t.Run("GET /api/v1/query with histogram selector", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/query", "query", `test_histogram{job="prometheus"}`).
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireEquals("$.data.resultType", "vector").
+ RequireLenAtLeast("$.data.result", 1)
+ })
+
+ t.Run("GET /api/v1/series returns histogram metric series", func(t *testing.T) {
+ testhelpers.GET(t, api, "/api/v1/series",
+ "match[]", "test_histogram",
+ "start", "0",
+ "end", strconv.FormatInt(time.Now().Unix(), 10)).
+ RequireSuccess().
+ ValidateOpenAPI().
+ RequireJSONArray("$.data").
+ RequireLenAtLeast("$.data", 1)
+ })
+}
+
+// TestAPIWithStats tests the API with the stats query parameter.
+func TestAPIWithStats(t *testing.T) {
+ // Create an API with sample series data.
+ api := newTestAPI(t, testhelpers.APIConfig{
+ Queryable: testhelpers.NewLazyLoader(func() storage.SampleAndChunkQueryable {
+ return testhelpers.NewQueryableWithSeries(testhelpers.FixtureMultipleSeries())
+ }),
+ })
+
+ now := time.Now().Unix()
+
+ // Test combinations of methods, endpoints, and stats values.
+ methods := []string{"GET", "POST"}
+ statsValues := []struct {
+ value string
+ expectStats bool
+ }{
+ {"true", true},
+ {"all", true},
+ {"1", true},
+ {"", false},
+ }
+
+ for _, method := range methods {
+ for _, stats := range statsValues {
+ t.Run(method+" /api/v1/query with stats="+stats.value, func(t *testing.T) {
+ var params []string
+ if stats.value != "" {
+ params = []string{"query", "up", "stats", stats.value}
+ } else {
+ params = []string{"query", "up"}
+ }
+
+ var resp *testhelpers.Response
+ if method == "GET" {
+ resp = testhelpers.GET(t, api, "/api/v1/query", params...)
+ } else {
+ resp = testhelpers.POST(t, api, "/api/v1/query", params...)
+ }
+
+ resp.RequireSuccess().ValidateOpenAPI()
+
+ if stats.expectStats {
+ resp.RequireJSONPathExists("$.data.stats").
+ RequireJSONPathExists("$.data.stats.timings").
+ RequireJSONPathExists("$.data.stats.samples")
+ } else {
+ resp.RequireJSONPathNotExists("$.data.stats")
+ }
+ })
+
+ t.Run(method+" /api/v1/query_range with stats="+stats.value, func(t *testing.T) {
+ var params []string
+ if stats.value != "" {
+ params = []string{
+ "query", "up",
+ "start", strconv.FormatInt(now-120, 10),
+ "end", strconv.FormatInt(now, 10),
+ "step", "60",
+ "stats", stats.value,
+ }
+ } else {
+ params = []string{
+ "query", "up",
+ "start", strconv.FormatInt(now-120, 10),
+ "end", strconv.FormatInt(now, 10),
+ "step", "60",
+ }
+ }
+
+ var resp *testhelpers.Response
+ if method == "GET" {
+ resp = testhelpers.GET(t, api, "/api/v1/query_range", params...)
+ } else {
+ resp = testhelpers.POST(t, api, "/api/v1/query_range", params...)
+ }
+
+ resp.RequireSuccess().ValidateOpenAPI()
+
+ if stats.expectStats {
+ resp.RequireJSONPathExists("$.data.stats").
+ RequireJSONPathExists("$.data.stats.timings").
+ RequireJSONPathExists("$.data.stats.samples")
+ } else {
+ resp.RequireJSONPathNotExists("$.data.stats")
+ }
+ })
+ }
+ }
+}
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index 39c1fa6080..1fdb7ab645 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -63,6 +63,8 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
+var testParser = parser.NewParser(parser.Options{})
+
func testEngine(t *testing.T) *promql.Engine {
t.Helper()
return promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{
@@ -166,8 +168,8 @@ func (t testTargetRetriever) TargetsDroppedCounts() map[string]int {
return r
}
-func (testTargetRetriever) ScrapePoolConfig(_ string) (*config.ScrapeConfig, error) {
- return &config.ScrapeConfig{
+func (testTargetRetriever) ScrapePoolConfig(pool string) (*config.ScrapeConfig, error) {
+ cfg := &config.ScrapeConfig{
RelabelConfigs: []*relabel.Config{
{
Action: relabel.Replace,
@@ -182,20 +184,26 @@ func (testTargetRetriever) ScrapePoolConfig(_ string) (*config.ScrapeConfig, err
Regex: relabel.MustNewRegexp(`example\.com:.*`),
},
},
- }, nil
+ }
+ if pool == "testpool3" {
+ cfg.RelabelConfigs = append(cfg.RelabelConfigs, &relabel.Config{
+ Action: relabel.Replace,
+ TargetLabel: "job",
+ Regex: relabel.MustNewRegexp(".*"),
+ Replacement: "should_not_apply",
+ })
+ }
+ return cfg, nil
}
func (t *testTargetRetriever) SetMetadataStoreForTargets(identifier string, metadata scrape.MetricMetadataStore) error {
targets, ok := t.activeTargets[identifier]
-
if !ok {
- return errors.New("targets not found")
+ return fmt.Errorf("no active target for %v", identifier)
}
-
for _, at := range targets {
at.SetMetadataStore(metadata)
}
-
return nil
}
@@ -244,11 +252,11 @@ type rulesRetrieverMock struct {
}
func (m *rulesRetrieverMock) CreateAlertingRules() {
- expr1, err := parser.ParseExpr(`absent(test_metric3) != 1`)
+ expr1, err := testParser.ParseExpr(`absent(test_metric3) != 1`)
require.NoError(m.testing, err)
- expr2, err := parser.ParseExpr(`up == 1`)
+ expr2, err := testParser.ParseExpr(`up == 1`)
require.NoError(m.testing, err)
- expr3, err := parser.ParseExpr(`vector(1)`)
+ expr3, err := testParser.ParseExpr(`vector(1)`)
require.NoError(m.testing, err)
rule1 := rules.NewAlertingRule(
@@ -323,8 +331,8 @@ func (m *rulesRetrieverMock) CreateAlertingRules() {
func (m *rulesRetrieverMock) CreateRuleGroups() {
m.CreateAlertingRules()
arules := m.AlertingRules()
- storage := teststorage.New(m.testing)
- defer storage.Close()
+ // Create separate storage for recordings to not pollute the main one.
+ s := teststorage.New(m.testing)
engineOpts := promql.EngineOpts{
Logger: nil,
@@ -334,8 +342,8 @@ func (m *rulesRetrieverMock) CreateRuleGroups() {
}
engine := promqltest.NewTestEngineWithOpts(m.testing, engineOpts)
opts := &rules.ManagerOptions{
- QueryFunc: rules.EngineQueryFunc(engine, storage),
- Appendable: storage,
+ QueryFunc: rules.EngineQueryFunc(engine, s),
+ Appendable: s,
Context: context.Background(),
Logger: promslog.NewNopLogger(),
NotifyFunc: func(context.Context, string, ...*rules.Alert) {},
@@ -347,7 +355,7 @@ func (m *rulesRetrieverMock) CreateRuleGroups() {
r = append(r, alertrule)
}
- recordingExpr, err := parser.ParseExpr(`vector(1)`)
+ recordingExpr, err := testParser.ParseExpr(`vector(1)`)
require.NoError(m.testing, err, "unable to parse alert expression")
recordingRule := rules.NewRecordingRule("recording-rule-1", recordingExpr, labels.Labels{})
recordingRule2 := rules.NewRecordingRule("recording-rule-2", recordingExpr, labels.FromStrings("testlabel", "rule"))
@@ -400,8 +408,23 @@ var sampleFlagMap = map[string]string{
"flag2": "value2",
}
+func appendExemplars(t testing.TB, s storage.Storage, ex []exemplar.QueryResult) {
+ t.Helper()
+
+ // TODO(bwplotka): Use AppenderV2.AppendExemplar per series flow
+ // once its implemented: https://github.com/prometheus/prometheus/issues/17632#issuecomment-3759315095
+ app := s.Appender(t.Context())
+ for _, ed := range ex {
+ for _, e := range ed.Exemplars {
+ _, err := app.AppendExemplar(0, ed.SeriesLabels, e)
+ require.NoError(t, err)
+ }
+ }
+ require.NoError(t, app.Commit())
+}
+
func TestEndpoints(t *testing.T) {
- storage := promqltest.LoadedStorage(t, `
+ s := promqltest.LoadedStorage(t, `
load 1m
test_metric1{foo="bar"} 0+100x100
test_metric1{foo="boo"} 1+0x100
@@ -414,8 +437,8 @@ func TestEndpoints(t *testing.T) {
test_metric5{"host.name"="localhost"} 1+0x100
test_metric5{"junk\n{},=: chars"="bar"} 1+0x100
`)
- t.Cleanup(func() { storage.Close() })
+ // Add exemplar testdata here, given promqltest does not support exemplars.
start := time.Unix(0, 0)
exemplars := []exemplar.QueryResult{
{
@@ -459,15 +482,10 @@ func TestEndpoints(t *testing.T) {
},
},
}
- for _, ed := range exemplars {
- _, err := storage.AppendExemplar(0, ed.SeriesLabels, ed.Exemplars[0])
- require.NoError(t, err, "failed to add exemplar: %+v", ed.Exemplars[0])
- }
+ appendExemplars(t, s, exemplars)
now := time.Now()
-
ng := testEngine(t)
-
t.Run("local", func(t *testing.T) {
algr := rulesRetrieverMock{testing: t}
@@ -480,9 +498,9 @@ func TestEndpoints(t *testing.T) {
testTargetRetriever := setupTestTargetRetriever(t)
api := &API{
- Queryable: storage,
+ Queryable: s,
QueryEngine: ng,
- ExemplarQueryable: storage.ExemplarQueryable(),
+ ExemplarQueryable: s,
targetRetriever: testTargetRetriever.toFactory(),
alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(),
flagsMap: sampleFlagMap,
@@ -490,15 +508,16 @@ func TestEndpoints(t *testing.T) {
config: func() config.Config { return samplePrometheusCfg },
ready: func(f http.HandlerFunc) http.HandlerFunc { return f },
rulesRetriever: algr.toFactory(),
+ parser: testParser,
}
- testEndpoints(t, api, testTargetRetriever, storage, true)
+ testEndpoints(t, api, testTargetRetriever, true)
})
// Run all the API tests against an API that is wired to forward queries via
// the remote read client to a test server, which in turn sends them to the
// data from the test storage.
t.Run("remote", func(t *testing.T) {
- server := setupRemote(storage)
+ server := setupRemote(s)
defer server.Close()
u, err := url.Parse(server.URL)
@@ -520,6 +539,7 @@ func TestEndpoints(t *testing.T) {
remote := remote.NewStorage(promslog.New(&promslogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
return 0, nil
}, dbDir, 1*time.Second, nil, false)
+ t.Cleanup(func() { _ = remote.Close() })
err = remote.ApplyConfig(&config.Config{
RemoteReadConfigs: []*config.RemoteReadConfig{
@@ -545,7 +565,7 @@ func TestEndpoints(t *testing.T) {
api := &API{
Queryable: remote,
QueryEngine: ng,
- ExemplarQueryable: storage.ExemplarQueryable(),
+ ExemplarQueryable: s,
targetRetriever: testTargetRetriever.toFactory(),
alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(),
flagsMap: sampleFlagMap,
@@ -553,8 +573,9 @@ func TestEndpoints(t *testing.T) {
config: func() config.Config { return samplePrometheusCfg },
ready: func(f http.HandlerFunc) http.HandlerFunc { return f },
rulesRetriever: algr.toFactory(),
+ parser: testParser,
}
- testEndpoints(t, api, testTargetRetriever, storage, false)
+ testEndpoints(t, api, testTargetRetriever, false)
})
}
@@ -567,7 +588,7 @@ func (b byLabels) Less(i, j int) bool { return labels.Compare(b[i], b[j]) < 0 }
func TestGetSeries(t *testing.T) {
// TestEndpoints doesn't have enough label names to test api.labelNames
// endpoint properly. Hence we test it separately.
- storage := promqltest.LoadedStorage(t, `
+ s := promqltest.LoadedStorage(t, `
load 1m
test_metric1{foo1="bar", baz="abc"} 0+100x100
test_metric1{foo2="boo"} 1+0x100
@@ -575,9 +596,10 @@ func TestGetSeries(t *testing.T) {
test_metric2{foo="boo", xyz="qwerty"} 1+0x100
test_metric2{foo="baz", abc="qwerty"} 1+0x100
`)
- t.Cleanup(func() { storage.Close() })
+
api := &API{
- Queryable: storage,
+ Queryable: s,
+ parser: testParser,
}
request := func(method string, matchers ...string) (*http.Request, error) {
u, err := url.Parse("http://example.com")
@@ -642,6 +664,7 @@ func TestGetSeries(t *testing.T) {
expectedErrorType: errorExec,
api: &API{
Queryable: errorTestQueryable{err: errors.New("generic")},
+ parser: testParser,
},
},
{
@@ -650,6 +673,7 @@ func TestGetSeries(t *testing.T) {
expectedErrorType: errorInternal,
api: &API{
Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
+ parser: testParser,
},
},
} {
@@ -671,7 +695,7 @@ func TestGetSeries(t *testing.T) {
func TestQueryExemplars(t *testing.T) {
start := time.Unix(0, 0)
- storage := promqltest.LoadedStorage(t, `
+ s := promqltest.LoadedStorage(t, `
load 1m
test_metric1{foo="bar"} 0+100x100
test_metric1{foo="boo"} 1+0x100
@@ -682,12 +706,12 @@ func TestQueryExemplars(t *testing.T) {
test_metric4{foo="boo", dup="1"} 1+0x100
test_metric4{foo="boo"} 1+0x100
`)
- t.Cleanup(func() { storage.Close() })
api := &API{
- Queryable: storage,
+ Queryable: s,
QueryEngine: testEngine(t),
- ExemplarQueryable: storage.ExemplarQueryable(),
+ ExemplarQueryable: s,
+ parser: testParser,
}
request := func(method string, qs url.Values) (*http.Request, error) {
@@ -744,6 +768,7 @@ func TestQueryExemplars(t *testing.T) {
expectedErrorType: errorExec,
api: &API{
ExemplarQueryable: errorTestQueryable{err: errors.New("generic")},
+ parser: testParser,
},
query: url.Values{
"query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`},
@@ -756,6 +781,7 @@ func TestQueryExemplars(t *testing.T) {
expectedErrorType: errorInternal,
api: &API{
ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
+ parser: testParser,
},
query: url.Values{
"query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`},
@@ -765,15 +791,10 @@ func TestQueryExemplars(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
- es := storage
+ es := s
ctx := context.Background()
- for _, te := range tc.exemplars {
- for _, e := range te.Exemplars {
- _, err := es.AppendExemplar(0, te.SeriesLabels, e)
- require.NoError(t, err)
- }
- }
+ appendExemplars(t, es, tc.exemplars)
req, err := request(http.MethodGet, tc.query)
require.NoError(t, err)
@@ -790,7 +811,7 @@ func TestQueryExemplars(t *testing.T) {
func TestLabelNames(t *testing.T) {
// TestEndpoints doesn't have enough label names to test api.labelNames
// endpoint properly. Hence we test it separately.
- storage := promqltest.LoadedStorage(t, `
+ s := promqltest.LoadedStorage(t, `
load 1m
test_metric1{foo1="bar", baz="abc"} 0+100x100
test_metric1{foo2="boo"} 1+0x100
@@ -798,9 +819,10 @@ func TestLabelNames(t *testing.T) {
test_metric2{foo="boo", xyz="qwerty"} 1+0x100
test_metric2{foo="baz", abc="qwerty"} 1+0x100
`)
- t.Cleanup(func() { storage.Close() })
+
api := &API{
- Queryable: storage,
+ Queryable: s,
+ parser: testParser,
}
request := func(method, limit string, matchers ...string) (*http.Request, error) {
u, err := url.Parse("http://example.com")
@@ -865,6 +887,7 @@ func TestLabelNames(t *testing.T) {
expectedErrorType: errorExec,
api: &API{
Queryable: errorTestQueryable{err: errors.New("generic")},
+ parser: testParser,
},
},
{
@@ -873,6 +896,7 @@ func TestLabelNames(t *testing.T) {
expectedErrorType: errorInternal,
api: &API{
Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
+ parser: testParser,
},
},
} {
@@ -900,12 +924,12 @@ func (testStats) Builtin() (_ stats.BuiltinStats) {
}
func TestStats(t *testing.T) {
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
+ s := teststorage.New(t)
api := &API{
- Queryable: storage,
+ Queryable: s,
QueryEngine: testEngine(t),
+ parser: testParser,
now: func() time.Time {
return time.Unix(123, 0)
},
@@ -1119,7 +1143,7 @@ func setupRemote(s storage.Storage) *httptest.Server {
return httptest.NewServer(handler)
}
-func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.ExemplarStorage, testLabelAPI bool) {
+func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, testLabelAPI bool) {
start := time.Unix(0, 0)
type targetMetadata struct {
@@ -1139,7 +1163,6 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
errType errorType
sorter func(any)
metadata []targetMetadata
- exemplars []exemplar.QueryResult
zeroFunc func(any)
}
@@ -1937,6 +1960,47 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
},
},
},
+ {
+ endpoint: api.targetRelabelSteps,
+ query: url.Values{"scrapePool": []string{"testpool3"}, "labels": []string{`{"job":"test","__address__":"localhost:9090"}`}},
+ response: &RelabelStepsResponse{
+ Steps: []RelabelStep{
+ {
+ Rule: &relabel.Config{
+ Action: relabel.Replace,
+ Replacement: "example.com:443",
+ TargetLabel: "__address__",
+ Regex: relabel.MustNewRegexp(""),
+ NameValidationScheme: model.LegacyValidation,
+ },
+ Output: labels.FromMap(map[string]string{
+ "job": "test",
+ "__address__": "example.com:443",
+ }),
+ Keep: true,
+ },
+ {
+ Rule: &relabel.Config{
+ Action: relabel.Drop,
+ SourceLabels: []model.LabelName{"__address__"},
+ Regex: relabel.MustNewRegexp(`example\.com:.*`),
+ },
+ Output: labels.EmptyLabels(),
+ Keep: false,
+ },
+ {
+ Rule: &relabel.Config{
+ Action: relabel.Replace,
+ TargetLabel: "job",
+ Regex: relabel.MustNewRegexp(".*"),
+ Replacement: "should_not_apply",
+ },
+ Output: labels.EmptyLabels(),
+ Keep: false,
+ },
+ },
+ },
+ },
// With a matching metric.
{
endpoint: api.targetMetadata,
@@ -2047,8 +2111,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
},
sorter: func(m any) {
sort.Slice(m.([]metricMetadata), func(i, j int) bool {
- s := m.([]metricMetadata)
- return s[i].MetricFamily < s[j].MetricFamily
+ mm := m.([]metricMetadata)
+ return mm[i].MetricFamily < mm[j].MetricFamily
})
},
},
@@ -3762,17 +3826,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
tr.ResetMetadataStore()
for _, tm := range test.metadata {
- tr.SetMetadataStoreForTargets(tm.identifier, &testMetaStore{Metadata: tm.metadata})
- }
-
- for _, te := range test.exemplars {
- for _, e := range te.Exemplars {
- _, err := es.AppendExemplar(0, te.SeriesLabels, e)
- require.NoError(t, err)
- }
+ // TODO: Check error and fixed broken test/bug.
+ // TestEndpoints/local/run_60_metricMetadata_"limit=1&limit_per_metric=1"/GET fails if we check the error.
+ _ = tr.SetMetadataStoreForTargets(tm.identifier, &testMetaStore{Metadata: tm.metadata})
}
res := test.endpoint(req.WithContext(ctx))
+ if res.finalizer != nil {
+ // Finalizers were added to ensure closed readers on API panics, ensure they are closed here too.
+ res.finalizer()
+ }
assertAPIError(t, res.err, test.errType)
if test.sorter != nil {
@@ -4052,6 +4115,7 @@ func TestAdminEndpoints(t *testing.T) {
dbDir: dir,
ready: func(f http.HandlerFunc) http.HandlerFunc { return f },
enableAdmin: tc.enableAdmin,
+ parser: testParser,
}
endpoint := tc.endpoint(api)
@@ -4770,13 +4834,10 @@ func TestExtractQueryOpts(t *testing.T) {
// Test query timeout parameter.
func TestQueryTimeout(t *testing.T) {
- storage := promqltest.LoadedStorage(t, `
+ s := promqltest.LoadedStorage(t, `
load 1m
test_metric1{foo="bar"} 0+100x100
`)
- t.Cleanup(func() {
- _ = storage.Close()
- })
now := time.Now()
@@ -4796,14 +4857,15 @@ func TestQueryTimeout(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
engine := &fakeEngine{}
api := &API{
- Queryable: storage,
+ Queryable: s,
QueryEngine: engine,
- ExemplarQueryable: storage.ExemplarQueryable(),
+ ExemplarQueryable: s,
alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(),
flagsMap: sampleFlagMap,
now: func() time.Time { return now },
config: func() config.Config { return samplePrometheusCfg },
ready: func(f http.HandlerFunc) http.HandlerFunc { return f },
+ parser: testParser,
}
query := url.Values{
diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go
index 6e55089e16..b041024a48 100644
--- a/web/api/v1/errors_test.go
+++ b/web/api/v1/errors_test.go
@@ -34,6 +34,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
@@ -134,7 +135,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable, overri
api := NewAPI(
engine,
q,
- nil,
+ nil, nil,
nil,
func(context.Context) ScrapePoolsRetriever { return &DummyScrapePoolsRetriever{} },
func(context.Context) TargetRetriever { return &DummyTargetRetriever{} },
@@ -169,6 +170,8 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable, overri
false,
overrideErrorCode,
nil,
+ OpenAPIOptions{},
+ parser.NewParser(parser.Options{}),
)
promRouter := route.New().WithPrefix("/api/v1")
diff --git a/web/api/v1/openapi.go b/web/api/v1/openapi.go
new file mode 100644
index 0000000000..59fa8969ef
--- /dev/null
+++ b/web/api/v1/openapi.go
@@ -0,0 +1,320 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements OpenAPI 3.2 specification generation for the Prometheus HTTP API.
+// It provides dynamic spec building with optional path filtering.
+package v1
+
+import (
+ "log/slog"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "sync"
+
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/pb33f/libopenapi/orderedmap"
+)
+
+const (
+ // OpenAPI 3.1.0 is the default version with broader compatibility.
+ openAPIVersion31 = "3.1.0"
+ // OpenAPI 3.2.0 supports advanced features like itemSchema for SSE streams.
+ openAPIVersion32 = "3.2.0"
+)
+
+// OpenAPIOptions configures the OpenAPI spec builder.
+type OpenAPIOptions struct {
+ // IncludePaths filters which paths to include in the spec.
+ // If empty, all paths are included.
+ // Paths are matched by prefix (e.g., "/query" matches "/query" and "/query_range").
+ IncludePaths []string
+
+ // ExternalURL is the external URL of the Prometheus server (e.g., "http://prometheus.example.com:9090").
+ ExternalURL string
+
+ // Version is the API version to include in the OpenAPI spec.
+ // If empty, defaults to "0.0.1-undefined".
+ Version string
+}
+
+// OpenAPIBuilder builds and caches OpenAPI specifications.
+type OpenAPIBuilder struct {
+ mu sync.RWMutex
+ cachedYAML31 []byte // Cached OpenAPI 3.1 spec.
+ cachedYAML32 []byte // Cached OpenAPI 3.2 spec.
+ options OpenAPIOptions
+ logger *slog.Logger
+}
+
+// NewOpenAPIBuilder creates a new OpenAPI builder with the given options.
+func NewOpenAPIBuilder(opts OpenAPIOptions, logger *slog.Logger) *OpenAPIBuilder {
+ b := &OpenAPIBuilder{
+ options: opts,
+ logger: logger,
+ }
+
+ b.rebuild()
+ return b
+}
+
+// rebuild constructs the OpenAPI specs for both 3.1 and 3.2 versions based on current options.
+func (b *OpenAPIBuilder) rebuild() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // Build OpenAPI 3.1 spec.
+ doc31 := b.buildDocument(openAPIVersion31)
+ yamlBytes31, err := doc31.Render()
+ if err != nil {
+ b.logger.Error("failed to render OpenAPI 3.1 spec - this is a bug, please report it", "err", err)
+ return
+ }
+ b.cachedYAML31 = yamlBytes31
+
+ // Build OpenAPI 3.2 spec.
+ doc32 := b.buildDocument(openAPIVersion32)
+ yamlBytes32, err := doc32.Render()
+ if err != nil {
+ b.logger.Error("failed to render OpenAPI 3.2 spec - this is a bug, please report it", "err", err)
+ return
+ }
+ b.cachedYAML32 = yamlBytes32
+}
+
+// ServeOpenAPI returns the OpenAPI specification as YAML.
+// By default, serves OpenAPI 3.1.0. Use ?openapi_version=3.2 for OpenAPI 3.2.0.
+func (b *OpenAPIBuilder) ServeOpenAPI(w http.ResponseWriter, r *http.Request) {
+ // Parse query parameter to determine which version to serve.
+ requestedVersion := r.URL.Query().Get("openapi_version")
+
+ b.mu.RLock()
+ var yamlData []byte
+ switch requestedVersion {
+ case "3.2", "3.2.0":
+ yamlData = b.cachedYAML32
+ case "3.1", "3.1.0":
+ yamlData = b.cachedYAML31
+ default:
+ // Default to OpenAPI 3.1.0 for broader compatibility.
+ yamlData = b.cachedYAML31
+ }
+ b.mu.RUnlock()
+
+ w.Header().Set("Content-Type", "application/yaml; charset=utf-8")
+ w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
+ w.WriteHeader(http.StatusOK)
+ w.Write(yamlData)
+}
+
+// WrapHandler returns the handler unchanged (no validation).
+func (*OpenAPIBuilder) WrapHandler(next http.HandlerFunc) http.HandlerFunc {
+ return next
+}
+
+// shouldIncludePath checks if a path should be included based on options.
+func (b *OpenAPIBuilder) shouldIncludePath(path string) bool {
+ if len(b.options.IncludePaths) == 0 {
+ return true
+ }
+ for _, include := range b.options.IncludePaths {
+ if strings.HasPrefix(path, include) || path == include {
+ return true
+ }
+ }
+ return false
+}
+
+// shouldIncludePathForVersion checks if a path should be included for a specific OpenAPI version.
+func (b *OpenAPIBuilder) shouldIncludePathForVersion(path, version string) bool {
+ // First check IncludePaths filter.
+ if !b.shouldIncludePath(path) {
+ return false
+ }
+
+ // OpenAPI 3.1 excludes paths that require 3.2 features.
+ // The /notifications/live endpoint uses itemSchema which is a 3.2-only feature.
+ if version == openAPIVersion31 && path == "/notifications/live" {
+ return false
+ }
+
+ return true
+}
+
+// buildDocument creates the OpenAPI document for the specified version using high-level structs.
+func (b *OpenAPIBuilder) buildDocument(version string) *v3.Document {
+ return &v3.Document{
+ Version: version,
+ Info: b.buildInfo(),
+ Servers: b.buildServers(),
+ Tags: b.buildTags(version),
+ Paths: b.buildPaths(version),
+ Components: b.buildComponents(),
+ }
+}
+
+// buildInfo constructs the info section.
+func (b *OpenAPIBuilder) buildInfo() *base.Info {
+ apiVersion := b.options.Version
+ if apiVersion == "" {
+ apiVersion = "0.0.1-undefined"
+ }
+ return &base.Info{
+ Title: "Prometheus API",
+ Description: "Prometheus is an Open-Source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach.",
+ Version: apiVersion,
+ Contact: &base.Contact{
+ Name: "Prometheus Community",
+ URL: "https://prometheus.io/community/",
+ },
+ }
+}
+
+// buildServers constructs the servers section.
+func (b *OpenAPIBuilder) buildServers() []*v3.Server {
+ // ExternalURL is always set by computeExternalURL in main.go.
+ // It includes scheme, host, port, and optional path prefix (without trailing slash).
+ serverURL := "/api/v1"
+ if b.options.ExternalURL != "" {
+ baseURL, err := url.Parse(b.options.ExternalURL)
+ if err == nil {
+ // Use path.Join to properly append /api/v1 to the existing path.
+ // Then use ResolveReference to construct the full URL.
+ baseURL.Path = path.Join(baseURL.Path, "/api/v1")
+ serverURL = baseURL.String()
+ }
+ }
+ return []*v3.Server{
+ {URL: serverURL},
+ }
+}
+
+// buildTags constructs the global tags list.
+// Tag summary is an OpenAPI 3.2 feature, excluded from 3.1.
+// Tag description is supported in both 3.1 and 3.2.
+func (*OpenAPIBuilder) buildTags(version string) []*base.Tag {
+ // Define tags with all metadata.
+ tagData := []struct {
+ name string
+ summary string
+ description string
+ }{
+ {"query", "Query", "Query and evaluate PromQL expressions."},
+ {"metadata", "Metadata", "Retrieve metric metadata such as type and unit."},
+ {"labels", "Labels", "Query label names and values."},
+ {"series", "Series", "Query and manage time series."},
+ {"targets", "Targets", "Retrieve target and scrape pool information."},
+ {"rules", "Rules", "Query recording and alerting rules."},
+ {"alerts", "Alerts", "Query active alerts and alertmanager discovery."},
+ {"status", "Status", "Retrieve server status and configuration."},
+ {"admin", "Admin", "Administrative operations for TSDB management."},
+ {"features", "Features", "Query enabled features."},
+ {"remote", "Remote Storage", "Remote read and write endpoints."},
+ {"otlp", "OTLP", "OpenTelemetry Protocol metrics ingestion."},
+ {"notifications", "Notifications", "Server notifications and events."},
+ }
+
+ tags := make([]*base.Tag, 0, len(tagData))
+ for _, td := range tagData {
+ tag := &base.Tag{
+ Name: td.name,
+ Description: td.description, // Description is supported in both 3.1 and 3.2.
+ }
+
+ // Summary is an OpenAPI 3.2 feature only.
+ if version == openAPIVersion32 {
+ tag.Summary = td.summary
+ }
+
+ tags = append(tags, tag)
+ }
+
+ return tags
+}
+
+// buildPaths constructs all API path definitions.
+func (b *OpenAPIBuilder) buildPaths(version string) *v3.Paths {
+ pathItems := orderedmap.New[string, *v3.PathItem]()
+
+ allPaths := b.getAllPathDefinitions()
+ for pair := allPaths.First(); pair != nil; pair = pair.Next() {
+ if b.shouldIncludePathForVersion(pair.Key(), version) {
+ pathItems.Set(pair.Key(), pair.Value())
+ }
+ }
+
+ return &v3.Paths{PathItems: pathItems}
+}
+
+// getAllPathDefinitions returns all path definitions.
+func (b *OpenAPIBuilder) getAllPathDefinitions() *orderedmap.Map[string, *v3.PathItem] {
+ paths := orderedmap.New[string, *v3.PathItem]()
+
+ // Query endpoints.
+ paths.Set("/query", b.queryPath())
+ paths.Set("/query_range", b.queryRangePath())
+ paths.Set("/query_exemplars", b.queryExemplarsPath())
+ paths.Set("/format_query", b.formatQueryPath())
+ paths.Set("/parse_query", b.parseQueryPath())
+
+ // Label endpoints.
+ paths.Set("/labels", b.labelsPath())
+ paths.Set("/label/{name}/values", b.labelValuesPath())
+
+ // Series endpoints.
+ paths.Set("/series", b.seriesPath())
+
+ // Metadata endpoints.
+ paths.Set("/metadata", b.metadataPath())
+
+ // Target endpoints.
+ paths.Set("/scrape_pools", b.scrapePoolsPath())
+ paths.Set("/targets", b.targetsPath())
+ paths.Set("/targets/metadata", b.targetsMetadataPath())
+ paths.Set("/targets/relabel_steps", b.targetsRelabelStepsPath())
+
+ // Rules and alerts endpoints.
+ paths.Set("/rules", b.rulesPath())
+ paths.Set("/alerts", b.alertsPath())
+ paths.Set("/alertmanagers", b.alertmanagersPath())
+
+ // Status endpoints.
+ paths.Set("/status/config", b.statusConfigPath())
+ paths.Set("/status/runtimeinfo", b.statusRuntimeInfoPath())
+ paths.Set("/status/buildinfo", b.statusBuildInfoPath())
+ paths.Set("/status/flags", b.statusFlagsPath())
+ paths.Set("/status/tsdb", b.statusTSDBPath())
+ paths.Set("/status/tsdb/blocks", b.statusTSDBBlocksPath())
+ paths.Set("/status/walreplay", b.statusWALReplayPath())
+
+ // Admin endpoints.
+ paths.Set("/admin/tsdb/delete_series", b.adminDeleteSeriesPath())
+ paths.Set("/admin/tsdb/clean_tombstones", b.adminCleanTombstonesPath())
+ paths.Set("/admin/tsdb/snapshot", b.adminSnapshotPath())
+
+ // Remote endpoints.
+ paths.Set("/read", b.remoteReadPath())
+ paths.Set("/write", b.remoteWritePath())
+ paths.Set("/otlp/v1/metrics", b.otlpWritePath())
+
+ // Notifications endpoints.
+ paths.Set("/notifications", b.notificationsPath())
+ paths.Set("/notifications/live", b.notificationsLivePath())
+
+ // Features endpoint.
+ paths.Set("/features", b.featuresPath())
+
+ return paths
+}
diff --git a/web/api/v1/openapi_coverage_test.go b/web/api/v1/openapi_coverage_test.go
new file mode 100644
index 0000000000..103f82e08e
--- /dev/null
+++ b/web/api/v1/openapi_coverage_test.go
@@ -0,0 +1,258 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ _ "embed"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "strconv"
+ "strings"
+ "testing"
+
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+)
+
+//go:embed api.go
+var apiGoSource string
+
+// routeInfo represents a route extracted from the Register function.
+type routeInfo struct {
+ method string
+ path string
+}
+
+// extractRoutesFromRegister parses the api.go source and extracts all routes
+// registered in the (*API) Register function using AST.
+func extractRoutesFromRegister(t *testing.T, source string) []routeInfo {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "api.go", source, parser.ParseComments)
+ require.NoError(t, err, "failed to parse api.go")
+
+ var registerFunc *ast.FuncDecl
+
+ // Find the Register method on *API.
+ ast.Inspect(f, func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok || fn.Body == nil {
+ return true
+ }
+
+ if fn.Name.Name != "Register" {
+ return true
+ }
+
+ // Ensure it's a method on *API.
+ if fn.Recv == nil || len(fn.Recv.List) != 1 {
+ return true
+ }
+
+ star, ok := fn.Recv.List[0].Type.(*ast.StarExpr)
+ if !ok {
+ return true
+ }
+
+ ident, ok := star.X.(*ast.Ident)
+ if !ok || ident.Name != "API" {
+ return true
+ }
+
+ registerFunc = fn
+ return false // Stop walking once found.
+ })
+
+ require.NotNil(t, registerFunc, "Register method not found")
+
+ var routes []routeInfo
+
+ // Extract all r.Get, r.Post, r.Put, r.Delete, r.Options calls.
+ ast.Inspect(registerFunc.Body, func(n ast.Node) bool {
+ call, ok := n.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+
+ // Check if it's a router method call.
+ method := sel.Sel.Name
+ if method != "Get" && method != "Post" && method != "Put" && method != "Delete" && method != "Del" && method != "Options" {
+ return true
+ }
+
+ // Ensure the receiver is 'r'.
+ if x, ok := sel.X.(*ast.Ident); !ok || x.Name != "r" {
+ return true
+ }
+
+ if len(call.Args) == 0 {
+ return true
+ }
+
+ // Extract the path from the first argument.
+ lit, ok := call.Args[0].(*ast.BasicLit)
+ if !ok || lit.Kind != token.STRING {
+ return true
+ }
+
+ path, err := strconv.Unquote(lit.Value)
+ if err != nil {
+ return true
+ }
+
+ // Normalize Del to DELETE.
+ if method == "Del" {
+ method = "Delete"
+ }
+
+ routes = append(routes, routeInfo{
+ method: strings.ToUpper(method),
+ path: path,
+ })
+ return true
+ })
+
+ return routes
+}
+
+// normalizePathForOpenAPI converts route paths with colon parameters to OpenAPI format.
+// e.g., "/label/:name/values" -> "/label/{name}/values".
+func normalizePathForOpenAPI(path string) string {
+ // Replace :param with {param}.
+ parts := strings.Split(path, "/")
+ for i, part := range parts {
+ if trimmed, ok := strings.CutPrefix(part, ":"); ok {
+ parts[i] = "{" + trimmed + "}"
+ }
+ }
+ return strings.Join(parts, "/")
+}
+
+// TestOpenAPICoverage verifies that all routes registered in the Register function
+// are documented in the OpenAPI specification.
+func TestOpenAPICoverage(t *testing.T) {
+ // Extract routes from api.go using AST.
+ routes := extractRoutesFromRegister(t, apiGoSource)
+ require.NotEmpty(t, routes, "no routes found in Register function")
+
+ // Build OpenAPI spec.
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+ allPaths := builder.getAllPathDefinitions()
+
+ // Create a map of OpenAPI paths for quick lookup.
+ // Key is the normalized path, value is the PathItem.
+ openAPIPaths := make(map[string]bool)
+ for pair := allPaths.First(); pair != nil; pair = pair.Next() {
+ pathItem := pair.Value()
+ path := pair.Key()
+
+ // Track which methods are defined for this path.
+ if pathItem.Get != nil {
+ openAPIPaths[path+":GET"] = true
+ }
+ if pathItem.Post != nil {
+ openAPIPaths[path+":POST"] = true
+ }
+ if pathItem.Put != nil {
+ openAPIPaths[path+":PUT"] = true
+ }
+ if pathItem.Delete != nil {
+ openAPIPaths[path+":DELETE"] = true
+ }
+ if pathItem.Options != nil {
+ openAPIPaths[path+":OPTIONS"] = true
+ }
+ }
+
+ // Check coverage for each route.
+ var missingRoutes []string
+ ignoredRoutes := map[string]bool{
+ "/*path:OPTIONS": true, // Wildcard OPTIONS handler.
+ "/openapi.yaml:GET": true, // Self-referential endpoint.
+ "/notifications/live:GET": true, // SSE endpoint (version-specific).
+ }
+
+ for _, route := range routes {
+ normalizedPath := normalizePathForOpenAPI(route.path)
+ key := normalizedPath + ":" + route.method
+
+ // Skip ignored routes.
+ if ignoredRoutes[key] {
+ continue
+ }
+
+ if !openAPIPaths[key] {
+ missingRoutes = append(missingRoutes, key)
+ }
+ }
+
+ if len(missingRoutes) > 0 {
+ t.Errorf("The following routes are registered but not documented in OpenAPI spec:\n%s",
+ strings.Join(missingRoutes, "\n"))
+ }
+}
+
+// TestOpenAPIHasNoExtraRoutes verifies that the OpenAPI spec doesn't document
+// routes that aren't actually registered.
+func TestOpenAPIHasNoExtraRoutes(t *testing.T) {
+ // Extract routes from api.go using AST.
+ routes := extractRoutesFromRegister(t, apiGoSource)
+ require.NotEmpty(t, routes, "no routes found in Register function")
+
+ // Create a map of registered routes.
+ registeredRoutes := make(map[string]bool)
+ for _, route := range routes {
+ normalizedPath := normalizePathForOpenAPI(route.path)
+ key := normalizedPath + ":" + route.method
+ registeredRoutes[key] = true
+ }
+
+ // Build OpenAPI spec.
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+ allPaths := builder.getAllPathDefinitions()
+
+ // Check if any OpenAPI paths are not registered.
+ var extraRoutes []string
+
+ for pair := allPaths.First(); pair != nil; pair = pair.Next() {
+ pathItem := pair.Value()
+ path := pair.Key()
+
+ checkMethod := func(method string, op *v3.Operation) {
+ if op != nil {
+ key := path + ":" + method
+ if !registeredRoutes[key] {
+ extraRoutes = append(extraRoutes, key)
+ }
+ }
+ }
+
+ checkMethod("GET", pathItem.Get)
+ checkMethod("POST", pathItem.Post)
+ checkMethod("PUT", pathItem.Put)
+ checkMethod("DELETE", pathItem.Delete)
+ checkMethod("OPTIONS", pathItem.Options)
+ }
+
+ if len(extraRoutes) > 0 {
+ t.Errorf("The following routes are documented in OpenAPI but not registered:\n%s",
+ strings.Join(extraRoutes, "\n"))
+ }
+}
diff --git a/web/api/v1/openapi_examples.go b/web/api/v1/openapi_examples.go
new file mode 100644
index 0000000000..50e155b184
--- /dev/null
+++ b/web/api/v1/openapi_examples.go
@@ -0,0 +1,1013 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains example request bodies and response data for OpenAPI documentation.
+// Examples are included in the generated spec to provide realistic usage scenarios for API consumers.
+package v1
+
+import (
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ "github.com/pb33f/libopenapi/orderedmap"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql"
+)
+
+// Example builders for request bodies.
+
+func queryPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("simpleQuery", &base.Example{
+ Summary: "Simple instant query",
+ Value: createYAMLNode(map[string]any{"query": "up"}),
+ })
+
+ examples.Set("queryWithTime", &base.Example{
+ Summary: "Query with specific timestamp",
+ Value: createYAMLNode(map[string]any{
+ "query": "up{job=\"prometheus\"}",
+ "time": "2026-01-02T13:37:00.000Z",
+ }),
+ })
+
+ examples.Set("queryWithLimit", &base.Example{
+ Summary: "Query with limit and statistics",
+ Value: createYAMLNode(map[string]any{
+ "query": "rate(prometheus_http_requests_total{handler=\"/api/v1/query\"}[5m])",
+ "limit": 100,
+ "stats": "all",
+ }),
+ })
+
+ return examples
+}
+
+// queryRangePostExamples returns examples for POST /query_range endpoint.
+func queryRangePostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("basicRange", &base.Example{
+ Summary: "Basic range query",
+ Value: createYAMLNode(map[string]any{
+ "query": "up",
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ "step": "15s",
+ }),
+ })
+
+ examples.Set("rateQuery", &base.Example{
+ Summary: "Rate calculation over time range",
+ Value: createYAMLNode(map[string]any{
+ "query": "rate(prometheus_http_requests_total{handler=\"/api/v1/query\"}[5m])",
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ "step": "30s",
+ "timeout": "30s",
+ }),
+ })
+
+ return examples
+}
+
+// queryExemplarsPostExamples returns examples for POST /query_exemplars endpoint.
+func queryExemplarsPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("basicExemplar", &base.Example{
+ Summary: "Query exemplars for a metric",
+ Value: createYAMLNode(map[string]any{"query": "prometheus_http_requests_total"}),
+ })
+
+ examples.Set("exemplarWithTimeRange", &base.Example{
+ Summary: "Exemplars within specific time range",
+ Value: createYAMLNode(map[string]any{
+ "query": "prometheus_http_requests_total{job=\"prometheus\"}",
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ }),
+ })
+
+ return examples
+}
+
+// formatQueryPostExamples returns examples for POST /format_query endpoint.
+func formatQueryPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("simpleFormat", &base.Example{
+ Summary: "Format a simple query",
+ Value: createYAMLNode(map[string]any{"query": "up{job=\"prometheus\"}"}),
+ })
+
+ examples.Set("complexFormat", &base.Example{
+ Summary: "Format a complex query",
+ Value: createYAMLNode(map[string]any{"query": "sum(rate(http_requests_total[5m])) by (job, status)"}),
+ })
+
+ return examples
+}
+
+// parseQueryPostExamples returns examples for POST /parse_query endpoint.
+func parseQueryPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("simpleParse", &base.Example{
+ Summary: "Parse a simple query",
+ Value: createYAMLNode(map[string]any{"query": "up"}),
+ })
+
+ examples.Set("complexParse", &base.Example{
+ Summary: "Parse a complex query",
+ Value: createYAMLNode(map[string]any{"query": "rate(http_requests_total{job=\"api\"}[5m])"}),
+ })
+
+ return examples
+}
+
+// labelsPostExamples returns examples for POST /labels endpoint.
+func labelsPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("allLabels", &base.Example{
+ Summary: "Get all label names",
+ Value: createYAMLNode(map[string]any{}),
+ })
+
+ examples.Set("labelsWithTimeRange", &base.Example{
+ Summary: "Get label names within time range",
+ Value: createYAMLNode(map[string]any{
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ }),
+ })
+
+ examples.Set("labelsWithMatch", &base.Example{
+ Summary: "Get label names matching series selector",
+ Value: createYAMLNode(map[string]any{
+ "match[]": []string{"up", "process_start_time_seconds{job=\"prometheus\"}"},
+ }),
+ })
+
+ return examples
+}
+
+// seriesPostExamples returns examples for POST /series endpoint.
+func seriesPostExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("seriesMatch", &base.Example{
+ Summary: "Find series by label matchers",
+ Value: createYAMLNode(map[string]any{
+ "match[]": []string{"up"},
+ }),
+ })
+
+ examples.Set("seriesWithTimeRange", &base.Example{
+ Summary: "Find series with time range",
+ Value: createYAMLNode(map[string]any{
+ "match[]": []string{"up", "process_cpu_seconds_total{job=\"prometheus\"}"},
+ "start": "2026-01-02T12:37:00.000Z",
+ "end": "2026-01-02T13:37:00.000Z",
+ }),
+ })
+
+ return examples
+}
+
+// Example builders for response bodies.
+
+// queryResponseExamples returns examples for /query response.
+func queryResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ vectorResult := promql.Vector{
+ promql.Sample{
+ Metric: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "demo.prometheus.io:9090"),
+ T: 1767436620000,
+ F: 1,
+ },
+ promql.Sample{
+ Metric: labels.FromStrings("__name__", "up", "env", "demo", "job", "alertmanager", "instance", "demo.prometheus.io:9093"),
+ T: 1767436620000,
+ F: 1,
+ },
+ }
+
+ examples.Set("vectorResult", &base.Example{
+ Summary: "Instant vector query: up",
+ Value: vectorExample(vectorResult),
+ })
+
+ examples.Set("scalarResult", &base.Example{
+ Summary: "Scalar query: scalar(42)",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "resultType": "scalar",
+ "result": []any{1767436620, "42"},
+ },
+ }),
+ })
+
+ matrixResult := promql.Matrix{
+ promql.Series{
+ Metric: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "demo.prometheus.io:9090"),
+ Floats: []promql.FPoint{
+ {T: 1767436320000, F: 1},
+ {T: 1767436620000, F: 1},
+ },
+ },
+ }
+
+ examples.Set("matrixResult", &base.Example{
+ Summary: "Range vector query: up[5m]",
+ Value: matrixExample(matrixResult),
+ })
+
+ // TODO: Add native histogram example.
+
+ return examples
+}
+
+// queryRangeResponseExamples returns examples for /query_range response.
+func queryRangeResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ matrixResult := promql.Matrix{
+ promql.Series{
+ Metric: labels.FromStrings("__name__", "up", "job", "prometheus", "instance", "demo.prometheus.io:9090"),
+ Floats: []promql.FPoint{
+ {T: 1767433020000, F: 1},
+ {T: 1767434820000, F: 1},
+ {T: 1767436620000, F: 1},
+ },
+ },
+ }
+
+ examples.Set("matrixResult", &base.Example{
+ Summary: "Range query: rate(prometheus_http_requests_total[5m])",
+ Value: matrixExample(matrixResult),
+ })
+
+ // TODO: Add native histogram example.
+
+ return examples
+}
+
+// labelsResponseExamples returns examples for /labels response.
+func labelsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("labelNames", &base.Example{
+ Summary: "List of label names",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []string{
+ "__name__", "active", "address", "alertmanager", "alertname", "alertstate",
+ "backend", "branch", "code", "collector", "component", "device",
+ "env", "endpoint", "fstype", "handler", "instance", "job",
+ "le", "method", "mode", "name",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// seriesResponseExamples returns examples for /series response.
+func seriesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("seriesList", &base.Example{
+ Summary: "List of series matching the selector",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []map[string]string{
+ {
+ "__name__": "up",
+ "env": "demo",
+ "instance": "demo.prometheus.io:8080",
+ "job": "cadvisor",
+ },
+ {
+ "__name__": "up",
+ "env": "demo",
+ "instance": "demo.prometheus.io:9093",
+ "job": "alertmanager",
+ },
+ {
+ "__name__": "up",
+ "env": "demo",
+ "instance": "demo.prometheus.io:9100",
+ "job": "node",
+ },
+ {
+ "__name__": "up",
+ "instance": "demo.prometheus.io:3000",
+ "job": "grafana",
+ },
+ {
+ "__name__": "up",
+ "instance": "demo.prometheus.io:8996",
+ "job": "random",
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// targetsResponseExamples returns examples for /targets response.
+func targetsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("targetsList", &base.Example{
+ Summary: "Active and dropped targets",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "activeTargets": []map[string]any{
+ {
+ "discoveredLabels": map[string]string{
+ "__address__": "demo.prometheus.io:9093",
+ "__meta_filepath": "/etc/prometheus/file_sd/alertmanager.yml",
+ "__metrics_path__": "/metrics",
+ "__scheme__": "http",
+ "env": "demo",
+ "job": "alertmanager",
+ },
+ "labels": map[string]string{
+ "env": "demo",
+ "instance": "demo.prometheus.io:9093",
+ "job": "alertmanager",
+ },
+ "scrapePool": "alertmanager",
+ "scrapeUrl": "http://demo.prometheus.io:9093/metrics",
+ "globalUrl": "http://demo.prometheus.io:9093/metrics",
+ "lastError": "",
+ "lastScrape": "2026-01-02T13:36:40.200Z",
+ "lastScrapeDuration": 0.006576866,
+ "health": "up",
+ "scrapeInterval": "15s",
+ "scrapeTimeout": "10s",
+ },
+ },
+ "droppedTargets": []map[string]any{},
+ "droppedTargetCounts": map[string]int{
+ "alertmanager": 0,
+ "blackbox": 0,
+ "caddy": 0,
+ "cadvisor": 0,
+ "grafana": 0,
+ "node": 0,
+ "prometheus": 0,
+ "random": 0,
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// rulesResponseExamples returns examples for /rules response.
+func rulesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("ruleGroups", &base.Example{
+ Summary: "Alerting and recording rules",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "groups": []map[string]any{
+ {
+ "name": "ansible managed alert rules",
+ "file": "/etc/prometheus/rules/ansible_managed.yml",
+ "interval": 15,
+ "limit": 0,
+ "rules": []map[string]any{
+ {
+ "state": "firing",
+ "name": "Watchdog",
+ "query": "vector(1)",
+ "duration": 600,
+ "keepFiringFor": 0,
+ "labels": map[string]string{"severity": "warning"},
+ "annotations": map[string]string{"description": "This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the \"DeadMansSnitch\" integration in PagerDuty.", "summary": "Ensure entire alerting pipeline is functional"},
+ "health": "ok",
+ "evaluationTime": 0.000356688,
+ "lastEvaluation": "2026-01-02T13:36:56.874Z",
+ "type": "alerting",
+ },
+ },
+ "evaluationTime": 0.000561635,
+ "lastEvaluation": "2026-01-02T13:36:56.874Z",
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// alertsResponseExamples returns examples for /alerts response.
+func alertsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("activeAlerts", &base.Example{
+ Summary: "Currently active alerts",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "alerts": []map[string]any{
+ {
+ "labels": map[string]string{
+ "alertname": "Watchdog",
+ "severity": "warning",
+ },
+ "annotations": map[string]string{
+ "description": "This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the \"DeadMansSnitch\" integration in PagerDuty.",
+ "summary": "Ensure entire alerting pipeline is functional",
+ },
+ "state": "firing",
+ "activeAt": "2026-01-02T13:30:00.000Z",
+ "value": "1e+00",
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// queryExemplarsResponseExamples returns examples for /query_exemplars response.
+func queryExemplarsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("exemplarsResult", &base.Example{
+ Summary: "Exemplars for a metric with trace IDs",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []map[string]any{
+ {
+ "seriesLabels": map[string]string{
+ "__name__": "http_requests_total",
+ "job": "api-server",
+ "method": "GET",
+ },
+ "exemplars": []map[string]any{
+ {
+ "labels": map[string]string{
+ "traceID": "abc123def456",
+ },
+ "value": "1.5",
+ "timestamp": 1689956451.781,
+ },
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// formatQueryResponseExamples returns examples for /format_query response.
+func formatQueryResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("formattedQuery", &base.Example{
+ Summary: "Formatted PromQL query",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": "sum by(job, status) (rate(http_requests_total[5m]))",
+ }),
+ })
+
+ return examples
+}
+
+// parseQueryResponseExamples returns examples for /parse_query response.
+func parseQueryResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("parsedQuery", &base.Example{
+ Summary: "Parsed PromQL expression tree",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "resultType": "vector",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// labelValuesResponseExamples returns examples for /label/{name}/values response.
+func labelValuesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("labelValues", &base.Example{
+ Summary: "List of values for a label",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []string{"alertmanager", "blackbox", "caddy", "cadvisor", "grafana", "node", "prometheus", "random"},
+ }),
+ })
+
+ return examples
+}
+
+// metadataResponseExamples returns examples for /metadata response.
+func metadataResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("metricMetadata", &base.Example{
+ Summary: "Metadata for metrics",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string][]map[string]any{
+ "prometheus_rule_group_iterations_missed_total": {
+ {
+ "type": "counter",
+ "help": "The total number of rule group evaluations missed due to slow rule group evaluation.",
+ "unit": "",
+ },
+ },
+ "prometheus_sd_updates_total": {
+ {
+ "type": "counter",
+ "help": "Total number of update events sent to the SD consumers.",
+ "unit": "",
+ },
+ },
+ "go_gc_stack_starting_size_bytes": {
+ {
+ "type": "gauge",
+ "help": "The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes.",
+ "unit": "",
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// scrapePoolsResponseExamples returns examples for /scrape_pools response.
+func scrapePoolsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("scrapePoolsList", &base.Example{
+ Summary: "List of scrape pool names",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "scrapePools": []string{"alertmanager", "blackbox", "caddy", "cadvisor", "grafana", "node", "prometheus", "random"},
+ },
+ }),
+ })
+
+ return examples
+}
+
+// targetsMetadataResponseExamples returns examples for /targets/metadata response.
+func targetsMetadataResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("targetMetadata", &base.Example{
+ Summary: "Metadata for targets",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []map[string]any{
+ {
+ "target": map[string]string{
+ "instance": "localhost:9090",
+ "job": "prometheus",
+ },
+ "type": "gauge",
+ "help": "The current health status of the target",
+ "unit": "",
+ "metric": "up",
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// targetsRelabelStepsResponseExamples returns examples for /targets/relabel_steps response.
+func targetsRelabelStepsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("relabelSteps", &base.Example{
+ Summary: "Relabel steps for a target",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "steps": []map[string]any{
+ {
+ "rule": map[string]any{
+ "source_labels": []string{"__address__"},
+ "target_label": "instance",
+ "action": "replace",
+ "regex": "(.*)",
+ "replacement": "$1",
+ },
+ "output": map[string]string{
+ "__address__": "localhost:9090",
+ "instance": "localhost:9090",
+ "job": "prometheus",
+ },
+ "keep": true,
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// alertmanagersResponseExamples returns examples for /alertmanagers response.
+func alertmanagersResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("alertmanagerDiscovery", &base.Example{
+ Summary: "Alertmanager discovery results",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "activeAlertmanagers": []map[string]any{
+ {
+ "url": "http://demo.prometheus.io:9093/api/v2/alerts",
+ },
+ },
+ "droppedAlertmanagers": []map[string]any{},
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusConfigResponseExamples returns examples for /status/config response.
+func statusConfigResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("configYAML", &base.Example{
+ Summary: "Prometheus configuration",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "yaml": "global:\n scrape_interval: 15s\n scrape_timeout: 10s\n evaluation_interval: 15s\n external_labels:\n environment: demo-prometheus-io\nalerting:\n alertmanagers:\n - scheme: http\n static_configs:\n - targets:\n - demo.prometheus.io:9093\nrule_files:\n- /etc/prometheus/rules/*.yml\n",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusRuntimeInfoResponseExamples returns examples for /status/runtimeinfo response.
+func statusRuntimeInfoResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("runtimeInfo", &base.Example{
+ Summary: "Runtime information",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "startTime": "2026-01-01T13:37:00.000Z",
+ "CWD": "/",
+ "hostname": "demo-prometheus-io",
+ "serverTime": "2026-01-02T13:37:00.000Z",
+ "reloadConfigSuccess": true,
+ "lastConfigTime": "2026-01-01T13:37:00.000Z",
+ "corruptionCount": 0,
+ "goroutineCount": 88,
+ "GOMAXPROCS": 2,
+ "GOMEMLIMIT": int64(3703818240),
+ "GOGC": "75",
+ "GODEBUG": "",
+ "storageRetention": "31d",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusBuildInfoResponseExamples returns examples for /status/buildinfo response.
+func statusBuildInfoResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("buildInfo", &base.Example{
+ Summary: "Build information",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "version": "3.7.3",
+ "revision": "0a41f0000705c69ab8e0f9a723fc73e39ed62b07",
+ "branch": "HEAD",
+ "buildUser": "root@08c890a84441",
+ "buildDate": "20251030-07:26:10",
+ "goVersion": "go1.25.3",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusFlagsResponseExamples returns examples for /status/flags response.
+func statusFlagsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("flags", &base.Example{
+ Summary: "Command-line flags",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]string{
+ "agent": "false",
+ "alertmanager.notification-queue-capacity": "10000",
+ "config.file": "/etc/prometheus/prometheus.yml",
+ "enable-feature": "exemplar-storage,native-histograms",
+ "query.max-concurrency": "20",
+ "query.timeout": "2m",
+ "storage.tsdb.path": "/prometheus",
+ "storage.tsdb.retention.time": "15d",
+ "web.console.libraries": "/usr/share/prometheus/console_libraries",
+ "web.console.templates": "/usr/share/prometheus/consoles",
+ "web.enable-admin-api": "true",
+ "web.enable-lifecycle": "true",
+ "web.listen-address": "0.0.0.0:9090",
+ "web.page-title": "Prometheus Time Series Collection and Processing Server",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusTSDBResponseExamples returns examples for /status/tsdb response.
+func statusTSDBResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("tsdbStats", &base.Example{
+ Summary: "TSDB statistics",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "headStats": map[string]any{
+ "numSeries": 9925,
+ "numLabelPairs": 2512,
+ "chunkCount": 37525,
+ "minTime": int64(1767362400712),
+ "maxTime": int64(1767436620000),
+ },
+ "seriesCountByMetricName": []map[string]any{
+ {
+ "name": "up",
+ "value": 100,
+ },
+ {
+ "name": "http_requests_total",
+ "value": 500,
+ },
+ },
+ "labelValueCountByLabelName": []map[string]any{
+ {
+ "name": "__name__",
+ "value": 5,
+ },
+ {
+ "name": "job",
+ "value": 3,
+ },
+ },
+ "memoryInBytesByLabelName": []map[string]any{
+ {
+ "name": "__name__",
+ "value": 1024,
+ },
+ {
+ "name": "job",
+ "value": 512,
+ },
+ },
+ "seriesCountByLabelValuePair": []map[string]any{
+ {
+ "name": "job=prometheus",
+ "value": 100,
+ },
+ {
+ "name": "instance=localhost:9090",
+ "value": 100,
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusTSDBBlocksResponseExamples returns examples for /status/tsdb/blocks response.
+func statusTSDBBlocksResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("tsdbBlocks", &base.Example{
+ Summary: "TSDB block information",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "blocks": []map[string]any{
+ {
+ "ulid": "01KC4D6GXQA4CRHYKV78NEBVAE",
+ "minTime": int64(1764568801099),
+ "maxTime": int64(1764763200000),
+ "stats": map[string]any{
+ "numSamples": 129505582,
+ "numSeries": 10661,
+ "numChunks": 1073962,
+ },
+ "compaction": map[string]any{
+ "level": 4,
+ "sources": []string{
+ "01KBCJ7TR8A4QAJ3AA1J651P5S",
+ "01KBCS3J0E34567YPB8Y5W0E24",
+ "01KBCZZ9KRTYGG3E7HVQFGC3S3",
+ },
+ },
+ "version": 1,
+ },
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// statusWALReplayResponseExamples returns examples for /status/walreplay response.
+func statusWALReplayResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("walReplay", &base.Example{
+ Summary: "WAL replay status",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "min": 3209,
+ "max": 3214,
+ "current": 3214,
+ },
+ }),
+ })
+
+ return examples
+}
+
+// deleteSeriesResponseExamples returns examples for /admin/tsdb/delete_series response.
+func deleteSeriesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("deletionSuccess", &base.Example{
+ Summary: "Successful series deletion",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ }),
+ })
+
+ return examples
+}
+
+// cleanTombstonesResponseExamples returns examples for /admin/tsdb/clean_tombstones response.
+func cleanTombstonesResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("tombstonesCleaned", &base.Example{
+ Summary: "Tombstones cleaned successfully",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ }),
+ })
+
+ return examples
+}
+
+// seriesDeleteResponseExamples returns examples for DELETE /series response.
+func seriesDeleteResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("seriesDeleted", &base.Example{
+ Summary: "Series marked for deletion",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ }),
+ })
+
+ return examples
+}
+
+// snapshotResponseExamples returns examples for /admin/tsdb/snapshot response.
+func snapshotResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("snapshotCreated", &base.Example{
+ Summary: "Snapshot created successfully",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": map[string]any{
+ "name": "20260102T133700Z-a1b2c3d4e5f67890",
+ },
+ }),
+ })
+
+ return examples
+}
+
+// notificationsResponseExamples returns examples for /notifications response.
+func notificationsResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("notifications", &base.Example{
+ Summary: "Server notifications",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []map[string]any{
+ {
+ "text": "Configuration reload has failed.",
+ "date": "2026-01-02T16:14:50.046Z",
+ "active": true,
+ },
+ },
+ }),
+ })
+
+ return examples
+}
+
+// notificationLiveExamples provides example SSE messages for the live notifications endpoint.
+func notificationLiveExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("activeNotification", &base.Example{
+ Summary: "Active notification SSE message",
+ Description: "An SSE message containing an active server notification.",
+ Value: createYAMLNode(map[string]any{
+ "data": "{\"text\":\"Configuration reload has failed.\",\"date\":\"2026-01-02T16:14:50.046Z\",\"active\":true}",
+ }),
+ })
+
+ return examples
+}
+
+// featuresResponseExamples returns examples for /features response.
+func featuresResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("enabledFeatures", &base.Example{
+ Summary: "Enabled feature flags",
+ Value: createYAMLNode(map[string]any{
+ "status": "success",
+ "data": []string{"exemplar-storage", "remote-write-receiver"},
+ }),
+ })
+
+ return examples
+}
+
+// errorResponseExamples returns examples for error responses.
+func errorResponseExamples() *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+
+ examples.Set("tsdbNotReady", &base.Example{
+ Summary: "TSDB not ready",
+ Value: createYAMLNode(map[string]any{
+ "status": "error",
+ "errorType": "internal",
+ "error": "TSDB not ready",
+ }),
+ })
+
+ return examples
+}
diff --git a/web/api/v1/openapi_golden_test.go b/web/api/v1/openapi_golden_test.go
new file mode 100644
index 0000000000..468d56e46d
--- /dev/null
+++ b/web/api/v1/openapi_golden_test.go
@@ -0,0 +1,176 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "flag"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "go.yaml.in/yaml/v3"
+
+ "github.com/prometheus/prometheus/web/api/testhelpers"
+)
+
+var updateOpenAPISpec = flag.Bool("update-openapi-spec", false, "update openapi golden files with the current specs")
+
+// TestOpenAPIGolden_3_1 verifies that the OpenAPI 3.1 spec matches the golden file.
+func TestOpenAPIGolden_3_1(t *testing.T) {
+ // Create an API instance to serve the OpenAPI spec.
+ api := newTestAPI(t, testhelpers.APIConfig{})
+
+ // Fetch the OpenAPI 3.1 spec from the API (default, no query param).
+ resp := testhelpers.GET(t, api, "/api/v1/openapi.yaml")
+ require.Equal(t, 200, resp.StatusCode, "expected HTTP 200 for OpenAPI spec endpoint")
+ require.NotEmpty(t, resp.Body, "OpenAPI spec should not be empty")
+
+ goldenPath := filepath.Join("testdata", "openapi_3.1_golden.yaml")
+
+ if *updateOpenAPISpec {
+ // Update mode: write the current spec to the golden file.
+ t.Logf("Updating golden file: %s", goldenPath)
+
+ // Ensure the testdata directory exists.
+ err := os.MkdirAll(filepath.Dir(goldenPath), 0o755)
+ require.NoError(t, err, "failed to create testdata directory")
+
+ // Write the golden file.
+ err = os.WriteFile(goldenPath, []byte(resp.Body), 0o644)
+ require.NoError(t, err, "failed to write golden file")
+
+ t.Logf("Golden file updated successfully")
+ return
+ }
+
+ // Comparison mode: verify the spec matches the golden file.
+ goldenData, err := os.ReadFile(goldenPath)
+ require.NoError(t, err, "failed to read golden file (run with -update-openapi-spec to generate it)")
+
+ require.Equal(t, string(goldenData), resp.Body,
+ "OpenAPI 3.1 spec does not match golden file. Run 'go test -update-openapi-spec' to update.")
+
+ // Verify version field is 3.1.0.
+ var spec map[string]any
+ err = yaml.Unmarshal([]byte(resp.Body), &spec)
+ require.NoError(t, err)
+ require.Equal(t, "3.1.0", spec["openapi"], "OpenAPI version should be 3.1.0")
+
+ // Verify /notifications/live is NOT present in 3.1 spec.
+ paths := spec["paths"].(map[string]any)
+ _, found := paths["/notifications/live"]
+ require.False(t, found, "/notifications/live should not be in OpenAPI 3.1 spec")
+}
+
+// TestOpenAPIGolden_3_2 verifies that the OpenAPI 3.2 spec matches the golden file.
+func TestOpenAPIGolden_3_2(t *testing.T) {
+ // Create an API instance to serve the OpenAPI spec.
+ api := newTestAPI(t, testhelpers.APIConfig{})
+
+ // Fetch the OpenAPI 3.2 spec from the API with query parameter.
+ resp := testhelpers.GET(t, api, "/api/v1/openapi.yaml?openapi_version=3.2")
+ require.Equal(t, 200, resp.StatusCode, "expected HTTP 200 for OpenAPI spec endpoint")
+ require.NotEmpty(t, resp.Body, "OpenAPI spec should not be empty")
+
+ goldenPath := filepath.Join("testdata", "openapi_3.2_golden.yaml")
+
+ if *updateOpenAPISpec {
+ // Update mode: write the current spec to the golden file.
+ t.Logf("Updating golden file: %s", goldenPath)
+
+ // Ensure the testdata directory exists.
+ err := os.MkdirAll(filepath.Dir(goldenPath), 0o755)
+ require.NoError(t, err, "failed to create testdata directory")
+
+ // Write the golden file.
+ err = os.WriteFile(goldenPath, []byte(resp.Body), 0o644)
+ require.NoError(t, err, "failed to write golden file")
+
+ t.Logf("Golden file updated successfully")
+ return
+ }
+
+ // Comparison mode: verify the spec matches the golden file.
+ goldenData, err := os.ReadFile(goldenPath)
+ require.NoError(t, err, "failed to read golden file (run with -update-openapi-spec to generate it)")
+
+ require.Equal(t, string(goldenData), resp.Body,
+ "OpenAPI 3.2 spec does not match golden file. Run 'go test -update-openapi-spec' to update.")
+
+ // Verify version field is 3.2.0.
+ var spec map[string]any
+ err = yaml.Unmarshal([]byte(resp.Body), &spec)
+ require.NoError(t, err)
+ require.Equal(t, "3.2.0", spec["openapi"], "OpenAPI version should be 3.2.0")
+
+ // Verify /notifications/live IS present in 3.2 spec.
+ paths := spec["paths"].(map[string]any)
+ _, found := paths["/notifications/live"]
+ require.True(t, found, "/notifications/live should be in OpenAPI 3.2 spec")
+}
+
+// TestOpenAPIVersionSelection verifies version query parameter handling.
+func TestOpenAPIVersionSelection(t *testing.T) {
+ api := newTestAPI(t, testhelpers.APIConfig{})
+
+ tests := []struct {
+ name string
+ url string
+ expectedVersion string
+ expectLivePath bool
+ }{
+ {
+ name: "default to 3.1.0",
+ url: "/api/v1/openapi.yaml",
+ expectedVersion: "3.1.0",
+ expectLivePath: false,
+ },
+ {
+ name: "explicit 3.1",
+ url: "/api/v1/openapi.yaml?openapi_version=3.1",
+ expectedVersion: "3.1.0",
+ expectLivePath: false,
+ },
+ {
+ name: "explicit 3.2",
+ url: "/api/v1/openapi.yaml?openapi_version=3.2",
+ expectedVersion: "3.2.0",
+ expectLivePath: true,
+ },
+ {
+ name: "invalid version defaults to 3.1.0",
+ url: "/api/v1/openapi.yaml?openapi_version=4.0",
+ expectedVersion: "3.1.0",
+ expectLivePath: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ resp := testhelpers.GET(t, api, tc.url)
+ require.Equal(t, 200, resp.StatusCode)
+
+ var spec map[string]any
+ err := yaml.Unmarshal([]byte(resp.Body), &spec)
+ require.NoError(t, err)
+
+ require.Equal(t, tc.expectedVersion, spec["openapi"])
+
+ paths := spec["paths"].(map[string]any)
+ _, found := paths["/notifications/live"]
+ require.Equal(t, tc.expectLivePath, found)
+ })
+ }
+}
diff --git a/web/api/v1/openapi_helpers.go b/web/api/v1/openapi_helpers.go
new file mode 100644
index 0000000000..76f6001693
--- /dev/null
+++ b/web/api/v1/openapi_helpers.go
@@ -0,0 +1,343 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "time"
+
+ jsoniter "github.com/json-iterator/go"
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/pb33f/libopenapi/orderedmap"
+ yaml "go.yaml.in/yaml/v4"
+
+ "github.com/prometheus/prometheus/promql"
+)
+
+// Helper functions for building common structures.
+
+// exampleTime is a reference time used for timestamp examples.
+var exampleTime = time.Date(2026, 1, 2, 13, 37, 0, 0, time.UTC)
+
+func boolPtr(b bool) *bool {
+ return &b
+}
+
+func int64Ptr(i int64) *int64 {
+ return &i
+}
+
+type example struct {
+ name string
+ value any
+}
+
+// exampleMap creates an Examples map from the provided examples.
+func exampleMap(exs []example) *orderedmap.Map[string, *base.Example] {
+ examples := orderedmap.New[string, *base.Example]()
+ for _, ex := range exs {
+ examples.Set(ex.name, &base.Example{
+ Value: createYAMLNode(ex.value),
+ })
+ }
+ return examples
+}
+
+func schemaRef(ref string) *base.SchemaProxy {
+ return base.CreateSchemaProxyRef(ref)
+}
+
+func schemaFromType(t string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{Type: []string{t}})
+}
+
+func stringSchema() *base.SchemaProxy {
+ return schemaFromType("string")
+}
+
+func integerSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Format: "int64",
+ })
+}
+
+func stringSchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Description: description,
+ })
+}
+
+func stringSchemaWithDescriptionAndExample(description string, example any) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Description: description,
+ Example: createYAMLNode(example),
+ })
+}
+
+func integerSchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Format: "int64",
+ Description: description,
+ })
+}
+
+func integerSchemaWithDescriptionAndExample(description string, example any) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Format: "int64",
+ Description: description,
+ Example: createYAMLNode(example),
+ })
+}
+
+func stringArraySchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Description: description,
+ })
+}
+
+func stringArraySchemaWithDescriptionAndExample(description string, example any) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Description: description,
+ Example: createYAMLNode(example),
+ })
+}
+
+func statusSchema() *base.SchemaProxy {
+ successNode := &yaml.Node{Kind: yaml.ScalarNode, Value: "success"}
+ errorNode := &yaml.Node{Kind: yaml.ScalarNode, Value: "error"}
+ exampleNode := &yaml.Node{Kind: yaml.ScalarNode, Value: "success"}
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Enum: []*yaml.Node{successNode, errorNode},
+ Description: "Response status.",
+ Example: exampleNode,
+ })
+}
+
+func warningsSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Description: "Only set if there were warnings while executing the request. There will still be data in the data field.",
+ })
+}
+
+func infosSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Description: "Only set if there were info-level annotations while executing the request.",
+ })
+}
+
+func timestampSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Format: "date-time",
+ Description: "RFC3339 timestamp.",
+ }),
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Format: "unixtime",
+ Description: "Unix timestamp in seconds.",
+ }),
+ },
+ Description: "Timestamp in RFC3339 format or Unix timestamp in seconds.",
+ })
+}
+
+func stringSchemaWithConstValue(value string) *base.SchemaProxy {
+ node := &yaml.Node{Kind: yaml.ScalarNode, Value: value}
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Enum: []*yaml.Node{node},
+ })
+}
+
+func dateTimeSchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Format: "date-time",
+ Description: description,
+ })
+}
+
+func numberSchemaWithDescription(description string) *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Format: "double",
+ Description: description,
+ })
+}
+
+func errorResponse() *v3.Response {
+ content := orderedmap.New[string, *v3.MediaType]()
+ content.Set("application/json", &v3.MediaType{
+ Schema: schemaRef("#/components/schemas/Error"),
+ })
+ return &v3.Response{
+ Description: "Error",
+ Content: content,
+ }
+}
+
+func noContentResponse() *v3.Response {
+ return &v3.Response{Description: "No Content"}
+}
+
+func responsesNoContent() *v3.Responses {
+ codes := orderedmap.New[string, *v3.Response]()
+ codes.Set("204", noContentResponse())
+ codes.Set("default", errorResponse())
+ return &v3.Responses{Codes: codes}
+}
+
+func pathParam(name, description string, schema *base.SchemaProxy) *v3.Parameter {
+ return &v3.Parameter{
+ Name: name,
+ In: "path",
+ Description: description,
+ Required: boolPtr(true),
+ Schema: schema,
+ }
+}
+
+// createYAMLNode converts Go data to yaml.Node for use in examples.
+func createYAMLNode(data any) *yaml.Node {
+ node := &yaml.Node{}
+ bytes, _ := yaml.Marshal(data)
+ _ = yaml.Unmarshal(bytes, node)
+ return node
+}
+
+// formRequestBodyWithExamples creates a form-encoded request body with examples.
+func formRequestBodyWithExamples(schemaRef string, examples *orderedmap.Map[string, *base.Example], description string) *v3.RequestBody {
+ content := orderedmap.New[string, *v3.MediaType]()
+ mediaType := &v3.MediaType{
+ Schema: base.CreateSchemaProxyRef("#/components/schemas/" + schemaRef),
+ }
+ if examples != nil {
+ mediaType.Examples = examples
+ }
+ content.Set("application/x-www-form-urlencoded", mediaType)
+ return &v3.RequestBody{
+ Required: boolPtr(true),
+ Description: description,
+ Content: content,
+ }
+}
+
+// jsonResponseWithExamples creates a JSON response with examples.
+func jsonResponseWithExamples(schemaRef string, examples *orderedmap.Map[string, *base.Example], description string) *v3.Response {
+ content := orderedmap.New[string, *v3.MediaType]()
+ mediaType := &v3.MediaType{
+ Schema: base.CreateSchemaProxyRef("#/components/schemas/" + schemaRef),
+ }
+ if examples != nil {
+ mediaType.Examples = examples
+ }
+ content.Set("application/json", mediaType)
+ return &v3.Response{
+ Description: description,
+ Content: content,
+ }
+}
+
+// responsesWithErrorExamples creates responses with both success and error examples.
+func responsesWithErrorExamples(okSchemaRef string, successExamples, errorExamples *orderedmap.Map[string, *base.Example], successDescription, errorDescription string) *v3.Responses {
+ codes := orderedmap.New[string, *v3.Response]()
+ codes.Set("200", jsonResponseWithExamples(okSchemaRef, successExamples, successDescription))
+ codes.Set("default", jsonResponseWithExamples("Error", errorExamples, errorDescription))
+ return &v3.Responses{Codes: codes}
+}
+
+// timestampExamples returns examples for timestamp parameters (RFC3339 and epoch).
+func timestampExamples(t time.Time) []example {
+ return []example{
+ {"RFC3339", t.Format(time.RFC3339Nano)},
+ {"epoch", t.Unix()},
+ }
+}
+
+// queryParamWithExample creates a query parameter with examples.
+func queryParamWithExample(name, description string, required bool, schema *base.SchemaProxy, examples []example) *v3.Parameter {
+ param := &v3.Parameter{
+ Name: name,
+ In: "query",
+ Description: description,
+ Required: &required,
+ Explode: boolPtr(false),
+ Schema: schema,
+ }
+ if len(examples) > 0 {
+ param.Examples = exampleMap(examples)
+ }
+ return param
+}
+
+// marshalToYAMLNode marshals a value using jsoniter (production marshaling) and converts to yaml.Node.
+// The result is an inline JSON representation that preserves integer types for timestamps.
+func marshalToYAMLNode(v any) *yaml.Node {
+ jsonAPI := jsoniter.ConfigCompatibleWithStandardLibrary
+ jsonBytes, err := jsonAPI.Marshal(v)
+ if err != nil {
+ panic(err)
+ }
+ node := &yaml.Node{}
+ if err := yaml.Unmarshal(jsonBytes, node); err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// vectorExample creates an example for a vector query response using production marshaling.
+func vectorExample(v promql.Vector) *yaml.Node {
+ type response struct {
+ Status string `json:"status"`
+ Data struct {
+ ResultType string `json:"resultType"`
+ Result promql.Vector `json:"result"`
+ } `json:"data"`
+ }
+ resp := response{Status: "success"}
+ resp.Data.ResultType = "vector"
+ resp.Data.Result = v
+ return marshalToYAMLNode(resp)
+}
+
+// matrixExample creates an example for a matrix query response using production marshaling.
+func matrixExample(m promql.Matrix) *yaml.Node {
+ type response struct {
+ Status string `json:"status"`
+ Data struct {
+ ResultType string `json:"resultType"`
+ Result promql.Matrix `json:"result"`
+ } `json:"data"`
+ }
+ resp := response{Status: "success"}
+ resp.Data.ResultType = "matrix"
+ resp.Data.Result = m
+ return marshalToYAMLNode(resp)
+}
diff --git a/web/api/v1/openapi_paths.go b/web/api/v1/openapi_paths.go
new file mode 100644
index 0000000000..2f5ab592f7
--- /dev/null
+++ b/web/api/v1/openapi_paths.go
@@ -0,0 +1,626 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines all API path specifications including parameters, request bodies,
+// and response schemas. Each path definition corresponds to an endpoint registered in api.go.
+package v1
+
+import (
+ "time"
+
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/pb33f/libopenapi/orderedmap"
+)
+
+// Path definition methods for API endpoints.
+
+func (*OpenAPIBuilder) queryPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("limit", "The maximum number of metrics to return.", false, integerSchema(), []example{{"example", 100}}),
+ queryParamWithExample("time", "The evaluation timestamp (optional, defaults to current time).", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("query", "The PromQL query to execute.", true, stringSchema(), []example{{"example", "up"}}),
+ queryParamWithExample("timeout", "Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.", false, stringSchema(), []example{{"example", "30s"}}),
+ queryParamWithExample("lookback_delta", "Override the lookback period for this query. Optional.", false, stringSchema(), []example{{"example", "5m"}}),
+ queryParamWithExample("stats", "When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.", false, stringSchema(), []example{{"example", "all"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "query",
+ Summary: "Evaluate an instant query",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("QueryOutputBody", queryResponseExamples(), errorResponseExamples(), "Query executed successfully.", "Error executing query."),
+ },
+ Post: &v3.Operation{
+ OperationId: "query-post",
+ Summary: "Evaluate an instant query",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("QueryPostInputBody", queryPostExamples(), "Submit an instant query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("QueryOutputBody", queryResponseExamples(), errorResponseExamples(), "Instant query executed successfully.", "Error executing instant query."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) queryRangePath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("limit", "The maximum number of metrics to return.", false, integerSchema(), []example{{"example", 100}}),
+ queryParamWithExample("start", "The start time of the query.", true, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "The end time of the query.", true, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("step", "The step size of the query.", true, stringSchema(), []example{{"example", "15s"}}),
+ queryParamWithExample("query", "The query to execute.", true, stringSchema(), []example{{"example", "rate(prometheus_http_requests_total{handler=\"/api/v1/query\"}[5m])"}}),
+ queryParamWithExample("timeout", "Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.", false, stringSchema(), []example{{"example", "30s"}}),
+ queryParamWithExample("lookback_delta", "Override the lookback period for this query. Optional.", false, stringSchema(), []example{{"example", "5m"}}),
+ queryParamWithExample("stats", "When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.", false, stringSchema(), []example{{"example", "all"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "query-range",
+ Summary: "Evaluate a range query",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("QueryRangeOutputBody", queryRangeResponseExamples(), errorResponseExamples(), "Range query executed successfully.", "Error executing range query."),
+ },
+ Post: &v3.Operation{
+ OperationId: "query-range-post",
+ Summary: "Evaluate a range query",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("QueryRangePostInputBody", queryRangePostExamples(), "Submit a range query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("QueryRangeOutputBody", queryRangeResponseExamples(), errorResponseExamples(), "Range query executed successfully.", "Error executing range query."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) queryExemplarsPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("start", "Start timestamp for exemplars query.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for exemplars query.", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("query", "PromQL query to extract exemplars for.", true, stringSchema(), []example{{"example", "prometheus_http_requests_total"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "query-exemplars",
+ Summary: "Query exemplars",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("QueryExemplarsOutputBody", queryExemplarsResponseExamples(), errorResponseExamples(), "Exemplars retrieved successfully.", "Error retrieving exemplars."),
+ },
+ Post: &v3.Operation{
+ OperationId: "query-exemplars-post",
+ Summary: "Query exemplars",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("QueryExemplarsPostInputBody", queryExemplarsPostExamples(), "Submit an exemplars query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("QueryExemplarsOutputBody", queryExemplarsResponseExamples(), errorResponseExamples(), "Exemplars query completed successfully.", "Error processing exemplars query."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) formatQueryPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("query", "PromQL expression to format.", true, stringSchema(), []example{{"example", "sum(rate(http_requests_total[5m])) by (job)"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "format-query",
+ Summary: "Format a PromQL query",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("FormatQueryOutputBody", formatQueryResponseExamples(), errorResponseExamples(), "Query formatted successfully.", "Error formatting query."),
+ },
+ Post: &v3.Operation{
+ OperationId: "format-query-post",
+ Summary: "Format a PromQL query",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("FormatQueryPostInputBody", formatQueryPostExamples(), "Submit a PromQL query to format. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("FormatQueryOutputBody", formatQueryResponseExamples(), errorResponseExamples(), "Query formatting completed successfully.", "Error formatting query."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) parseQueryPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("query", "PromQL expression to parse.", true, stringSchema(), []example{{"example", "up{job=\"prometheus\"}"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "parse-query",
+ Summary: "Parse a PromQL query",
+ Tags: []string{"query"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("ParseQueryOutputBody", parseQueryResponseExamples(), errorResponseExamples(), "Query parsed successfully.", "Error parsing query."),
+ },
+ Post: &v3.Operation{
+ OperationId: "parse-query-post",
+ Summary: "Parse a PromQL query",
+ Tags: []string{"query"},
+ RequestBody: formRequestBodyWithExamples("ParseQueryPostInputBody", parseQueryPostExamples(), "Submit a PromQL query to parse. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("ParseQueryOutputBody", parseQueryResponseExamples(), errorResponseExamples(), "Query parsed successfully via POST.", "Error parsing query via POST."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) labelsPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("start", "Start timestamp for label names query.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for label names query.", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("match[]", "Series selector argument.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{job=\"prometheus\"}"}}}),
+ queryParamWithExample("limit", "Maximum number of label names to return.", false, integerSchema(), []example{{"example", 100}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "labels",
+ Summary: "Get label names",
+ Tags: []string{"labels"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("LabelsOutputBody", labelsResponseExamples(), errorResponseExamples(), "Label names retrieved successfully.", "Error retrieving label names."),
+ },
+ Post: &v3.Operation{
+ OperationId: "labels-post",
+ Summary: "Get label names",
+ Tags: []string{"labels"},
+ RequestBody: formRequestBodyWithExamples("LabelsPostInputBody", labelsPostExamples(), "Submit a label names query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("LabelsOutputBody", labelsResponseExamples(), errorResponseExamples(), "Label names retrieved successfully via POST.", "Error retrieving label names via POST."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) labelValuesPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ pathParam("name", "Label name.", stringSchema()),
+ queryParamWithExample("start", "Start timestamp for label values query.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for label values query.", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("match[]", "Series selector argument.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{job=\"prometheus\"}"}}}),
+ queryParamWithExample("limit", "Maximum number of label values to return.", false, integerSchema(), []example{{"example", 1000}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "label-values",
+ Summary: "Get label values",
+ Tags: []string{"labels"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("LabelValuesOutputBody", labelValuesResponseExamples(), errorResponseExamples(), "Label values retrieved successfully.", "Error retrieving label values."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) seriesPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("start", "Start timestamp for series query.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for series query.", false, timestampSchema(), timestampExamples(exampleTime)),
+ queryParamWithExample("match[]", "Series selector argument.", true, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{job=\"prometheus\"}"}}}),
+ queryParamWithExample("limit", "Maximum number of series to return.", false, integerSchema(), []example{{"example", 100}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "series",
+ Summary: "Find series by label matchers",
+ Tags: []string{"series"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("SeriesOutputBody", seriesResponseExamples(), errorResponseExamples(), "Series returned matching the provided label matchers.", "Error retrieving series."),
+ },
+ Post: &v3.Operation{
+ OperationId: "series-post",
+ Summary: "Find series by label matchers",
+ Tags: []string{"series"},
+ RequestBody: formRequestBodyWithExamples("SeriesPostInputBody", seriesPostExamples(), "Submit a series query. This endpoint accepts the same parameters as the GET version."),
+ Responses: responsesWithErrorExamples("SeriesOutputBody", seriesResponseExamples(), errorResponseExamples(), "Series returned matching the provided label matchers via POST.", "Error retrieving series via POST."),
+ },
+ Delete: &v3.Operation{
+ OperationId: "delete-series",
+ Summary: "Delete series",
+ Description: "Delete series matching selectors. Note: This is deprecated, use POST /admin/tsdb/delete_series instead.",
+ Tags: []string{"series"},
+ Responses: responsesWithErrorExamples("SeriesDeleteOutputBody", seriesDeleteResponseExamples(), errorResponseExamples(), "Series marked for deletion.", "Error deleting series."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) metadataPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("limit", "The maximum number of metrics to return.", false, integerSchema(), []example{{"example", 100}}),
+ queryParamWithExample("limit_per_metric", "The maximum number of metadata entries per metric.", false, integerSchema(), []example{{"example", 10}}),
+ queryParamWithExample("metric", "A metric name to filter metadata for.", false, stringSchema(), []example{{"example", "http_requests_total"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-metadata",
+ Summary: "Get metadata",
+ Tags: []string{"metadata"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("MetadataOutputBody", metadataResponseExamples(), errorResponseExamples(), "Metric metadata retrieved successfully.", "Error retrieving metadata."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) scrapePoolsPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-scrape-pools",
+ Summary: "Get scrape pools",
+ Tags: []string{"targets"},
+ Responses: responsesWithErrorExamples("ScrapePoolsOutputBody", scrapePoolsResponseExamples(), errorResponseExamples(), "Scrape pools retrieved successfully.", "Error retrieving scrape pools."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) targetsPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("scrapePool", "Filter targets by scrape pool name.", false, stringSchema(), []example{{"example", "prometheus"}}),
+ queryParamWithExample("state", "Filter by state: active, dropped, or any.", false, stringSchema(), []example{{"example", "active"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-targets",
+ Summary: "Get targets",
+ Tags: []string{"targets"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("TargetsOutputBody", targetsResponseExamples(), errorResponseExamples(), "Target discovery information retrieved successfully.", "Error retrieving targets."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) targetsMetadataPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("match_target", "Label selector to filter targets.", false, stringSchema(), []example{{"example", "{job=\"prometheus\"}"}}),
+ queryParamWithExample("metric", "Metric name to retrieve metadata for.", false, stringSchema(), []example{{"example", "http_requests_total"}}),
+ queryParamWithExample("limit", "Maximum number of targets to match.", false, integerSchema(), []example{{"example", 10}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-targets-metadata",
+ Summary: "Get targets metadata",
+ Tags: []string{"targets"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("TargetMetadataOutputBody", targetsMetadataResponseExamples(), errorResponseExamples(), "Target metadata retrieved successfully.", "Error retrieving target metadata."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) targetsRelabelStepsPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("scrapePool", "Name of the scrape pool.", true, stringSchema(), []example{{"example", "prometheus"}}),
+ queryParamWithExample("labels", "JSON-encoded labels to apply relabel rules to.", true, stringSchema(), []example{{"example", "{\"__address__\":\"localhost:9090\",\"job\":\"prometheus\"}"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-targets-relabel-steps",
+ Summary: "Get targets relabel steps",
+ Tags: []string{"targets"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("TargetRelabelStepsOutputBody", targetsRelabelStepsResponseExamples(), errorResponseExamples(), "Relabel steps retrieved successfully.", "Error retrieving relabel steps."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) rulesPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("type", "Filter by rule type: alert or record.", false, stringSchema(), []example{{"example", "alert"}}),
+ queryParamWithExample("rule_name[]", "Filter by rule name.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"HighErrorRate"}}}),
+ queryParamWithExample("rule_group[]", "Filter by rule group name.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"example_alerts"}}}),
+ queryParamWithExample("file[]", "Filter by file path.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"/etc/prometheus/rules.yml"}}}),
+ queryParamWithExample("match[]", "Label matchers to filter rules.", false, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{severity=\"critical\"}"}}}),
+ queryParamWithExample("exclude_alerts", "Exclude active alerts from response.", false, stringSchema(), []example{{"example", "false"}}),
+ queryParamWithExample("group_limit", "Maximum number of rule groups to return.", false, integerSchema(), []example{{"example", 100}}),
+ queryParamWithExample("group_next_token", "Pagination token for next page.", false, stringSchema(), []example{{"example", "abc123"}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "rules",
+ Summary: "Get alerting and recording rules",
+ Tags: []string{"rules"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("RulesOutputBody", rulesResponseExamples(), errorResponseExamples(), "Rules retrieved successfully.", "Error retrieving rules."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) alertsPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "alerts",
+ Summary: "Get active alerts",
+ Tags: []string{"alerts"},
+ Responses: responsesWithErrorExamples("AlertsOutputBody", alertsResponseExamples(), errorResponseExamples(), "Active alerts retrieved successfully.", "Error retrieving alerts."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) alertmanagersPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "alertmanagers",
+ Summary: "Get Alertmanager discovery",
+ Tags: []string{"alerts"},
+ Responses: responsesWithErrorExamples("AlertmanagersOutputBody", alertmanagersResponseExamples(), errorResponseExamples(), "Alertmanager targets retrieved successfully.", "Error retrieving Alertmanager targets."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusConfigPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-config",
+ Summary: "Get status config",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusConfigOutputBody", statusConfigResponseExamples(), errorResponseExamples(), "Configuration retrieved successfully.", "Error retrieving configuration."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusRuntimeInfoPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-runtimeinfo",
+ Summary: "Get status runtimeinfo",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusRuntimeInfoOutputBody", statusRuntimeInfoResponseExamples(), errorResponseExamples(), "Runtime information retrieved successfully.", "Error retrieving runtime information."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusBuildInfoPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-buildinfo",
+ Summary: "Get status buildinfo",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusBuildInfoOutputBody", statusBuildInfoResponseExamples(), errorResponseExamples(), "Build information retrieved successfully.", "Error retrieving build information."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusFlagsPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-flags",
+ Summary: "Get status flags",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusFlagsOutputBody", statusFlagsResponseExamples(), errorResponseExamples(), "Command-line flags retrieved successfully.", "Error retrieving flags."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusTSDBPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("limit", "The maximum number of items to return per category.", false, integerSchema(), []example{{"example", 10}}),
+ }
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "status-tsdb",
+ Summary: "Get TSDB status",
+ Tags: []string{"status"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("StatusTSDBOutputBody", statusTSDBResponseExamples(), errorResponseExamples(), "TSDB status retrieved successfully.", "Error retrieving TSDB status."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusTSDBBlocksPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "status-tsdb-blocks",
+ Summary: "Get TSDB blocks information",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusTSDBBlocksOutputBody", statusTSDBBlocksResponseExamples(), errorResponseExamples(), "TSDB blocks information retrieved successfully.", "Error retrieving TSDB blocks."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) statusWALReplayPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-status-walreplay",
+ Summary: "Get status walreplay",
+ Tags: []string{"status"},
+ Responses: responsesWithErrorExamples("StatusWALReplayOutputBody", statusWALReplayResponseExamples(), errorResponseExamples(), "WAL replay status retrieved successfully.", "Error retrieving WAL replay status."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) adminDeleteSeriesPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("match[]", "Series selectors to identify series to delete.", true, base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }), []example{{"example", []string{"{__name__=~\"test.*\"}"}}}),
+ queryParamWithExample("start", "Start timestamp for deletion.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))),
+ queryParamWithExample("end", "End timestamp for deletion.", false, timestampSchema(), timestampExamples(exampleTime)),
+ }
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "deleteSeriesPost",
+ Summary: "Delete series matching selectors",
+ Description: "Deletes data for a selection of series in a time range.",
+ Tags: []string{"admin"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("DeleteSeriesOutputBody", deleteSeriesResponseExamples(), errorResponseExamples(), "Series deleted successfully.", "Error deleting series."),
+ },
+ Put: &v3.Operation{
+ OperationId: "deleteSeriesPut",
+ Summary: "Delete series matching selectors via PUT",
+ Description: "Deletes data for a selection of series in a time range using PUT method.",
+ Tags: []string{"admin"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("DeleteSeriesOutputBody", deleteSeriesResponseExamples(), errorResponseExamples(), "Series deleted successfully via PUT.", "Error deleting series via PUT."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) adminCleanTombstonesPath() *v3.PathItem {
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "cleanTombstonesPost",
+ Summary: "Clean tombstones in the TSDB",
+ Description: "Removes deleted data from disk and cleans up existing tombstones.",
+ Tags: []string{"admin"},
+ Responses: responsesWithErrorExamples("CleanTombstonesOutputBody", cleanTombstonesResponseExamples(), errorResponseExamples(), "Tombstones cleaned successfully.", "Error cleaning tombstones."),
+ },
+ Put: &v3.Operation{
+ OperationId: "cleanTombstonesPut",
+ Summary: "Clean tombstones in the TSDB via PUT",
+ Description: "Removes deleted data from disk and cleans up existing tombstones using PUT method.",
+ Tags: []string{"admin"},
+ Responses: responsesWithErrorExamples("CleanTombstonesOutputBody", cleanTombstonesResponseExamples(), errorResponseExamples(), "Tombstones cleaned successfully via PUT.", "Error cleaning tombstones via PUT."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) adminSnapshotPath() *v3.PathItem {
+ params := []*v3.Parameter{
+ queryParamWithExample("skip_head", "If true, do not snapshot data in the head block.", false, stringSchema(), []example{{"example", "false"}}),
+ }
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "snapshotPost",
+ Summary: "Create a snapshot of the TSDB",
+ Description: "Creates a snapshot of all current data.",
+ Tags: []string{"admin"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("SnapshotOutputBody", snapshotResponseExamples(), errorResponseExamples(), "Snapshot created successfully.", "Error creating snapshot."),
+ },
+ Put: &v3.Operation{
+ OperationId: "snapshotPut",
+ Summary: "Create a snapshot of the TSDB via PUT",
+ Description: "Creates a snapshot of all current data using PUT method.",
+ Tags: []string{"admin"},
+ Parameters: params,
+ Responses: responsesWithErrorExamples("SnapshotOutputBody", snapshotResponseExamples(), errorResponseExamples(), "Snapshot created successfully via PUT.", "Error creating snapshot via PUT."),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) remoteReadPath() *v3.PathItem {
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "remoteRead",
+ Summary: "Remote read endpoint",
+ Description: "Prometheus remote read endpoint for federated queries. Accepts and returns Protocol Buffer encoded data.",
+ Tags: []string{"remote"},
+ Responses: responsesNoContent(),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) remoteWritePath() *v3.PathItem {
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "remoteWrite",
+ Summary: "Remote write endpoint",
+ Description: "Prometheus remote write endpoint for sending metrics. Accepts Protocol Buffer encoded write requests.",
+ Tags: []string{"remote"},
+ Responses: responsesNoContent(),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) otlpWritePath() *v3.PathItem {
+ return &v3.PathItem{
+ Post: &v3.Operation{
+ OperationId: "otlpWrite",
+ Summary: "OTLP metrics write endpoint",
+ Description: "OpenTelemetry Protocol metrics ingestion endpoint. Accepts OTLP/HTTP metrics in Protocol Buffer format.",
+ Tags: []string{"otlp"},
+ Responses: responsesNoContent(),
+ },
+ }
+}
+
+func (*OpenAPIBuilder) notificationsPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-notifications",
+ Summary: "Get notifications",
+ Tags: []string{"notifications"},
+ Responses: responsesWithErrorExamples("NotificationsOutputBody", notificationsResponseExamples(), errorResponseExamples(), "Notifications retrieved successfully.", "Error retrieving notifications."),
+ },
+ }
+}
+
+// notificationsLivePath defines the /notifications/live endpoint.
+// This endpoint uses OpenAPI 3.2's itemSchema feature for documenting SSE streams.
+// It is excluded from the OpenAPI 3.1 specification.
+func (*OpenAPIBuilder) notificationsLivePath() *v3.PathItem {
+ codes := orderedmap.New[string, *v3.Response]()
+ content := orderedmap.New[string, *v3.MediaType]()
+
+ // Create a schema for the SSE message structure.
+ // Each SSE message has a 'data' field containing JSON.
+ sseItemProps := orderedmap.New[string, *base.SchemaProxy]()
+ sseItemProps.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"string"},
+ Description: "SSE data field containing JSON-encoded notification.",
+ ContentMediaType: "application/json",
+ ContentSchema: schemaRef("#/components/schemas/Notification"),
+ }))
+
+ content.Set("text/event-stream", &v3.MediaType{
+ // Use ItemSchema (OpenAPI 3.2) instead of Schema to describe each SSE message.
+ ItemSchema: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Title: "Server Sent Event Message",
+ Description: "A single SSE message. The data field contains a JSON-encoded Notification object.",
+ Properties: sseItemProps,
+ Required: []string{"data"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ }),
+ Examples: notificationLiveExamples(),
+ })
+
+ codes.Set("200", &v3.Response{
+ Description: "Server-sent events stream established.",
+ Content: content,
+ })
+ codes.Set("default", errorResponse())
+
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "notifications-live",
+ Summary: "Stream live notifications via Server-Sent Events",
+ Description: "Subscribe to real-time server notifications using SSE. Each event contains a JSON-encoded Notification object in the data field.",
+ Tags: []string{"notifications"},
+ Responses: &v3.Responses{Codes: codes},
+ },
+ }
+}
+
+func (*OpenAPIBuilder) featuresPath() *v3.PathItem {
+ return &v3.PathItem{
+ Get: &v3.Operation{
+ OperationId: "get-features",
+ Summary: "Get features",
+ Tags: []string{"features"},
+ Responses: responsesWithErrorExamples("FeaturesOutputBody", featuresResponseExamples(), errorResponseExamples(), "Feature flags retrieved successfully.", "Error retrieving features."),
+ },
+ }
+}
diff --git a/web/api/v1/openapi_schemas.go b/web/api/v1/openapi_schemas.go
new file mode 100644
index 0000000000..de39b43e37
--- /dev/null
+++ b/web/api/v1/openapi_schemas.go
@@ -0,0 +1,1296 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines all OpenAPI schema definitions for API request and response types.
+// Schemas are organized by functional area: query, labels, series, metadata, targets,
+// rules, alerts, and status endpoints.
+package v1
+
+import (
+ "github.com/pb33f/libopenapi/datamodel/high/base"
+ v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
+ "github.com/pb33f/libopenapi/orderedmap"
+)
+
+// Schema definitions and components builder.
+
+func (b *OpenAPIBuilder) buildComponents() *v3.Components {
+ schemas := orderedmap.New[string, *base.SchemaProxy]()
+
+ // Core schemas.
+ schemas.Set("Error", b.errorSchema())
+ schemas.Set("Labels", b.labelsSchema())
+
+ // Query schemas.
+ schemas.Set("QueryOutputBody", b.responseBodySchema("QueryData", "Response body for instant query."))
+ schemas.Set("QueryRangeOutputBody", b.responseBodySchema("QueryData", "Response body for range query."))
+ schemas.Set("QueryPostInputBody", b.queryPostInputBodySchema())
+ schemas.Set("QueryRangePostInputBody", b.queryRangePostInputBodySchema())
+ schemas.Set("QueryExemplarsOutputBody", b.simpleResponseBodySchema())
+ schemas.Set("QueryExemplarsPostInputBody", b.queryExemplarsPostInputBodySchema())
+ schemas.Set("FormatQueryOutputBody", b.formatQueryOutputBodySchema())
+ schemas.Set("FormatQueryPostInputBody", b.formatQueryPostInputBodySchema())
+ schemas.Set("ParseQueryOutputBody", b.simpleResponseBodySchema())
+ schemas.Set("ParseQueryPostInputBody", b.parseQueryPostInputBodySchema())
+ schemas.Set("QueryData", b.queryDataSchema())
+ schemas.Set("QueryStats", b.queryStatsSchema())
+ schemas.Set("FloatSample", b.floatSampleSchema())
+ schemas.Set("HistogramSample", b.histogramSampleSchema())
+ schemas.Set("FloatSeries", b.floatSeriesSchema())
+ schemas.Set("HistogramSeries", b.histogramSeriesSchema())
+ schemas.Set("HistogramValue", b.histogramValueSchema())
+
+ // Label schemas.
+ schemas.Set("LabelsOutputBody", b.stringArrayResponseBodySchema())
+ schemas.Set("LabelsPostInputBody", b.labelsPostInputBodySchema())
+ schemas.Set("LabelValuesOutputBody", b.stringArrayResponseBodySchema())
+
+ // Series schemas.
+ schemas.Set("SeriesOutputBody", b.labelsArrayResponseBodySchema())
+ schemas.Set("SeriesPostInputBody", b.seriesPostInputBodySchema())
+ schemas.Set("SeriesDeleteOutputBody", b.simpleResponseBodySchema())
+
+ // Metadata schemas.
+ schemas.Set("Metadata", b.metadataSchema())
+ schemas.Set("MetadataOutputBody", b.metadataOutputBodySchema())
+ schemas.Set("MetricMetadata", b.metricMetadataSchema())
+
+ // Target schemas.
+ schemas.Set("Target", b.targetSchema())
+ schemas.Set("DroppedTarget", b.droppedTargetSchema())
+ schemas.Set("TargetDiscovery", b.targetDiscoverySchema())
+ schemas.Set("TargetsOutputBody", b.refResponseBodySchema("TargetDiscovery", "Response body for targets endpoint."))
+ schemas.Set("TargetMetadataOutputBody", b.metricMetadataArrayResponseBodySchema())
+ schemas.Set("ScrapePoolsDiscovery", b.scrapePoolsDiscoverySchema())
+ schemas.Set("ScrapePoolsOutputBody", b.refResponseBodySchema("ScrapePoolsDiscovery", "Response body for scrape pools endpoint."))
+
+ // Relabel schemas.
+ schemas.Set("Config", b.configSchema())
+ schemas.Set("RelabelStep", b.relabelStepSchema())
+ schemas.Set("RelabelStepsResponse", b.relabelStepsResponseSchema())
+ schemas.Set("TargetRelabelStepsOutputBody", b.refResponseBodySchema("RelabelStepsResponse", "Response body for target relabel steps endpoint."))
+
+ // Rule schemas.
+ schemas.Set("RuleGroup", b.ruleGroupSchema())
+ schemas.Set("RuleDiscovery", b.ruleDiscoverySchema())
+ schemas.Set("RulesOutputBody", b.refResponseBodySchema("RuleDiscovery", "Response body for rules endpoint."))
+
+ // Alert schemas.
+ schemas.Set("Alert", b.alertSchema())
+ schemas.Set("AlertDiscovery", b.alertDiscoverySchema())
+ schemas.Set("AlertsOutputBody", b.refResponseBodySchema("AlertDiscovery", "Response body for alerts endpoint."))
+ schemas.Set("AlertmanagerTarget", b.alertmanagerTargetSchema())
+ schemas.Set("AlertmanagerDiscovery", b.alertmanagerDiscoverySchema())
+ schemas.Set("AlertmanagersOutputBody", b.refResponseBodySchema("AlertmanagerDiscovery", "Response body for alertmanagers endpoint."))
+
+ // Status schemas.
+ schemas.Set("StatusConfigData", b.statusConfigDataSchema())
+ schemas.Set("StatusConfigOutputBody", b.refResponseBodySchema("StatusConfigData", "Response body for status config endpoint."))
+ schemas.Set("RuntimeInfo", b.runtimeInfoSchema())
+ schemas.Set("StatusRuntimeInfoOutputBody", b.refResponseBodySchema("RuntimeInfo", "Response body for status runtime info endpoint."))
+ schemas.Set("PrometheusVersion", b.prometheusVersionSchema())
+ schemas.Set("StatusBuildInfoOutputBody", b.refResponseBodySchema("PrometheusVersion", "Response body for status build info endpoint."))
+ schemas.Set("StatusFlagsOutputBody", b.statusFlagsOutputBodySchema())
+ schemas.Set("HeadStats", b.headStatsSchema())
+ schemas.Set("TSDBStat", b.tsdbStatSchema())
+ schemas.Set("TSDBStatus", b.tsdbStatusSchema())
+ schemas.Set("StatusTSDBOutputBody", b.refResponseBodySchema("TSDBStatus", "Response body for status TSDB endpoint."))
+ schemas.Set("BlockDesc", b.blockDescSchema())
+ schemas.Set("BlockStats", b.blockStatsSchema())
+ schemas.Set("BlockMetaCompaction", b.blockMetaCompactionSchema())
+ schemas.Set("BlockMeta", b.blockMetaSchema())
+ schemas.Set("StatusTSDBBlocksData", b.statusTSDBBlocksDataSchema())
+ schemas.Set("StatusTSDBBlocksOutputBody", b.refResponseBodySchema("StatusTSDBBlocksData", "Response body for status TSDB blocks endpoint."))
+ schemas.Set("StatusWALReplayData", b.statusWALReplayDataSchema())
+ schemas.Set("StatusWALReplayOutputBody", b.refResponseBodySchema("StatusWALReplayData", "Response body for status WAL replay endpoint."))
+
+ // Admin schemas.
+ schemas.Set("DeleteSeriesOutputBody", b.statusOnlyResponseBodySchema())
+ schemas.Set("CleanTombstonesOutputBody", b.statusOnlyResponseBodySchema())
+ schemas.Set("DataStruct", b.dataStructSchema())
+ schemas.Set("SnapshotOutputBody", b.refResponseBodySchema("DataStruct", "Response body for snapshot endpoint."))
+
+ // Notification schemas.
+ schemas.Set("Notification", b.notificationSchema())
+ schemas.Set("NotificationsOutputBody", b.notificationArrayResponseBodySchema())
+
+ // Features schema.
+ schemas.Set("FeaturesOutputBody", b.simpleResponseBodySchema())
+
+ return &v3.Components{Schemas: schemas}
+}
+
+// Schema definitions using high-level structs.
+
+func (*OpenAPIBuilder) errorSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("errorType", stringSchemaWithDescriptionAndExample("Type of error that occurred.", "bad_data"))
+ props.Set("error", stringSchemaWithDescriptionAndExample("Human-readable error message.", "invalid parameter"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Error response.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "errorType", "error"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) labelsSchema() *base.SchemaProxy {
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Label set represented as a key-value map.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: true},
+ })
+}
+
+func (*OpenAPIBuilder) responseBodySchema(dataSchemaRef, description string) *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", schemaRef("#/components/schemas/"+dataSchemaRef))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: description,
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (b *OpenAPIBuilder) refResponseBodySchema(dataSchemaRef, description string) *base.SchemaProxy {
+ return b.responseBodySchema(dataSchemaRef, description)
+}
+
+func (*OpenAPIBuilder) simpleResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Description: "Response data (structure varies by endpoint).",
+ Example: createYAMLNode(map[string]any{"result": "ok"}),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Generic response body.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusOnlyResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body containing only status.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) stringArrayResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ Example: createYAMLNode([]string{"__name__", "job", "instance"}),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body with an array of strings.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) labelsArrayResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Labels")},
+ Example: createYAMLNode([]map[string]string{{"__name__": "up", "job": "prometheus", "instance": "localhost:9090"}}),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body with an array of label sets.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) metricMetadataArrayResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/MetricMetadata")},
+ Example: createYAMLNode([]map[string]any{
+ {
+ "target": map[string]string{
+ "instance": "localhost:9090",
+ "job": "prometheus",
+ },
+ "metric": "up",
+ "type": "gauge",
+ "help": "The current health status of the target",
+ "unit": "",
+ },
+ }),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body with an array of metric metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) notificationArrayResponseBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Notification")},
+ Example: createYAMLNode([]map[string]any{
+ {"text": "Server is running", "date": "2023-07-21T20:00:00.000Z", "active": true},
+ }),
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body with an array of notifications.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) floatSampleSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("metric", schemaRef("#/components/schemas/Labels"))
+ props.Set("value", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Timestamp and float value as [unixTimestamp, stringValue].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ Example: createYAMLNode([]any{1767436620, "1"}),
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "A sample with a float value.",
+ Required: []string{"metric", "value"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) histogramValueSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("count", stringSchemaWithDescription("Total count of observations."))
+ props.Set("sum", stringSchemaWithDescription("Sum of all observed values."))
+ props.Set("buckets", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Histogram buckets as [boundary_rule, lower, upper, count].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ })},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Native histogram value representation.",
+ Required: []string{"count", "sum"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) histogramSampleSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("metric", schemaRef("#/components/schemas/Labels"))
+ props.Set("histogram", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Timestamp and histogram value as [unixTimestamp, histogramObject].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ schemaRef("#/components/schemas/HistogramValue"),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ Example: createYAMLNode([]any{1767436620, map[string]any{"count": "60", "sum": "120", "buckets": []any{}}}),
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "A sample with a native histogram value.",
+ Required: []string{"metric", "histogram"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) floatSeriesSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("metric", schemaRef("#/components/schemas/Labels"))
+ props.Set("values", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Array of [timestamp, stringValue] pairs for float values.",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ })},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "A time series with float values.",
+ Required: []string{"metric", "values"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) histogramSeriesSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("metric", schemaRef("#/components/schemas/Labels"))
+ props.Set("histograms", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Array of [timestamp, histogramObject] pairs for histogram values.",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ schemaRef("#/components/schemas/HistogramValue"),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ })},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "A time series with native histogram values.",
+ Required: []string{"metric", "histograms"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) queryDataSchema() *base.SchemaProxy {
+ // Vector query result.
+ vectorProps := orderedmap.New[string, *base.SchemaProxy]()
+ vectorProps.Set("resultType", stringSchemaWithConstValue("vector"))
+ vectorProps.Set("result", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Array of samples (either float or histogram).",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ AnyOf: []*base.SchemaProxy{
+ schemaRef("#/components/schemas/FloatSample"),
+ schemaRef("#/components/schemas/HistogramSample"),
+ },
+ })},
+ }))
+ vectorProps.Set("stats", schemaRef("#/components/schemas/QueryStats"))
+
+ // Matrix query result.
+ matrixProps := orderedmap.New[string, *base.SchemaProxy]()
+ matrixProps.Set("resultType", stringSchemaWithConstValue("matrix"))
+ matrixProps.Set("result", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Array of time series (either float or histogram).",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ AnyOf: []*base.SchemaProxy{
+ schemaRef("#/components/schemas/FloatSeries"),
+ schemaRef("#/components/schemas/HistogramSeries"),
+ },
+ })},
+ }))
+ matrixProps.Set("stats", schemaRef("#/components/schemas/QueryStats"))
+
+ // Scalar query result.
+ scalarProps := orderedmap.New[string, *base.SchemaProxy]()
+ scalarProps.Set("resultType", stringSchemaWithConstValue("scalar"))
+ scalarProps.Set("result", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Scalar value as [timestamp, stringValue].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ OneOf: []*base.SchemaProxy{
+ base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}}),
+ stringSchema(),
+ },
+ })},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ }))
+ scalarProps.Set("stats", schemaRef("#/components/schemas/QueryStats"))
+
+ // String query result.
+ stringResultProps := orderedmap.New[string, *base.SchemaProxy]()
+ stringResultProps.Set("resultType", stringSchemaWithConstValue("string"))
+ stringResultProps.Set("result", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "String value as [timestamp, stringValue].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ }))
+ stringResultProps.Set("stats", schemaRef("#/components/schemas/QueryStats"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Description: "Query result data. The structure of 'result' depends on 'resultType'.",
+ AnyOf: []*base.SchemaProxy{
+ // resultType: vector -> result: array of samples.
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Required: []string{"resultType", "result"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: vectorProps,
+ }),
+ // resultType: matrix -> result: array of series.
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Required: []string{"resultType", "result"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: matrixProps,
+ }),
+ // resultType: scalar -> result: [timestamp, value].
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Required: []string{"resultType", "result"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: scalarProps,
+ }),
+ // resultType: string -> result: [timestamp, stringValue].
+ base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Required: []string{"resultType", "result"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: stringResultProps,
+ }),
+ },
+ Example: createYAMLNode(map[string]any{
+ "resultType": "vector",
+ "result": []map[string]any{
+ {
+ "metric": map[string]string{"__name__": "up", "job": "prometheus"},
+ "value": []any{1627845600, "1"},
+ },
+ },
+ }),
+ })
+}
+
+func (*OpenAPIBuilder) queryStatsSchema() *base.SchemaProxy {
+ // Timings object.
+ timingsProps := orderedmap.New[string, *base.SchemaProxy]()
+ timingsProps.Set("evalTotalTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Total evaluation time in seconds.",
+ }))
+ timingsProps.Set("resultSortTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Time spent sorting results in seconds.",
+ }))
+ timingsProps.Set("queryPreparationTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Query preparation time in seconds.",
+ }))
+ timingsProps.Set("innerEvalTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Inner evaluation time in seconds.",
+ }))
+ timingsProps.Set("execQueueTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Execution queue wait time in seconds.",
+ }))
+ timingsProps.Set("execTotalTime", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"number"},
+ Description: "Total execution time in seconds.",
+ }))
+
+ // Samples object.
+ samplesProps := orderedmap.New[string, *base.SchemaProxy]()
+ samplesProps.Set("totalQueryableSamples", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Description: "Total number of samples that were queryable.",
+ }))
+ samplesProps.Set("peakSamples", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"integer"},
+ Description: "Peak number of samples in memory.",
+ }))
+ samplesProps.Set("totalQueryableSamplesPerStep", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Total queryable samples per step (only included with stats=all).",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Timestamp and sample count as [timestamp, count].",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{Type: []string{"number"}})},
+ MinItems: int64Ptr(2),
+ MaxItems: int64Ptr(2),
+ })},
+ }))
+
+ // Main stats object.
+ statsProps := orderedmap.New[string, *base.SchemaProxy]()
+ statsProps.Set("timings", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Properties: timingsProps,
+ }))
+ statsProps.Set("samples", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Properties: samplesProps,
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Query execution statistics (included when the stats query parameter is provided).",
+ Properties: statsProps,
+ })
+}
+
+func (*OpenAPIBuilder) queryPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The PromQL query to execute.", "up"))
+ props.Set("time", stringSchemaWithDescriptionAndExample("Form field: The evaluation timestamp (optional, defaults to current time).", "2023-07-21T20:10:51.781Z"))
+ props.Set("limit", integerSchemaWithDescriptionAndExample("Form field: The maximum number of metrics to return.", 100))
+ props.Set("timeout", stringSchemaWithDescriptionAndExample("Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).", "30s"))
+ props.Set("lookback_delta", stringSchemaWithDescriptionAndExample("Form field: Override the lookback period for this query (optional).", "5m"))
+ props.Set("stats", stringSchemaWithDescriptionAndExample("Form field: When provided, include query statistics in the response (the special value 'all' enables more comprehensive statistics).", "all"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for instant query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) queryRangePostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The query to execute.", "rate(http_requests_total[5m])"))
+ props.Set("start", stringSchemaWithDescriptionAndExample("Form field: The start time of the query.", "2023-07-21T20:10:30.781Z"))
+ props.Set("end", stringSchemaWithDescriptionAndExample("Form field: The end time of the query.", "2023-07-21T20:20:30.781Z"))
+ props.Set("step", stringSchemaWithDescriptionAndExample("Form field: The step size of the query.", "15s"))
+ props.Set("limit", integerSchemaWithDescriptionAndExample("Form field: The maximum number of metrics to return.", 100))
+ props.Set("timeout", stringSchemaWithDescriptionAndExample("Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).", "30s"))
+ props.Set("lookback_delta", stringSchemaWithDescriptionAndExample("Form field: Override the lookback period for this query (optional).", "5m"))
+ props.Set("stats", stringSchemaWithDescriptionAndExample("Form field: When provided, include query statistics in the response (the special value 'all' enables more comprehensive statistics).", "all"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for range query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query", "start", "end", "step"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) queryExemplarsPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The query to execute.", "http_requests_total"))
+ props.Set("start", stringSchemaWithDescriptionAndExample("Form field: The start time of the query.", "2023-07-21T20:00:00.000Z"))
+ props.Set("end", stringSchemaWithDescriptionAndExample("Form field: The end time of the query.", "2023-07-21T21:00:00.000Z"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for exemplars query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) formatQueryOutputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", stringSchemaWithDescriptionAndExample("Formatted query string.", "sum by(status) (rate(http_requests_total[5m]))"))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body for format query endpoint.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) formatQueryPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The query to format.", "sum(rate(http_requests_total[5m])) by (status)"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for format query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) parseQueryPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("query", stringSchemaWithDescriptionAndExample("Form field: The query to parse.", "sum(rate(http_requests_total[5m]))"))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for parse query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"query"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) labelsPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("start", stringSchemaWithDescriptionAndExample("Form field: The start time of the query.", "2023-07-21T20:00:00.000Z"))
+ props.Set("end", stringSchemaWithDescriptionAndExample("Form field: The end time of the query.", "2023-07-21T21:00:00.000Z"))
+ props.Set("match[]", stringArraySchemaWithDescriptionAndExample("Form field: Series selector argument that selects the series from which to read the label names.", []string{"{job=\"prometheus\"}"}))
+ props.Set("limit", integerSchemaWithDescriptionAndExample("Form field: The maximum number of label names to return.", 100))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for labels query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) seriesPostInputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("start", stringSchemaWithDescriptionAndExample("Form field: The start time of the query.", "2023-07-21T20:00:00.000Z"))
+ props.Set("end", stringSchemaWithDescriptionAndExample("Form field: The end time of the query.", "2023-07-21T21:00:00.000Z"))
+ props.Set("match[]", stringArraySchemaWithDescriptionAndExample("Form field: Series selector argument that selects the series to return.", []string{"{job=\"prometheus\"}"}))
+ props.Set("limit", integerSchemaWithDescriptionAndExample("Form field: The maximum number of series to return.", 100))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "POST request body for series query.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"match[]"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) metadataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("type", stringSchemaWithDescription("Metric type (counter, gauge, histogram, summary, or untyped)."))
+ props.Set("unit", stringSchemaWithDescription("Unit of the metric."))
+ props.Set("help", stringSchemaWithDescription("Help text describing the metric."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Metric metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"type", "unit", "help"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) metadataOutputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{
+ A: base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Metadata")},
+ }),
+ },
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body for metadata endpoint.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) metricMetadataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("target", schemaRef("#/components/schemas/Labels"))
+ props.Set("metric", stringSchemaWithDescription("Metric name."))
+ props.Set("type", stringSchemaWithDescription("Metric type (counter, gauge, histogram, summary, or untyped)."))
+ props.Set("help", stringSchemaWithDescription("Help text describing the metric."))
+ props.Set("unit", stringSchemaWithDescription("Unit of the metric."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Target metric metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"target", "type", "help", "unit"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) targetSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("discoveredLabels", schemaRef("#/components/schemas/Labels"))
+ props.Set("labels", schemaRef("#/components/schemas/Labels"))
+ props.Set("scrapePool", stringSchemaWithDescription("Name of the scrape pool."))
+ props.Set("scrapeUrl", stringSchemaWithDescription("URL of the target."))
+ props.Set("globalUrl", stringSchemaWithDescription("Global URL of the target."))
+ props.Set("lastError", stringSchemaWithDescription("Last error message from scraping."))
+ props.Set("lastScrape", dateTimeSchemaWithDescription("Timestamp of the last scrape."))
+ props.Set("lastScrapeDuration", numberSchemaWithDescription("Duration of the last scrape in seconds."))
+ props.Set("health", stringSchemaWithDescription("Health status of the target (up, down, or unknown)."))
+ props.Set("scrapeInterval", stringSchemaWithDescription("Scrape interval for this target."))
+ props.Set("scrapeTimeout", stringSchemaWithDescription("Scrape timeout for this target."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Scrape target information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"discoveredLabels", "labels", "scrapePool", "scrapeUrl", "globalUrl", "lastError", "lastScrape", "lastScrapeDuration", "health", "scrapeInterval", "scrapeTimeout"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) droppedTargetSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("discoveredLabels", schemaRef("#/components/schemas/Labels"))
+ props.Set("scrapePool", stringSchemaWithDescription("Name of the scrape pool."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Dropped target information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"discoveredLabels", "scrapePool"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) targetDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("activeTargets", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Target")},
+ }))
+ props.Set("droppedTargets", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/DroppedTarget")},
+ }))
+ props.Set("droppedTargetCounts", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{A: integerSchema()},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Target discovery information including active and dropped targets.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"activeTargets", "droppedTargets", "droppedTargetCounts"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) scrapePoolsDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("scrapePools", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "List of all configured scrape pools.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"scrapePools"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) configSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("source_labels", stringArraySchemaWithDescription("Source labels for relabeling."))
+ props.Set("separator", stringSchemaWithDescription("Separator for source label values."))
+ props.Set("regex", stringSchemaWithDescription("Regular expression for matching."))
+ props.Set("modulus", integerSchemaWithDescription("Modulus for hash-based relabeling."))
+ props.Set("target_label", stringSchemaWithDescription("Target label name."))
+ props.Set("replacement", stringSchemaWithDescription("Replacement value."))
+ props.Set("action", stringSchemaWithDescription("Relabel action."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Relabel configuration.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) relabelStepSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("rule", schemaRef("#/components/schemas/Config"))
+ props.Set("output", schemaRef("#/components/schemas/Labels"))
+ props.Set("keep", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Relabel step showing the rule, output, and whether the target was kept.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"rule", "output", "keep"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) relabelStepsResponseSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("steps", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/RelabelStep")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Relabeling steps response.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"steps"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) ruleGroupSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("name", stringSchemaWithDescription("Name of the rule group."))
+ props.Set("file", stringSchemaWithDescription("File containing the rule group."))
+ props.Set("rules", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Description: "Rules in this group.",
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{Type: []string{"object"}, Description: "Rule definition."})},
+ }))
+ props.Set("interval", numberSchemaWithDescription("Evaluation interval in seconds."))
+ props.Set("limit", integerSchemaWithDescription("Maximum number of alerts for this group."))
+ props.Set("evaluationTime", numberSchemaWithDescription("Time taken to evaluate the group in seconds."))
+ props.Set("lastEvaluation", dateTimeSchemaWithDescription("Timestamp of the last evaluation."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Rule group information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"name", "file", "rules", "interval", "limit", "evaluationTime", "lastEvaluation"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) ruleDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("groups", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/RuleGroup")},
+ }))
+ props.Set("groupNextToken", stringSchemaWithDescription("Pagination token for the next page of groups."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Rule discovery information containing all rule groups.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"groups"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) alertSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("labels", schemaRef("#/components/schemas/Labels"))
+ props.Set("annotations", schemaRef("#/components/schemas/Labels"))
+ props.Set("state", stringSchemaWithDescription("State of the alert (pending, firing, or inactive)."))
+ props.Set("value", stringSchemaWithDescription("Value of the alert expression."))
+ props.Set("activeAt", dateTimeSchemaWithDescription("Timestamp when the alert became active."))
+ props.Set("keepFiringSince", dateTimeSchemaWithDescription("Timestamp since the alert has been kept firing."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Alert information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"labels", "annotations", "state", "value"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) alertDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("alerts", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/Alert")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Alert discovery information containing all active alerts.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"alerts"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) alertmanagerTargetSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("url", stringSchemaWithDescription("URL of the Alertmanager instance."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Alertmanager target information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"url"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) alertmanagerDiscoverySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("activeAlertmanagers", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/AlertmanagerTarget")},
+ }))
+ props.Set("droppedAlertmanagers", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/AlertmanagerTarget")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Alertmanager discovery information including active and dropped instances.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"activeAlertmanagers", "droppedAlertmanagers"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusConfigDataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("yaml", stringSchemaWithDescription("Prometheus configuration in YAML format."))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Prometheus configuration.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"yaml"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) runtimeInfoSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("startTime", base.CreateSchemaProxy(&base.Schema{Type: []string{"string"}, Format: "date-time"}))
+ props.Set("CWD", stringSchema())
+ props.Set("hostname", stringSchema())
+ props.Set("serverTime", base.CreateSchemaProxy(&base.Schema{Type: []string{"string"}, Format: "date-time"}))
+ props.Set("reloadConfigSuccess", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+ props.Set("lastConfigTime", base.CreateSchemaProxy(&base.Schema{Type: []string{"string"}, Format: "date-time"}))
+ props.Set("corruptionCount", integerSchema())
+ props.Set("goroutineCount", integerSchema())
+ props.Set("GOMAXPROCS", integerSchema())
+ props.Set("GOMEMLIMIT", integerSchema())
+ props.Set("GOGC", stringSchema())
+ props.Set("GODEBUG", stringSchema())
+ props.Set("storageRetention", stringSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Prometheus runtime information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"startTime", "CWD", "hostname", "serverTime", "reloadConfigSuccess", "lastConfigTime", "corruptionCount", "goroutineCount", "GOMAXPROCS", "GOMEMLIMIT", "GOGC", "GODEBUG", "storageRetention"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) prometheusVersionSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("version", stringSchema())
+ props.Set("revision", stringSchema())
+ props.Set("branch", stringSchema())
+ props.Set("buildUser", stringSchema())
+ props.Set("buildDate", stringSchema())
+ props.Set("goVersion", stringSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Prometheus version information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"version", "revision", "branch", "buildUser", "buildDate", "goVersion"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusFlagsOutputBodySchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("status", statusSchema())
+ props.Set("data", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }))
+ props.Set("warnings", warningsSchema())
+ props.Set("infos", infosSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Response body for status flags endpoint.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"status", "data"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) headStatsSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("numSeries", integerSchema())
+ props.Set("numLabelPairs", integerSchema())
+ props.Set("chunkCount", integerSchema())
+ props.Set("minTime", integerSchema())
+ props.Set("maxTime", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "TSDB head statistics.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"numSeries", "numLabelPairs", "chunkCount", "minTime", "maxTime"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) tsdbStatSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("name", stringSchema())
+ props.Set("value", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "TSDB statistic.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"name", "value"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) tsdbStatusSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("headStats", schemaRef("#/components/schemas/HeadStats"))
+ props.Set("seriesCountByMetricName", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/TSDBStat")},
+ }))
+ props.Set("labelValueCountByLabelName", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/TSDBStat")},
+ }))
+ props.Set("memoryInBytesByLabelName", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/TSDBStat")},
+ }))
+ props.Set("seriesCountByLabelValuePair", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/TSDBStat")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "TSDB status information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"headStats", "seriesCountByMetricName", "labelValueCountByLabelName", "memoryInBytesByLabelName", "seriesCountByLabelValuePair"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) blockDescSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("ulid", stringSchema())
+ props.Set("minTime", integerSchema())
+ props.Set("maxTime", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Block descriptor.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"ulid", "minTime", "maxTime"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) blockStatsSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("numSamples", integerSchema())
+ props.Set("numSeries", integerSchema())
+ props.Set("numChunks", integerSchema())
+ props.Set("numTombstones", integerSchema())
+ props.Set("numFloatSamples", integerSchema())
+ props.Set("numHistogramSamples", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Block statistics.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) blockMetaCompactionSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("level", integerSchema())
+ props.Set("sources", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }))
+ props.Set("parents", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/BlockDesc")},
+ }))
+ props.Set("failed", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+ props.Set("deletable", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+ props.Set("hints", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Block compaction metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"level"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) blockMetaSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("ulid", stringSchema())
+ props.Set("minTime", integerSchema())
+ props.Set("maxTime", integerSchema())
+ props.Set("stats", schemaRef("#/components/schemas/BlockStats"))
+ props.Set("compaction", schemaRef("#/components/schemas/BlockMetaCompaction"))
+ props.Set("version", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Block metadata.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"ulid", "minTime", "maxTime", "compaction", "version"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusTSDBBlocksDataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("blocks", base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"array"},
+ Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/BlockMeta")},
+ }))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "TSDB blocks information.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"blocks"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) statusWALReplayDataSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("min", integerSchema())
+ props.Set("max", integerSchema())
+ props.Set("current", integerSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "WAL replay status.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"min", "max", "current"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) dataStructSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("name", stringSchema())
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Generic data structure with a name field.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"name"},
+ Properties: props,
+ })
+}
+
+func (*OpenAPIBuilder) notificationSchema() *base.SchemaProxy {
+ props := orderedmap.New[string, *base.SchemaProxy]()
+ props.Set("text", stringSchema())
+ props.Set("date", base.CreateSchemaProxy(&base.Schema{Type: []string{"string"}, Format: "date-time"}))
+ props.Set("active", base.CreateSchemaProxy(&base.Schema{Type: []string{"boolean"}}))
+
+ return base.CreateSchemaProxy(&base.Schema{
+ Type: []string{"object"},
+ Description: "Server notification.",
+ AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false},
+ Required: []string{"text", "date", "active"},
+ Properties: props,
+ })
+}
diff --git a/web/api/v1/openapi_test.go b/web/api/v1/openapi_test.go
new file mode 100644
index 0000000000..21547734c2
--- /dev/null
+++ b/web/api/v1/openapi_test.go
@@ -0,0 +1,289 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+ "go.yaml.in/yaml/v2"
+)
+
+// TestOpenAPIHTTPHandler verifies that the OpenAPI endpoint serves a valid specification
+// with correct headers, structure conforming to OpenAPI 3.1 standards, and consistent responses.
+func TestOpenAPIHTTPHandler(t *testing.T) {
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+
+ // First request.
+ req1 := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec1 := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec1, req1)
+
+ // Verify status code and headers.
+ require.Equal(t, http.StatusOK, rec1.Code)
+ require.True(t, strings.HasPrefix(rec1.Header().Get("Content-Type"), "application/yaml"), "Content-Type should start with application/yaml")
+ require.Equal(t, "no-cache, no-store, must-revalidate", rec1.Header().Get("Cache-Control"))
+
+ // Verify it is valid YAML.
+ var spec map[string]any
+ err := yaml.Unmarshal(rec1.Body.Bytes(), &spec)
+ require.NoError(t, err)
+
+ // Verify structure.
+ require.Contains(t, spec, "openapi")
+ require.Contains(t, spec, "info")
+ require.Contains(t, spec, "paths")
+ require.Contains(t, spec, "components")
+
+ // Verify OpenAPI version (default is 3.1.0).
+ require.Equal(t, "3.1.0", spec["openapi"])
+
+ // Verify info section.
+ info, ok := spec["info"].(map[any]any)
+ require.True(t, ok, "info should be a map")
+ require.Equal(t, "Prometheus API", info["title"])
+
+ // Verify paths exist.
+ paths, ok := spec["paths"].(map[any]any)
+ require.True(t, ok, "paths should be a map")
+ require.NotEmpty(t, paths, "paths should not be empty")
+
+ // Second request to verify response consistency.
+ req2 := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec2 := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec2, req2)
+
+ // Both responses should be identical.
+ require.Equal(t, rec1.Body.String(), rec2.Body.String())
+}
+
+// TestOpenAPIPathFiltering verifies that the IncludePaths option correctly filters
+// which API paths are included in the generated specification.
+func TestOpenAPIPathFiltering(t *testing.T) {
+ tests := []struct {
+ name string
+ includePaths []string
+ wantPaths []string
+ excludePaths []string
+ }{
+ {
+ name: "no filter includes all",
+ includePaths: nil,
+ wantPaths: []string{"/query", "/labels", "/alerts", "/targets"},
+ },
+ {
+ name: "filter query paths",
+ includePaths: []string{"/query"},
+ wantPaths: []string{"/query", "/query_range", "/query_exemplars"},
+ excludePaths: []string{"/labels", "/alerts", "/targets"},
+ },
+ {
+ name: "filter status paths",
+ includePaths: []string{"/status"},
+ wantPaths: []string{"/status/config", "/status/flags", "/status/runtimeinfo"},
+ excludePaths: []string{"/query", "/alerts", "/targets"},
+ },
+ {
+ name: "filter multiple prefixes",
+ includePaths: []string{"/label", "/series"},
+ wantPaths: []string{"/labels", "/label/{name}/values", "/series"},
+ excludePaths: []string{"/query", "/alerts", "/targets"},
+ },
+ {
+ name: "exact path match",
+ includePaths: []string{"/alerts"},
+ wantPaths: []string{"/alerts"},
+ excludePaths: []string{"/alertmanagers", "/query"},
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ builder := NewOpenAPIBuilder(OpenAPIOptions{
+ IncludePaths: tc.includePaths,
+ }, promslog.NewNopLogger())
+
+ req := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec, req)
+
+ require.Equal(t, http.StatusOK, rec.Code)
+
+ var spec map[string]any
+ err := yaml.Unmarshal(rec.Body.Bytes(), &spec)
+ require.NoError(t, err)
+
+ paths, ok := spec["paths"].(map[any]any)
+ require.True(t, ok, "paths should be a map")
+
+ for _, want := range tc.wantPaths {
+ require.Contains(t, paths, want)
+ }
+
+ for _, exclude := range tc.excludePaths {
+ require.NotContains(t, paths, exclude)
+ }
+ })
+ }
+}
+
+// TestOpenAPISchemaCompleteness verifies that all referenced schemas in paths
+// are defined in the components/schemas section of the specification.
+func TestOpenAPISchemaCompleteness(t *testing.T) {
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+
+ req := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec, req)
+
+ var spec map[string]any
+ err := yaml.Unmarshal(rec.Body.Bytes(), &spec)
+ require.NoError(t, err)
+
+ components, ok := spec["components"].(map[any]any)
+ require.True(t, ok, "components should be a map")
+
+ schemas, ok := components["schemas"].(map[any]any)
+ require.True(t, ok, "schemas should be a map")
+
+ // Verify essential schemas are present.
+ essentialSchemas := []string{
+ "Error",
+ "Labels",
+ "QueryOutputBody",
+ "LabelsOutputBody",
+ "SeriesOutputBody",
+ "TargetsOutputBody",
+ "AlertsOutputBody",
+ "RulesOutputBody",
+ "StatusConfigOutputBody",
+ "StatusFlagsOutputBody",
+ "PrometheusVersion",
+ }
+
+ for _, schema := range essentialSchemas {
+ require.Contains(t, schemas, schema)
+ }
+}
+
+// TODO: Add test to verify all routes from api.go Register() are covered in OpenAPI spec.
+// Consider wrapping Router to track registered paths and cross-check with OpenAPI paths.
+
+// TestOpenAPIShouldIncludePath verifies the shouldIncludePath method correctly
+// matches paths against the IncludePaths filter configuration.
+func TestOpenAPIShouldIncludePath(t *testing.T) {
+ tests := []struct {
+ name string
+ includePaths []string
+ path string
+ expected bool
+ }{
+ {
+ name: "empty filter includes all",
+ includePaths: nil,
+ path: "/query",
+ expected: true,
+ },
+ {
+ name: "exact match",
+ includePaths: []string{"/query"},
+ path: "/query",
+ expected: true,
+ },
+ {
+ name: "prefix match",
+ includePaths: []string{"/query"},
+ path: "/query_range",
+ expected: true,
+ },
+ {
+ name: "no match",
+ includePaths: []string{"/query"},
+ path: "/labels",
+ expected: false,
+ },
+ {
+ name: "multiple filters with match",
+ includePaths: []string{"/labels", "/series"},
+ path: "/series",
+ expected: true,
+ },
+ {
+ name: "multiple filters without match",
+ includePaths: []string{"/labels", "/series"},
+ path: "/query",
+ expected: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ builder := &OpenAPIBuilder{
+ options: OpenAPIOptions{
+ IncludePaths: tc.includePaths,
+ },
+ }
+
+ result := builder.shouldIncludePath(tc.path)
+ require.Equal(t, tc.expected, result)
+ })
+ }
+}
+
+// TestOpenAPIVersionConsistency verifies that both OpenAPI versions are properly generated
+// and that 3.2 has exactly one more path than 3.1 (/notifications/live).
+func TestOpenAPIVersionConsistency(t *testing.T) {
+ builder := NewOpenAPIBuilder(OpenAPIOptions{}, promslog.NewNopLogger())
+
+ // Fetch OpenAPI 3.1 spec (default).
+ req31 := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml", nil)
+ rec31 := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec31, req31)
+
+ require.Equal(t, http.StatusOK, rec31.Code)
+
+ // Fetch OpenAPI 3.2 spec.
+ req32 := httptest.NewRequest(http.MethodGet, "/api/v1/openapi.yaml?openapi_version=3.2", nil)
+ rec32 := httptest.NewRecorder()
+ builder.ServeOpenAPI(rec32, req32)
+
+ require.Equal(t, http.StatusOK, rec32.Code)
+
+ // Parse both specs.
+ var spec31, spec32 map[string]any
+ err := yaml.Unmarshal(rec31.Body.Bytes(), &spec31)
+ require.NoError(t, err)
+ err = yaml.Unmarshal(rec32.Body.Bytes(), &spec32)
+ require.NoError(t, err)
+
+ // Verify versions are different.
+ require.Equal(t, "3.1.0", spec31["openapi"])
+ require.Equal(t, "3.2.0", spec32["openapi"])
+
+ // Verify /notifications/live is only in 3.2.
+ paths31 := spec31["paths"].(map[any]any)
+ paths32 := spec32["paths"].(map[any]any)
+
+ require.NotContains(t, paths31, "/notifications/live")
+
+ require.Contains(t, paths32, "/notifications/live")
+
+ // Verify 3.2 has exactly one more path than 3.1.
+ require.Len(t, paths32, len(paths31)+1,
+ "OpenAPI 3.2 should have exactly one more path than 3.1")
+}
diff --git a/web/api/v1/test_helpers.go b/web/api/v1/test_helpers.go
new file mode 100644
index 0000000000..873a80c238
--- /dev/null
+++ b/web/api/v1/test_helpers.go
@@ -0,0 +1,159 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/route"
+
+ "github.com/prometheus/prometheus/promql/parser"
+ "github.com/prometheus/prometheus/web/api/testhelpers"
+)
+
+// newTestAPI creates a new API instance for testing using testhelpers.
+func newTestAPI(t *testing.T, cfg testhelpers.APIConfig) *testhelpers.APIWrapper {
+ t.Helper()
+
+ params := testhelpers.PrepareAPI(t, cfg)
+
+ // Adapt the testhelpers interfaces to v1 interfaces.
+ api := NewAPI(
+ params.QueryEngine,
+ params.Queryable,
+ nil, nil, // appendables
+ params.ExemplarQueryable,
+ func(ctx context.Context) ScrapePoolsRetriever {
+ return adaptScrapePoolsRetriever(params.ScrapePoolsRetriever(ctx))
+ },
+ func(ctx context.Context) TargetRetriever {
+ return adaptTargetRetriever(params.TargetRetriever(ctx))
+ },
+ func(ctx context.Context) AlertmanagerRetriever {
+ return adaptAlertmanagerRetriever(params.AlertmanagerRetriever(ctx))
+ },
+ params.ConfigFunc,
+ params.FlagsMap,
+ GlobalURLOptions{},
+ params.ReadyFunc,
+ adaptTSDBAdminStats(params.TSDBAdmin),
+ params.DBDir,
+ false, // enableAdmin
+ params.Logger,
+ func(ctx context.Context) RulesRetriever {
+ return adaptRulesRetriever(params.RulesRetriever(ctx))
+ },
+ 0, // remoteReadSampleLimit
+ 0, // remoteReadConcurrencyLimit
+ 0, // remoteReadMaxBytesInFrame
+ false, // isAgent
+ nil, // corsOrigin
+ func() (RuntimeInfo, error) {
+ info, err := params.RuntimeInfoFunc()
+ return RuntimeInfo{
+ StartTime: info.StartTime,
+ CWD: info.CWD,
+ Hostname: info.Hostname,
+ ServerTime: info.ServerTime,
+ ReloadConfigSuccess: info.ReloadConfigSuccess,
+ LastConfigTime: info.LastConfigTime,
+ CorruptionCount: info.CorruptionCount,
+ GoroutineCount: info.GoroutineCount,
+ GOMAXPROCS: info.GOMAXPROCS,
+ GOMEMLIMIT: info.GOMEMLIMIT,
+ GOGC: info.GOGC,
+ GODEBUG: info.GODEBUG,
+ StorageRetention: info.StorageRetention,
+ }, err
+ },
+ &PrometheusVersion{
+ Version: params.BuildInfo.Version,
+ Revision: params.BuildInfo.Revision,
+ Branch: params.BuildInfo.Branch,
+ BuildUser: params.BuildInfo.BuildUser,
+ BuildDate: params.BuildInfo.BuildDate,
+ GoVersion: params.BuildInfo.GoVersion,
+ },
+ params.NotificationsGetter,
+ params.NotificationsSub,
+ params.Gatherer,
+ params.Registerer,
+ nil, // statsRenderer
+ false, // rwEnabled
+ nil, // acceptRemoteWriteProtoMsgs
+ false, // otlpEnabled
+ false, // otlpDeltaToCumulative
+ false, // otlpNativeDeltaIngestion
+ false, // stZeroIngestionEnabled
+ 5*time.Minute, // lookbackDelta
+ false, // enableTypeAndUnitLabels
+ false, // appendMetadata
+ nil, // overrideErrorCode
+ nil, // featureRegistry
+ OpenAPIOptions{}, // openAPIOptions
+ parser.NewParser(parser.Options{}), // promqlParser
+ )
+
+ // Register routes.
+ router := route.New()
+ api.Register(router.WithPrefix("/api/v1"))
+
+ return &testhelpers.APIWrapper{
+ Handler: router,
+ }
+}
+
+// Adapter functions to convert testhelpers interfaces to v1 interfaces.
+
+type rulesRetrieverAdapter struct {
+ testhelpers.RulesRetriever
+}
+
+func adaptRulesRetriever(r testhelpers.RulesRetriever) RulesRetriever {
+ return &rulesRetrieverAdapter{r}
+}
+
+type targetRetrieverAdapter struct {
+ testhelpers.TargetRetriever
+}
+
+func adaptTargetRetriever(t testhelpers.TargetRetriever) TargetRetriever {
+ return &targetRetrieverAdapter{t}
+}
+
+type scrapePoolsRetrieverAdapter struct {
+ testhelpers.ScrapePoolsRetriever
+}
+
+func adaptScrapePoolsRetriever(s testhelpers.ScrapePoolsRetriever) ScrapePoolsRetriever {
+ return &scrapePoolsRetrieverAdapter{s}
+}
+
+type alertmanagerRetrieverAdapter struct {
+ testhelpers.AlertmanagerRetriever
+}
+
+func adaptAlertmanagerRetriever(a testhelpers.AlertmanagerRetriever) AlertmanagerRetriever {
+ return &alertmanagerRetrieverAdapter{a}
+}
+
+type tsdbAdminStatsAdapter struct {
+ testhelpers.TSDBAdminStats
+}
+
+func adaptTSDBAdminStats(t testhelpers.TSDBAdminStats) TSDBAdminStats {
+ return &tsdbAdminStatsAdapter{t}
+}
diff --git a/web/api/v1/testdata/openapi_3.1_golden.yaml b/web/api/v1/testdata/openapi_3.1_golden.yaml
new file mode 100644
index 0000000000..b1514f209d
--- /dev/null
+++ b/web/api/v1/testdata/openapi_3.1_golden.yaml
@@ -0,0 +1,4453 @@
+openapi: 3.1.0
+info:
+ title: Prometheus API
+ description: Prometheus is an Open-Source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach.
+ contact:
+ name: Prometheus Community
+ url: https://prometheus.io/community/
+ version: 0.0.1-undefined
+servers:
+ - url: /api/v1
+paths:
+ /query:
+ get:
+ tags:
+ - query
+ summary: Evaluate an instant query
+ operationId: query
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: time
+ in: query
+ description: The evaluation timestamp (optional, defaults to current time).
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: query
+ in: query
+ description: The PromQL query to execute.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: up
+ - name: timeout
+ in: query
+ description: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 30s
+ - name: lookback_delta
+ in: query
+ description: Override the lookback period for this query. Optional.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 5m
+ - name: stats
+ in: query
+ description: When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: all
+ responses:
+ "200":
+ description: Query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryOutputBody'
+ examples:
+ vectorResult:
+ summary: 'Instant vector query: up'
+ value: {"status": "success", "data": {"resultType": "vector", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "value": [1767436620, "1"]}, {"metric": {"__name__": "up", "env": "demo", "instance": "demo.prometheus.io:9093", "job": "alertmanager"}, "value": [1767436620, "1"]}]}}
+ scalarResult:
+ summary: 'Scalar query: scalar(42)'
+ value:
+ data:
+ result:
+ - 1767436620
+ - "42"
+ resultType: scalar
+ status: success
+ matrixResult:
+ summary: 'Range vector query: up[5m]'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767436320, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Evaluate an instant query
+ operationId: query-post
+ requestBody:
+ description: Submit an instant query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryPostInputBody'
+ examples:
+ simpleQuery:
+ summary: Simple instant query
+ value:
+ query: up
+ queryWithTime:
+ summary: Query with specific timestamp
+ value:
+ query: up{job="prometheus"}
+ time: "2026-01-02T13:37:00.000Z"
+ queryWithLimit:
+ summary: Query with limit and statistics
+ value:
+ limit: 100
+ query: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ stats: all
+ required: true
+ responses:
+ "200":
+ description: Instant query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryOutputBody'
+ examples:
+ vectorResult:
+ summary: 'Instant vector query: up'
+ value: {"status": "success", "data": {"resultType": "vector", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "value": [1767436620, "1"]}, {"metric": {"__name__": "up", "env": "demo", "instance": "demo.prometheus.io:9093", "job": "alertmanager"}, "value": [1767436620, "1"]}]}}
+ scalarResult:
+ summary: 'Scalar query: scalar(42)'
+ value:
+ data:
+ result:
+ - 1767436620
+ - "42"
+ resultType: scalar
+ status: success
+ matrixResult:
+ summary: 'Range vector query: up[5m]'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767436320, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing instant query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /query_range:
+ get:
+ tags:
+ - query
+ summary: Evaluate a range query
+ operationId: query-range
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: start
+ in: query
+ description: The start time of the query.
+ required: true
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: The end time of the query.
+ required: true
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: step
+ in: query
+ description: The step size of the query.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 15s
+ - name: query
+ in: query
+ description: The query to execute.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ - name: timeout
+ in: query
+ description: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 30s
+ - name: lookback_delta
+ in: query
+ description: Override the lookback period for this query. Optional.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 5m
+ - name: stats
+ in: query
+ description: When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: all
+ responses:
+ "200":
+ description: Range query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryRangeOutputBody'
+ examples:
+ matrixResult:
+ summary: 'Range query: rate(prometheus_http_requests_total[5m])'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767433020, "1"], [1767434820, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing range query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Evaluate a range query
+ operationId: query-range-post
+ requestBody:
+ description: Submit a range query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryRangePostInputBody'
+ examples:
+ basicRange:
+ summary: Basic range query
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: up
+ start: "2026-01-02T12:37:00.000Z"
+ step: 15s
+ rateQuery:
+ summary: Rate calculation over time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ start: "2026-01-02T12:37:00.000Z"
+ step: 30s
+ timeout: 30s
+ required: true
+ responses:
+ "200":
+ description: Range query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryRangeOutputBody'
+ examples:
+ matrixResult:
+ summary: 'Range query: rate(prometheus_http_requests_total[5m])'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767433020, "1"], [1767434820, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing range query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /query_exemplars:
+ get:
+ tags:
+ - query
+ summary: Query exemplars
+ operationId: query-exemplars
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for exemplars query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for exemplars query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: query
+ in: query
+ description: PromQL query to extract exemplars for.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus_http_requests_total
+ responses:
+ "200":
+ description: Exemplars retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsOutputBody'
+ examples:
+ exemplarsResult:
+ summary: Exemplars for a metric with trace IDs
+ value:
+ data:
+ - exemplars:
+ - labels:
+ traceID: abc123def456
+ timestamp: 1.689956451781e+09
+ value: "1.5"
+ seriesLabels:
+ __name__: http_requests_total
+ job: api-server
+ method: GET
+ status: success
+ default:
+ description: Error retrieving exemplars.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Query exemplars
+ operationId: query-exemplars-post
+ requestBody:
+ description: Submit an exemplars query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsPostInputBody'
+ examples:
+ basicExemplar:
+ summary: Query exemplars for a metric
+ value:
+ query: prometheus_http_requests_total
+ exemplarWithTimeRange:
+ summary: Exemplars within specific time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: prometheus_http_requests_total{job="prometheus"}
+ start: "2026-01-02T12:37:00.000Z"
+ required: true
+ responses:
+ "200":
+ description: Exemplars query completed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsOutputBody'
+ examples:
+ exemplarsResult:
+ summary: Exemplars for a metric with trace IDs
+ value:
+ data:
+ - exemplars:
+ - labels:
+ traceID: abc123def456
+ timestamp: 1.689956451781e+09
+ value: "1.5"
+ seriesLabels:
+ __name__: http_requests_total
+ job: api-server
+ method: GET
+ status: success
+ default:
+ description: Error processing exemplars query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /format_query:
+ get:
+ tags:
+ - query
+ summary: Format a PromQL query
+ operationId: format-query
+ parameters:
+ - name: query
+ in: query
+ description: PromQL expression to format.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: sum(rate(http_requests_total[5m])) by (job)
+ responses:
+ "200":
+ description: Query formatted successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FormatQueryOutputBody'
+ examples:
+ formattedQuery:
+ summary: Formatted PromQL query
+ value:
+ data: sum by(job, status) (rate(http_requests_total[5m]))
+ status: success
+ default:
+ description: Error formatting query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Format a PromQL query
+ operationId: format-query-post
+ requestBody:
+ description: Submit a PromQL query to format. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/FormatQueryPostInputBody'
+ examples:
+ simpleFormat:
+ summary: Format a simple query
+ value:
+ query: up{job="prometheus"}
+ complexFormat:
+ summary: Format a complex query
+ value:
+ query: sum(rate(http_requests_total[5m])) by (job, status)
+ required: true
+ responses:
+ "200":
+ description: Query formatting completed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FormatQueryOutputBody'
+ examples:
+ formattedQuery:
+ summary: Formatted PromQL query
+ value:
+ data: sum by(job, status) (rate(http_requests_total[5m]))
+ status: success
+ default:
+ description: Error formatting query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /parse_query:
+ get:
+ tags:
+ - query
+ summary: Parse a PromQL query
+ operationId: parse-query
+ parameters:
+ - name: query
+ in: query
+ description: PromQL expression to parse.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: up{job="prometheus"}
+ responses:
+ "200":
+ description: Query parsed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ParseQueryOutputBody'
+ examples:
+ parsedQuery:
+ summary: Parsed PromQL expression tree
+ value:
+ data:
+ resultType: vector
+ status: success
+ default:
+ description: Error parsing query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Parse a PromQL query
+ operationId: parse-query-post
+ requestBody:
+ description: Submit a PromQL query to parse. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/ParseQueryPostInputBody'
+ examples:
+ simpleParse:
+ summary: Parse a simple query
+ value:
+ query: up
+ complexParse:
+ summary: Parse a complex query
+ value:
+ query: rate(http_requests_total{job="api"}[5m])
+ required: true
+ responses:
+ "200":
+ description: Query parsed successfully via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ParseQueryOutputBody'
+ examples:
+ parsedQuery:
+ summary: Parsed PromQL expression tree
+ value:
+ data:
+ resultType: vector
+ status: success
+ default:
+ description: Error parsing query via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /labels:
+ get:
+ tags:
+ - labels
+ summary: Get label names
+ operationId: labels
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for label names query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for label names query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of label names to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ responses:
+ "200":
+ description: Label names retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelsOutputBody'
+ examples:
+ labelNames:
+ summary: List of label names
+ value:
+ data:
+ - __name__
+ - active
+ - address
+ - alertmanager
+ - alertname
+ - alertstate
+ - backend
+ - branch
+ - code
+ - collector
+ - component
+ - device
+ - env
+ - endpoint
+ - fstype
+ - handler
+ - instance
+ - job
+ - le
+ - method
+ - mode
+ - name
+ status: success
+ default:
+ description: Error retrieving label names.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - labels
+ summary: Get label names
+ operationId: labels-post
+ requestBody:
+ description: Submit a label names query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/LabelsPostInputBody'
+ examples:
+ allLabels:
+ summary: Get all label names
+ value: {}
+ labelsWithTimeRange:
+ summary: Get label names within time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ start: "2026-01-02T12:37:00.000Z"
+ labelsWithMatch:
+ summary: Get label names matching series selector
+ value:
+ match[]:
+ - up
+ - process_start_time_seconds{job="prometheus"}
+ required: true
+ responses:
+ "200":
+ description: Label names retrieved successfully via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelsOutputBody'
+ examples:
+ labelNames:
+ summary: List of label names
+ value:
+ data:
+ - __name__
+ - active
+ - address
+ - alertmanager
+ - alertname
+ - alertstate
+ - backend
+ - branch
+ - code
+ - collector
+ - component
+ - device
+ - env
+ - endpoint
+ - fstype
+ - handler
+ - instance
+ - job
+ - le
+ - method
+ - mode
+ - name
+ status: success
+ default:
+ description: Error retrieving label names via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /label/{name}/values:
+ get:
+ tags:
+ - labels
+ summary: Get label values
+ operationId: label-values
+ parameters:
+ - name: name
+ in: path
+ description: Label name.
+ required: true
+ schema:
+ type: string
+ - name: start
+ in: query
+ description: Start timestamp for label values query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for label values query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of label values to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 1000
+ responses:
+ "200":
+ description: Label values retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelValuesOutputBody'
+ examples:
+ labelValues:
+ summary: List of values for a label
+ value:
+ data:
+ - alertmanager
+ - blackbox
+ - caddy
+ - cadvisor
+ - grafana
+ - node
+ - prometheus
+ - random
+ status: success
+ default:
+ description: Error retrieving label values.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /series:
+ get:
+ tags:
+ - series
+ summary: Find series by label matchers
+ operationId: series
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for series query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for series query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of series to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ responses:
+ "200":
+ description: Series returned matching the provided label matchers.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesOutputBody'
+ examples:
+ seriesList:
+ summary: List of series matching the selector
+ value:
+ data:
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:8080
+ job: cadvisor
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9100
+ job: node
+ - __name__: up
+ instance: demo.prometheus.io:3000
+ job: grafana
+ - __name__: up
+ instance: demo.prometheus.io:8996
+ job: random
+ status: success
+ default:
+ description: Error retrieving series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - series
+ summary: Find series by label matchers
+ operationId: series-post
+ requestBody:
+ description: Submit a series query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/SeriesPostInputBody'
+ examples:
+ seriesMatch:
+ summary: Find series by label matchers
+ value:
+ match[]:
+ - up
+ seriesWithTimeRange:
+ summary: Find series with time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ match[]:
+ - up
+ - process_cpu_seconds_total{job="prometheus"}
+ start: "2026-01-02T12:37:00.000Z"
+ required: true
+ responses:
+ "200":
+ description: Series returned matching the provided label matchers via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesOutputBody'
+ examples:
+ seriesList:
+ summary: List of series matching the selector
+ value:
+ data:
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:8080
+ job: cadvisor
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9100
+ job: node
+ - __name__: up
+ instance: demo.prometheus.io:3000
+ job: grafana
+ - __name__: up
+ instance: demo.prometheus.io:8996
+ job: random
+ status: success
+ default:
+ description: Error retrieving series via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ delete:
+ tags:
+ - series
+ summary: Delete series
+ description: 'Delete series matching selectors. Note: This is deprecated, use POST /admin/tsdb/delete_series instead.'
+ operationId: delete-series
+ responses:
+ "200":
+ description: Series marked for deletion.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesDeleteOutputBody'
+ examples:
+ seriesDeleted:
+ summary: Series marked for deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /metadata:
+ get:
+ tags:
+ - metadata
+ summary: Get metadata
+ operationId: get-metadata
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: limit_per_metric
+ in: query
+ description: The maximum number of metadata entries per metric.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ - name: metric
+ in: query
+ description: A metric name to filter metadata for.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: http_requests_total
+ responses:
+ "200":
+ description: Metric metadata retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/MetadataOutputBody'
+ examples:
+ metricMetadata:
+ summary: Metadata for metrics
+ value:
+ data:
+ go_gc_stack_starting_size_bytes:
+ - help: The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes.
+ type: gauge
+ unit: ""
+ prometheus_rule_group_iterations_missed_total:
+ - help: The total number of rule group evaluations missed due to slow rule group evaluation.
+ type: counter
+ unit: ""
+ prometheus_sd_updates_total:
+ - help: Total number of update events sent to the SD consumers.
+ type: counter
+ unit: ""
+ status: success
+ default:
+ description: Error retrieving metadata.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /scrape_pools:
+ get:
+ tags:
+ - targets
+ summary: Get scrape pools
+ operationId: get-scrape-pools
+ responses:
+ "200":
+ description: Scrape pools retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ScrapePoolsOutputBody'
+ examples:
+ scrapePoolsList:
+ summary: List of scrape pool names
+ value:
+ data:
+ scrapePools:
+ - alertmanager
+ - blackbox
+ - caddy
+ - cadvisor
+ - grafana
+ - node
+ - prometheus
+ - random
+ status: success
+ default:
+ description: Error retrieving scrape pools.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets:
+ get:
+ tags:
+ - targets
+ summary: Get targets
+ operationId: get-targets
+ parameters:
+ - name: scrapePool
+ in: query
+ description: Filter targets by scrape pool name.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus
+ - name: state
+ in: query
+ description: 'Filter by state: active, dropped, or any.'
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: active
+ responses:
+ "200":
+ description: Target discovery information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetsOutputBody'
+ examples:
+ targetsList:
+ summary: Active and dropped targets
+ value:
+ data:
+ activeTargets:
+ - discoveredLabels:
+ __address__: demo.prometheus.io:9093
+ __meta_filepath: /etc/prometheus/file_sd/alertmanager.yml
+ __metrics_path__: /metrics
+ __scheme__: http
+ env: demo
+ job: alertmanager
+ globalUrl: http://demo.prometheus.io:9093/metrics
+ health: up
+ labels:
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ lastError: ""
+ lastScrape: "2026-01-02T13:36:40.200Z"
+ lastScrapeDuration: 0.006576866
+ scrapeInterval: 15s
+ scrapePool: alertmanager
+ scrapeTimeout: 10s
+ scrapeUrl: http://demo.prometheus.io:9093/metrics
+ droppedTargetCounts:
+ alertmanager: 0
+ blackbox: 0
+ caddy: 0
+ cadvisor: 0
+ grafana: 0
+ node: 0
+ prometheus: 0
+ random: 0
+ droppedTargets: []
+ status: success
+ default:
+ description: Error retrieving targets.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets/metadata:
+ get:
+ tags:
+ - targets
+ summary: Get targets metadata
+ operationId: get-targets-metadata
+ parameters:
+ - name: match_target
+ in: query
+ description: Label selector to filter targets.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: '{job="prometheus"}'
+ - name: metric
+ in: query
+ description: Metric name to retrieve metadata for.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: http_requests_total
+ - name: limit
+ in: query
+ description: Maximum number of targets to match.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ responses:
+ "200":
+ description: Target metadata retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetMetadataOutputBody'
+ examples:
+ targetMetadata:
+ summary: Metadata for targets
+ value:
+ data:
+ - help: The current health status of the target
+ metric: up
+ target:
+ instance: localhost:9090
+ job: prometheus
+ type: gauge
+ unit: ""
+ status: success
+ default:
+ description: Error retrieving target metadata.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets/relabel_steps:
+ get:
+ tags:
+ - targets
+ summary: Get targets relabel steps
+ operationId: get-targets-relabel-steps
+ parameters:
+ - name: scrapePool
+ in: query
+ description: Name of the scrape pool.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus
+ - name: labels
+ in: query
+ description: JSON-encoded labels to apply relabel rules to.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: '{"__address__":"localhost:9090","job":"prometheus"}'
+ responses:
+ "200":
+ description: Relabel steps retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetRelabelStepsOutputBody'
+ examples:
+ relabelSteps:
+ summary: Relabel steps for a target
+ value:
+ data:
+ steps:
+ - keep: true
+ output:
+ __address__: localhost:9090
+ instance: localhost:9090
+ job: prometheus
+ rule:
+ action: replace
+ regex: (.*)
+ replacement: $1
+ source_labels:
+ - __address__
+ target_label: instance
+ status: success
+ default:
+ description: Error retrieving relabel steps.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /rules:
+ get:
+ tags:
+ - rules
+ summary: Get alerting and recording rules
+ operationId: rules
+ parameters:
+ - name: type
+ in: query
+ description: 'Filter by rule type: alert or record.'
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: alert
+ - name: rule_name[]
+ in: query
+ description: Filter by rule name.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - HighErrorRate
+ - name: rule_group[]
+ in: query
+ description: Filter by rule group name.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - example_alerts
+ - name: file[]
+ in: query
+ description: Filter by file path.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - /etc/prometheus/rules.yml
+ - name: match[]
+ in: query
+ description: Label matchers to filter rules.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{severity="critical"}'
+ - name: exclude_alerts
+ in: query
+ description: Exclude active alerts from response.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ - name: group_limit
+ in: query
+ description: Maximum number of rule groups to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: group_next_token
+ in: query
+ description: Pagination token for next page.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: abc123
+ responses:
+ "200":
+ description: Rules retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RulesOutputBody'
+ examples:
+ ruleGroups:
+ summary: Alerting and recording rules
+ value:
+ data:
+ groups:
+ - evaluationTime: 0.000561635
+ file: /etc/prometheus/rules/ansible_managed.yml
+ interval: 15
+ lastEvaluation: "2026-01-02T13:36:56.874Z"
+ limit: 0
+ name: ansible managed alert rules
+ rules:
+ - annotations:
+ description: This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty.
+ summary: Ensure entire alerting pipeline is functional
+ duration: 600
+ evaluationTime: 0.000356688
+ health: ok
+ keepFiringFor: 0
+ labels:
+ severity: warning
+ lastEvaluation: "2026-01-02T13:36:56.874Z"
+ name: Watchdog
+ query: vector(1)
+ state: firing
+ type: alerting
+ status: success
+ default:
+ description: Error retrieving rules.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /alerts:
+ get:
+ tags:
+ - alerts
+ summary: Get active alerts
+ operationId: alerts
+ responses:
+ "200":
+ description: Active alerts retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AlertsOutputBody'
+ examples:
+ activeAlerts:
+ summary: Currently active alerts
+ value:
+ data:
+ alerts:
+ - activeAt: "2026-01-02T13:30:00.000Z"
+ annotations:
+ description: This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty.
+ summary: Ensure entire alerting pipeline is functional
+ labels:
+ alertname: Watchdog
+ severity: warning
+ state: firing
+ value: "1e+00"
+ status: success
+ default:
+ description: Error retrieving alerts.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /alertmanagers:
+ get:
+ tags:
+ - alerts
+ summary: Get Alertmanager discovery
+ operationId: alertmanagers
+ responses:
+ "200":
+ description: Alertmanager targets retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AlertmanagersOutputBody'
+ examples:
+ alertmanagerDiscovery:
+ summary: Alertmanager discovery results
+ value:
+ data:
+ activeAlertmanagers:
+ - url: http://demo.prometheus.io:9093/api/v2/alerts
+ droppedAlertmanagers: []
+ status: success
+ default:
+ description: Error retrieving Alertmanager targets.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/config:
+ get:
+ tags:
+ - status
+ summary: Get status config
+ operationId: get-status-config
+ responses:
+ "200":
+ description: Configuration retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusConfigOutputBody'
+ examples:
+ configYAML:
+ summary: Prometheus configuration
+ value:
+ data:
+ yaml: |
+ global:
+ scrape_interval: 15s
+ scrape_timeout: 10s
+ evaluation_interval: 15s
+ external_labels:
+ environment: demo-prometheus-io
+ alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets:
+ - demo.prometheus.io:9093
+ rule_files:
+ - /etc/prometheus/rules/*.yml
+ status: success
+ default:
+ description: Error retrieving configuration.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/runtimeinfo:
+ get:
+ tags:
+ - status
+ summary: Get status runtimeinfo
+ operationId: get-status-runtimeinfo
+ responses:
+ "200":
+ description: Runtime information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusRuntimeInfoOutputBody'
+ examples:
+ runtimeInfo:
+ summary: Runtime information
+ value:
+ data:
+ CWD: /
+ GODEBUG: ""
+ GOGC: "75"
+ GOMAXPROCS: 2
+ GOMEMLIMIT: 3703818240
+ corruptionCount: 0
+ goroutineCount: 88
+ hostname: demo-prometheus-io
+ lastConfigTime: "2026-01-01T13:37:00.000Z"
+ reloadConfigSuccess: true
+ serverTime: "2026-01-02T13:37:00.000Z"
+ startTime: "2026-01-01T13:37:00.000Z"
+ storageRetention: 31d
+ status: success
+ default:
+ description: Error retrieving runtime information.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/buildinfo:
+ get:
+ tags:
+ - status
+ summary: Get status buildinfo
+ operationId: get-status-buildinfo
+ responses:
+ "200":
+ description: Build information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusBuildInfoOutputBody'
+ examples:
+ buildInfo:
+ summary: Build information
+ value:
+ data:
+ branch: HEAD
+ buildDate: 20251030-07:26:10
+ buildUser: root@08c890a84441
+ goVersion: go1.25.3
+ revision: 0a41f0000705c69ab8e0f9a723fc73e39ed62b07
+ version: 3.7.3
+ status: success
+ default:
+ description: Error retrieving build information.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/flags:
+ get:
+ tags:
+ - status
+ summary: Get status flags
+ operationId: get-status-flags
+ responses:
+ "200":
+ description: Command-line flags retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusFlagsOutputBody'
+ examples:
+ flags:
+ summary: Command-line flags
+ value:
+ data:
+ agent: "false"
+ alertmanager.notification-queue-capacity: "10000"
+ config.file: /etc/prometheus/prometheus.yml
+ enable-feature: exemplar-storage,native-histograms
+ query.max-concurrency: "20"
+ query.timeout: 2m
+ storage.tsdb.path: /prometheus
+ storage.tsdb.retention.time: 15d
+ web.console.libraries: /usr/share/prometheus/console_libraries
+ web.console.templates: /usr/share/prometheus/consoles
+ web.enable-admin-api: "true"
+ web.enable-lifecycle: "true"
+ web.listen-address: 0.0.0.0:9090
+ web.page-title: Prometheus Time Series Collection and Processing Server
+ status: success
+ default:
+ description: Error retrieving flags.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/tsdb:
+ get:
+ tags:
+ - status
+ summary: Get TSDB status
+ operationId: status-tsdb
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of items to return per category.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ responses:
+ "200":
+ description: TSDB status retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusTSDBOutputBody'
+ examples:
+ tsdbStats:
+ summary: TSDB statistics
+ value:
+ data:
+ headStats:
+ chunkCount: 37525
+ maxTime: 1767436620000
+ minTime: 1767362400712
+ numLabelPairs: 2512
+ numSeries: 9925
+ labelValueCountByLabelName:
+ - name: __name__
+ value: 5
+ - name: job
+ value: 3
+ memoryInBytesByLabelName:
+ - name: __name__
+ value: 1024
+ - name: job
+ value: 512
+ seriesCountByLabelValuePair:
+ - name: job=prometheus
+ value: 100
+ - name: instance=localhost:9090
+ value: 100
+ seriesCountByMetricName:
+ - name: up
+ value: 100
+ - name: http_requests_total
+ value: 500
+ status: success
+ default:
+ description: Error retrieving TSDB status.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/tsdb/blocks:
+ get:
+ tags:
+ - status
+ summary: Get TSDB blocks information
+ operationId: status-tsdb-blocks
+ responses:
+ "200":
+ description: TSDB blocks information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusTSDBBlocksOutputBody'
+ examples:
+ tsdbBlocks:
+ summary: TSDB block information
+ value:
+ data:
+ blocks:
+ - compaction:
+ level: 4
+ sources:
+ - 01KBCJ7TR8A4QAJ3AA1J651P5S
+ - 01KBCS3J0E34567YPB8Y5W0E24
+ - 01KBCZZ9KRTYGG3E7HVQFGC3S3
+ maxTime: 1764763200000
+ minTime: 1764568801099
+ stats:
+ numChunks: 1073962
+ numSamples: 129505582
+ numSeries: 10661
+ ulid: 01KC4D6GXQA4CRHYKV78NEBVAE
+ version: 1
+ status: success
+ default:
+ description: Error retrieving TSDB blocks.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/walreplay:
+ get:
+ tags:
+ - status
+ summary: Get status walreplay
+ operationId: get-status-walreplay
+ responses:
+ "200":
+ description: WAL replay status retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusWALReplayOutputBody'
+ examples:
+ walReplay:
+ summary: WAL replay status
+ value:
+ data:
+ current: 3214
+ max: 3214
+ min: 3209
+ status: success
+ default:
+ description: Error retrieving WAL replay status.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/delete_series:
+ put:
+ tags:
+ - admin
+ summary: Delete series matching selectors via PUT
+ description: Deletes data for a selection of series in a time range using PUT method.
+ operationId: deleteSeriesPut
+ parameters:
+ - name: match[]
+ in: query
+ description: Series selectors to identify series to delete.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{__name__=~"test.*"}'
+ - name: start
+ in: query
+ description: Start timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ responses:
+ "200":
+ description: Series deleted successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteSeriesOutputBody'
+ examples:
+ deletionSuccess:
+ summary: Successful series deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Delete series matching selectors
+ description: Deletes data for a selection of series in a time range.
+ operationId: deleteSeriesPost
+ parameters:
+ - name: match[]
+ in: query
+ description: Series selectors to identify series to delete.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{__name__=~"test.*"}'
+ - name: start
+ in: query
+ description: Start timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ responses:
+ "200":
+ description: Series deleted successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteSeriesOutputBody'
+ examples:
+ deletionSuccess:
+ summary: Successful series deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/clean_tombstones:
+ put:
+ tags:
+ - admin
+ summary: Clean tombstones in the TSDB via PUT
+ description: Removes deleted data from disk and cleans up existing tombstones using PUT method.
+ operationId: cleanTombstonesPut
+ responses:
+ "200":
+ description: Tombstones cleaned successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CleanTombstonesOutputBody'
+ examples:
+ tombstonesCleaned:
+ summary: Tombstones cleaned successfully
+ value:
+ status: success
+ default:
+ description: Error cleaning tombstones via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Clean tombstones in the TSDB
+ description: Removes deleted data from disk and cleans up existing tombstones.
+ operationId: cleanTombstonesPost
+ responses:
+ "200":
+ description: Tombstones cleaned successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CleanTombstonesOutputBody'
+ examples:
+ tombstonesCleaned:
+ summary: Tombstones cleaned successfully
+ value:
+ status: success
+ default:
+ description: Error cleaning tombstones.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/snapshot:
+ put:
+ tags:
+ - admin
+ summary: Create a snapshot of the TSDB via PUT
+ description: Creates a snapshot of all current data using PUT method.
+ operationId: snapshotPut
+ parameters:
+ - name: skip_head
+ in: query
+ description: If true, do not snapshot data in the head block.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ responses:
+ "200":
+ description: Snapshot created successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnapshotOutputBody'
+ examples:
+ snapshotCreated:
+ summary: Snapshot created successfully
+ value:
+ data:
+ name: 20260102T133700Z-a1b2c3d4e5f67890
+ status: success
+ default:
+ description: Error creating snapshot via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Create a snapshot of the TSDB
+ description: Creates a snapshot of all current data.
+ operationId: snapshotPost
+ parameters:
+ - name: skip_head
+ in: query
+ description: If true, do not snapshot data in the head block.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ responses:
+ "200":
+ description: Snapshot created successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnapshotOutputBody'
+ examples:
+ snapshotCreated:
+ summary: Snapshot created successfully
+ value:
+ data:
+ name: 20260102T133700Z-a1b2c3d4e5f67890
+ status: success
+ default:
+ description: Error creating snapshot.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /read:
+ post:
+ tags:
+ - remote
+ summary: Remote read endpoint
+ description: Prometheus remote read endpoint for federated queries. Accepts and returns Protocol Buffer encoded data.
+ operationId: remoteRead
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /write:
+ post:
+ tags:
+ - remote
+ summary: Remote write endpoint
+ description: Prometheus remote write endpoint for sending metrics. Accepts Protocol Buffer encoded write requests.
+ operationId: remoteWrite
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /otlp/v1/metrics:
+ post:
+ tags:
+ - otlp
+ summary: OTLP metrics write endpoint
+ description: OpenTelemetry Protocol metrics ingestion endpoint. Accepts OTLP/HTTP metrics in Protocol Buffer format.
+ operationId: otlpWrite
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /notifications:
+ get:
+ tags:
+ - notifications
+ summary: Get notifications
+ operationId: get-notifications
+ responses:
+ "200":
+ description: Notifications retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/NotificationsOutputBody'
+ examples:
+ notifications:
+ summary: Server notifications
+ value:
+ data:
+ - active: true
+ date: "2026-01-02T16:14:50.046Z"
+ text: Configuration reload has failed.
+ status: success
+ default:
+ description: Error retrieving notifications.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /features:
+ get:
+ tags:
+ - features
+ summary: Get features
+ operationId: get-features
+ responses:
+ "200":
+ description: Feature flags retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FeaturesOutputBody'
+ examples:
+ enabledFeatures:
+ summary: Enabled feature flags
+ value:
+ data:
+ - exemplar-storage
+ - remote-write-receiver
+ status: success
+ default:
+ description: Error retrieving features.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+components:
+ schemas:
+ Error:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ errorType:
+ type: string
+ description: Type of error that occurred.
+ example: bad_data
+ error:
+ type: string
+ description: Human-readable error message.
+ example: invalid parameter
+ required:
+ - status
+ - errorType
+ - error
+ additionalProperties: false
+ description: Error response.
+ Labels:
+ type: object
+ additionalProperties: true
+ description: Label set represented as a key-value map.
+ QueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/QueryData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for instant query.
+ QueryRangeOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/QueryData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for range query.
+ QueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The PromQL query to execute.'
+ example: up
+ time:
+ type: string
+ description: 'Form field: The evaluation timestamp (optional, defaults to current time).'
+ example: "2023-07-21T20:10:51.781Z"
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of metrics to return.'
+ example: 100
+ timeout:
+ type: string
+ description: 'Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).'
+ example: 30s
+ lookback_delta:
+ type: string
+ description: 'Form field: Override the lookback period for this query (optional).'
+ example: 5m
+ stats:
+ type: string
+ description: 'Form field: When provided, include query statistics in the response (the special value ''all'' enables more comprehensive statistics).'
+ example: all
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for instant query.
+ QueryRangePostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to execute.'
+ example: rate(http_requests_total[5m])
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:10:30.781Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T20:20:30.781Z"
+ step:
+ type: string
+ description: 'Form field: The step size of the query.'
+ example: 15s
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of metrics to return.'
+ example: 100
+ timeout:
+ type: string
+ description: 'Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).'
+ example: 30s
+ lookback_delta:
+ type: string
+ description: 'Form field: Override the lookback period for this query (optional).'
+ example: 5m
+ stats:
+ type: string
+ description: 'Form field: When provided, include query statistics in the response (the special value ''all'' enables more comprehensive statistics).'
+ example: all
+ required:
+ - query
+ - start
+ - end
+ - step
+ additionalProperties: false
+ description: POST request body for range query.
+ QueryExemplarsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ QueryExemplarsPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to execute.'
+ example: http_requests_total
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for exemplars query.
+ FormatQueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: string
+ description: Formatted query string.
+ example: sum by(status) (rate(http_requests_total[5m]))
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for format query endpoint.
+ FormatQueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to format.'
+ example: sum(rate(http_requests_total[5m])) by (status)
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for format query.
+ ParseQueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ ParseQueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to parse.'
+ example: sum(rate(http_requests_total[5m]))
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for parse query.
+ QueryData:
+ anyOf:
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - vector
+ result:
+ type: array
+ items:
+ anyOf:
+ - $ref: '#/components/schemas/FloatSample'
+ - $ref: '#/components/schemas/HistogramSample'
+ description: Array of samples (either float or histogram).
+ stats:
+ $ref: '#/components/schemas/QueryStats'
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - matrix
+ result:
+ type: array
+ items:
+ anyOf:
+ - $ref: '#/components/schemas/FloatSeries'
+ - $ref: '#/components/schemas/HistogramSeries'
+ description: Array of time series (either float or histogram).
+ stats:
+ $ref: '#/components/schemas/QueryStats'
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - scalar
+ result:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Scalar value as [timestamp, stringValue].
+ stats:
+ $ref: '#/components/schemas/QueryStats'
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - string
+ result:
+ type: array
+ items:
+ type: string
+ maxItems: 2
+ minItems: 2
+ description: String value as [timestamp, stringValue].
+ stats:
+ $ref: '#/components/schemas/QueryStats'
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ description: Query result data. The structure of 'result' depends on 'resultType'.
+ example:
+ result:
+ - metric:
+ __name__: up
+ job: prometheus
+ value:
+ - 1627845600
+ - "1"
+ resultType: vector
+ QueryStats:
+ type: object
+ properties:
+ timings:
+ type: object
+ properties:
+ evalTotalTime:
+ type: number
+ description: Total evaluation time in seconds.
+ resultSortTime:
+ type: number
+ description: Time spent sorting results in seconds.
+ queryPreparationTime:
+ type: number
+ description: Query preparation time in seconds.
+ innerEvalTime:
+ type: number
+ description: Inner evaluation time in seconds.
+ execQueueTime:
+ type: number
+ description: Execution queue wait time in seconds.
+ execTotalTime:
+ type: number
+ description: Total execution time in seconds.
+ samples:
+ type: object
+ properties:
+ totalQueryableSamples:
+ type: integer
+ description: Total number of samples that were queryable.
+ peakSamples:
+ type: integer
+ description: Peak number of samples in memory.
+ totalQueryableSamplesPerStep:
+ type: array
+ items:
+ type: array
+ items:
+ type: number
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and sample count as [timestamp, count].
+ description: Total queryable samples per step (only included with stats=all).
+ description: Query execution statistics (included when the stats query parameter is provided).
+ FloatSample:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ value:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and float value as [unixTimestamp, stringValue].
+ example:
+ - 1767436620
+ - "1"
+ required:
+ - metric
+ - value
+ additionalProperties: false
+ description: A sample with a float value.
+ HistogramSample:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ histogram:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - $ref: '#/components/schemas/HistogramValue'
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and histogram value as [unixTimestamp, histogramObject].
+ example:
+ - 1767436620
+ - buckets: []
+ count: "60"
+ sum: "120"
+ required:
+ - metric
+ - histogram
+ additionalProperties: false
+ description: A sample with a native histogram value.
+ FloatSeries:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ values:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Array of [timestamp, stringValue] pairs for float values.
+ required:
+ - metric
+ - values
+ additionalProperties: false
+ description: A time series with float values.
+ HistogramSeries:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ histograms:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - $ref: '#/components/schemas/HistogramValue'
+ maxItems: 2
+ minItems: 2
+ description: Array of [timestamp, histogramObject] pairs for histogram values.
+ required:
+ - metric
+ - histograms
+ additionalProperties: false
+ description: A time series with native histogram values.
+ HistogramValue:
+ type: object
+ properties:
+ count:
+ type: string
+ description: Total count of observations.
+ sum:
+ type: string
+ description: Sum of all observed values.
+ buckets:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ description: Histogram buckets as [boundary_rule, lower, upper, count].
+ required:
+ - count
+ - sum
+ additionalProperties: false
+ description: Native histogram value representation.
+ LabelsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ type: string
+ example:
+ - __name__
+ - job
+ - instance
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of strings.
+ LabelsPostInputBody:
+ type: object
+ properties:
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ match[]:
+ type: array
+ items:
+ type: string
+ description: 'Form field: Series selector argument that selects the series from which to read the label names.'
+ example:
+ - '{job="prometheus"}'
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of label names to return.'
+ example: 100
+ additionalProperties: false
+ description: POST request body for labels query.
+ LabelValuesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ type: string
+ example:
+ - __name__
+ - job
+ - instance
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of strings.
+ SeriesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Labels'
+ example:
+ - __name__: up
+ instance: localhost:9090
+ job: prometheus
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of label sets.
+ SeriesPostInputBody:
+ type: object
+ properties:
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ match[]:
+ type: array
+ items:
+ type: string
+ description: 'Form field: Series selector argument that selects the series to return.'
+ example:
+ - '{job="prometheus"}'
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of series to return.'
+ example: 100
+ required:
+ - match[]
+ additionalProperties: false
+ description: POST request body for series query.
+ SeriesDeleteOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ Metadata:
+ type: object
+ properties:
+ type:
+ type: string
+ description: Metric type (counter, gauge, histogram, summary, or untyped).
+ unit:
+ type: string
+ description: Unit of the metric.
+ help:
+ type: string
+ description: Help text describing the metric.
+ required:
+ - type
+ - unit
+ - help
+ additionalProperties: false
+ description: Metric metadata.
+ MetadataOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: object
+ additionalProperties:
+ type: array
+ items:
+ $ref: '#/components/schemas/Metadata'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for metadata endpoint.
+ MetricMetadata:
+ type: object
+ properties:
+ target:
+ $ref: '#/components/schemas/Labels'
+ metric:
+ type: string
+ description: Metric name.
+ type:
+ type: string
+ description: Metric type (counter, gauge, histogram, summary, or untyped).
+ help:
+ type: string
+ description: Help text describing the metric.
+ unit:
+ type: string
+ description: Unit of the metric.
+ required:
+ - target
+ - type
+ - help
+ - unit
+ additionalProperties: false
+ description: Target metric metadata.
+ Target:
+ type: object
+ properties:
+ discoveredLabels:
+ $ref: '#/components/schemas/Labels'
+ labels:
+ $ref: '#/components/schemas/Labels'
+ scrapePool:
+ type: string
+ description: Name of the scrape pool.
+ scrapeUrl:
+ type: string
+ description: URL of the target.
+ globalUrl:
+ type: string
+ description: Global URL of the target.
+ lastError:
+ type: string
+ description: Last error message from scraping.
+ lastScrape:
+ type: string
+ format: date-time
+ description: Timestamp of the last scrape.
+ lastScrapeDuration:
+ type: number
+ format: double
+ description: Duration of the last scrape in seconds.
+ health:
+ type: string
+ description: Health status of the target (up, down, or unknown).
+ scrapeInterval:
+ type: string
+ description: Scrape interval for this target.
+ scrapeTimeout:
+ type: string
+ description: Scrape timeout for this target.
+ required:
+ - discoveredLabels
+ - labels
+ - scrapePool
+ - scrapeUrl
+ - globalUrl
+ - lastError
+ - lastScrape
+ - lastScrapeDuration
+ - health
+ - scrapeInterval
+ - scrapeTimeout
+ additionalProperties: false
+ description: Scrape target information.
+ DroppedTarget:
+ type: object
+ properties:
+ discoveredLabels:
+ $ref: '#/components/schemas/Labels'
+ scrapePool:
+ type: string
+ description: Name of the scrape pool.
+ required:
+ - discoveredLabels
+ - scrapePool
+ additionalProperties: false
+ description: Dropped target information.
+ TargetDiscovery:
+ type: object
+ properties:
+ activeTargets:
+ type: array
+ items:
+ $ref: '#/components/schemas/Target'
+ droppedTargets:
+ type: array
+ items:
+ $ref: '#/components/schemas/DroppedTarget'
+ droppedTargetCounts:
+ type: object
+ additionalProperties:
+ type: integer
+ format: int64
+ required:
+ - activeTargets
+ - droppedTargets
+ - droppedTargetCounts
+ additionalProperties: false
+ description: Target discovery information including active and dropped targets.
+ TargetsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/TargetDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for targets endpoint.
+ TargetMetadataOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/MetricMetadata'
+ example:
+ - help: The current health status of the target
+ metric: up
+ target:
+ instance: localhost:9090
+ job: prometheus
+ type: gauge
+ unit: ""
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of metric metadata.
+ ScrapePoolsDiscovery:
+ type: object
+ properties:
+ scrapePools:
+ type: array
+ items:
+ type: string
+ required:
+ - scrapePools
+ additionalProperties: false
+ description: List of all configured scrape pools.
+ ScrapePoolsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/ScrapePoolsDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for scrape pools endpoint.
+ Config:
+ type: object
+ properties:
+ source_labels:
+ type: array
+ items:
+ type: string
+ description: Source labels for relabeling.
+ separator:
+ type: string
+ description: Separator for source label values.
+ regex:
+ type: string
+ description: Regular expression for matching.
+ modulus:
+ type: integer
+ format: int64
+ description: Modulus for hash-based relabeling.
+ target_label:
+ type: string
+ description: Target label name.
+ replacement:
+ type: string
+ description: Replacement value.
+ action:
+ type: string
+ description: Relabel action.
+ additionalProperties: false
+ description: Relabel configuration.
+ RelabelStep:
+ type: object
+ properties:
+ rule:
+ $ref: '#/components/schemas/Config'
+ output:
+ $ref: '#/components/schemas/Labels'
+ keep:
+ type: boolean
+ required:
+ - rule
+ - output
+ - keep
+ additionalProperties: false
+ description: Relabel step showing the rule, output, and whether the target was kept.
+ RelabelStepsResponse:
+ type: object
+ properties:
+ steps:
+ type: array
+ items:
+ $ref: '#/components/schemas/RelabelStep'
+ required:
+ - steps
+ additionalProperties: false
+ description: Relabeling steps response.
+ TargetRelabelStepsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RelabelStepsResponse'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for target relabel steps endpoint.
+ RuleGroup:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of the rule group.
+ file:
+ type: string
+ description: File containing the rule group.
+ rules:
+ type: array
+ items:
+ type: object
+ description: Rule definition.
+ description: Rules in this group.
+ interval:
+ type: number
+ format: double
+ description: Evaluation interval in seconds.
+ limit:
+ type: integer
+ format: int64
+ description: Maximum number of alerts for this group.
+ evaluationTime:
+ type: number
+ format: double
+ description: Time taken to evaluate the group in seconds.
+ lastEvaluation:
+ type: string
+ format: date-time
+ description: Timestamp of the last evaluation.
+ required:
+ - name
+ - file
+ - rules
+ - interval
+ - limit
+ - evaluationTime
+ - lastEvaluation
+ additionalProperties: false
+ description: Rule group information.
+ RuleDiscovery:
+ type: object
+ properties:
+ groups:
+ type: array
+ items:
+ $ref: '#/components/schemas/RuleGroup'
+ groupNextToken:
+ type: string
+ description: Pagination token for the next page of groups.
+ required:
+ - groups
+ additionalProperties: false
+ description: Rule discovery information containing all rule groups.
+ RulesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RuleDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for rules endpoint.
+ Alert:
+ type: object
+ properties:
+ labels:
+ $ref: '#/components/schemas/Labels'
+ annotations:
+ $ref: '#/components/schemas/Labels'
+ state:
+ type: string
+ description: State of the alert (pending, firing, or inactive).
+ value:
+ type: string
+ description: Value of the alert expression.
+ activeAt:
+ type: string
+ format: date-time
+ description: Timestamp when the alert became active.
+ keepFiringSince:
+ type: string
+ format: date-time
+ description: Timestamp since the alert has been kept firing.
+ required:
+ - labels
+ - annotations
+ - state
+ - value
+ additionalProperties: false
+ description: Alert information.
+ AlertDiscovery:
+ type: object
+ properties:
+ alerts:
+ type: array
+ items:
+ $ref: '#/components/schemas/Alert'
+ required:
+ - alerts
+ additionalProperties: false
+ description: Alert discovery information containing all active alerts.
+ AlertsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/AlertDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for alerts endpoint.
+ AlertmanagerTarget:
+ type: object
+ properties:
+ url:
+ type: string
+ description: URL of the Alertmanager instance.
+ required:
+ - url
+ additionalProperties: false
+ description: Alertmanager target information.
+ AlertmanagerDiscovery:
+ type: object
+ properties:
+ activeAlertmanagers:
+ type: array
+ items:
+ $ref: '#/components/schemas/AlertmanagerTarget'
+ droppedAlertmanagers:
+ type: array
+ items:
+ $ref: '#/components/schemas/AlertmanagerTarget'
+ required:
+ - activeAlertmanagers
+ - droppedAlertmanagers
+ additionalProperties: false
+ description: Alertmanager discovery information including active and dropped instances.
+ AlertmanagersOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/AlertmanagerDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for alertmanagers endpoint.
+ StatusConfigData:
+ type: object
+ properties:
+ yaml:
+ type: string
+ description: Prometheus configuration in YAML format.
+ required:
+ - yaml
+ additionalProperties: false
+ description: Prometheus configuration.
+ StatusConfigOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusConfigData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status config endpoint.
+ RuntimeInfo:
+ type: object
+ properties:
+ startTime:
+ type: string
+ format: date-time
+ CWD:
+ type: string
+ hostname:
+ type: string
+ serverTime:
+ type: string
+ format: date-time
+ reloadConfigSuccess:
+ type: boolean
+ lastConfigTime:
+ type: string
+ format: date-time
+ corruptionCount:
+ type: integer
+ format: int64
+ goroutineCount:
+ type: integer
+ format: int64
+ GOMAXPROCS:
+ type: integer
+ format: int64
+ GOMEMLIMIT:
+ type: integer
+ format: int64
+ GOGC:
+ type: string
+ GODEBUG:
+ type: string
+ storageRetention:
+ type: string
+ required:
+ - startTime
+ - CWD
+ - hostname
+ - serverTime
+ - reloadConfigSuccess
+ - lastConfigTime
+ - corruptionCount
+ - goroutineCount
+ - GOMAXPROCS
+ - GOMEMLIMIT
+ - GOGC
+ - GODEBUG
+ - storageRetention
+ additionalProperties: false
+ description: Prometheus runtime information.
+ StatusRuntimeInfoOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RuntimeInfo'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status runtime info endpoint.
+ PrometheusVersion:
+ type: object
+ properties:
+ version:
+ type: string
+ revision:
+ type: string
+ branch:
+ type: string
+ buildUser:
+ type: string
+ buildDate:
+ type: string
+ goVersion:
+ type: string
+ required:
+ - version
+ - revision
+ - branch
+ - buildUser
+ - buildDate
+ - goVersion
+ additionalProperties: false
+ description: Prometheus version information.
+ StatusBuildInfoOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/PrometheusVersion'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status build info endpoint.
+ StatusFlagsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: object
+ additionalProperties:
+ type: string
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status flags endpoint.
+ HeadStats:
+ type: object
+ properties:
+ numSeries:
+ type: integer
+ format: int64
+ numLabelPairs:
+ type: integer
+ format: int64
+ chunkCount:
+ type: integer
+ format: int64
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ required:
+ - numSeries
+ - numLabelPairs
+ - chunkCount
+ - minTime
+ - maxTime
+ additionalProperties: false
+ description: TSDB head statistics.
+ TSDBStat:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: integer
+ format: int64
+ required:
+ - name
+ - value
+ additionalProperties: false
+ description: TSDB statistic.
+ TSDBStatus:
+ type: object
+ properties:
+ headStats:
+ $ref: '#/components/schemas/HeadStats'
+ seriesCountByMetricName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ labelValueCountByLabelName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ memoryInBytesByLabelName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ seriesCountByLabelValuePair:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ required:
+ - headStats
+ - seriesCountByMetricName
+ - labelValueCountByLabelName
+ - memoryInBytesByLabelName
+ - seriesCountByLabelValuePair
+ additionalProperties: false
+ description: TSDB status information.
+ StatusTSDBOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/TSDBStatus'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status TSDB endpoint.
+ BlockDesc:
+ type: object
+ properties:
+ ulid:
+ type: string
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ required:
+ - ulid
+ - minTime
+ - maxTime
+ additionalProperties: false
+ description: Block descriptor.
+ BlockStats:
+ type: object
+ properties:
+ numSamples:
+ type: integer
+ format: int64
+ numSeries:
+ type: integer
+ format: int64
+ numChunks:
+ type: integer
+ format: int64
+ numTombstones:
+ type: integer
+ format: int64
+ numFloatSamples:
+ type: integer
+ format: int64
+ numHistogramSamples:
+ type: integer
+ format: int64
+ additionalProperties: false
+ description: Block statistics.
+ BlockMetaCompaction:
+ type: object
+ properties:
+ level:
+ type: integer
+ format: int64
+ sources:
+ type: array
+ items:
+ type: string
+ parents:
+ type: array
+ items:
+ $ref: '#/components/schemas/BlockDesc'
+ failed:
+ type: boolean
+ deletable:
+ type: boolean
+ hints:
+ type: array
+ items:
+ type: string
+ required:
+ - level
+ additionalProperties: false
+ description: Block compaction metadata.
+ BlockMeta:
+ type: object
+ properties:
+ ulid:
+ type: string
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ stats:
+ $ref: '#/components/schemas/BlockStats'
+ compaction:
+ $ref: '#/components/schemas/BlockMetaCompaction'
+ version:
+ type: integer
+ format: int64
+ required:
+ - ulid
+ - minTime
+ - maxTime
+ - compaction
+ - version
+ additionalProperties: false
+ description: Block metadata.
+ StatusTSDBBlocksData:
+ type: object
+ properties:
+ blocks:
+ type: array
+ items:
+ $ref: '#/components/schemas/BlockMeta'
+ required:
+ - blocks
+ additionalProperties: false
+ description: TSDB blocks information.
+ StatusTSDBBlocksOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusTSDBBlocksData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status TSDB blocks endpoint.
+ StatusWALReplayData:
+ type: object
+ properties:
+ min:
+ type: integer
+ format: int64
+ max:
+ type: integer
+ format: int64
+ current:
+ type: integer
+ format: int64
+ required:
+ - min
+ - max
+ - current
+ additionalProperties: false
+ description: WAL replay status.
+ StatusWALReplayOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusWALReplayData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status WAL replay endpoint.
+ DeleteSeriesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ additionalProperties: false
+ description: Response body containing only status.
+ CleanTombstonesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ additionalProperties: false
+ description: Response body containing only status.
+ DataStruct:
+ type: object
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ additionalProperties: false
+ description: Generic data structure with a name field.
+ SnapshotOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/DataStruct'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for snapshot endpoint.
+ Notification:
+ type: object
+ properties:
+ text:
+ type: string
+ date:
+ type: string
+ format: date-time
+ active:
+ type: boolean
+ required:
+ - text
+ - date
+ - active
+ additionalProperties: false
+ description: Server notification.
+ NotificationsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Notification'
+ example:
+ - active: true
+ date: "2023-07-21T20:00:00.000Z"
+ text: Server is running
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of notifications.
+ FeaturesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+tags:
+ - name: query
+ description: Query and evaluate PromQL expressions.
+ - name: metadata
+ description: Retrieve metric metadata such as type and unit.
+ - name: labels
+ description: Query label names and values.
+ - name: series
+ description: Query and manage time series.
+ - name: targets
+ description: Retrieve target and scrape pool information.
+ - name: rules
+ description: Query recording and alerting rules.
+ - name: alerts
+ description: Query active alerts and alertmanager discovery.
+ - name: status
+ description: Retrieve server status and configuration.
+ - name: admin
+ description: Administrative operations for TSDB management.
+ - name: features
+ description: Query enabled features.
+ - name: remote
+ description: Remote read and write endpoints.
+ - name: otlp
+ description: OpenTelemetry Protocol metrics ingestion.
+ - name: notifications
+ description: Server notifications and events.
diff --git a/web/api/v1/testdata/openapi_3.2_golden.yaml b/web/api/v1/testdata/openapi_3.2_golden.yaml
new file mode 100644
index 0000000000..fa79fffecc
--- /dev/null
+++ b/web/api/v1/testdata/openapi_3.2_golden.yaml
@@ -0,0 +1,4504 @@
+openapi: 3.2.0
+info:
+ title: Prometheus API
+ description: Prometheus is an Open-Source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach.
+ contact:
+ name: Prometheus Community
+ url: https://prometheus.io/community/
+ version: 0.0.1-undefined
+servers:
+ - url: /api/v1
+paths:
+ /query:
+ get:
+ tags:
+ - query
+ summary: Evaluate an instant query
+ operationId: query
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: time
+ in: query
+ description: The evaluation timestamp (optional, defaults to current time).
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: query
+ in: query
+ description: The PromQL query to execute.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: up
+ - name: timeout
+ in: query
+ description: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 30s
+ - name: lookback_delta
+ in: query
+ description: Override the lookback period for this query. Optional.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 5m
+ - name: stats
+ in: query
+ description: When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: all
+ responses:
+ "200":
+ description: Query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryOutputBody'
+ examples:
+ vectorResult:
+ summary: 'Instant vector query: up'
+ value: {"status": "success", "data": {"resultType": "vector", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "value": [1767436620, "1"]}, {"metric": {"__name__": "up", "env": "demo", "instance": "demo.prometheus.io:9093", "job": "alertmanager"}, "value": [1767436620, "1"]}]}}
+ scalarResult:
+ summary: 'Scalar query: scalar(42)'
+ value:
+ data:
+ result:
+ - 1767436620
+ - "42"
+ resultType: scalar
+ status: success
+ matrixResult:
+ summary: 'Range vector query: up[5m]'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767436320, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Evaluate an instant query
+ operationId: query-post
+ requestBody:
+ description: Submit an instant query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryPostInputBody'
+ examples:
+ simpleQuery:
+ summary: Simple instant query
+ value:
+ query: up
+ queryWithTime:
+ summary: Query with specific timestamp
+ value:
+ query: up{job="prometheus"}
+ time: "2026-01-02T13:37:00.000Z"
+ queryWithLimit:
+ summary: Query with limit and statistics
+ value:
+ limit: 100
+ query: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ stats: all
+ required: true
+ responses:
+ "200":
+ description: Instant query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryOutputBody'
+ examples:
+ vectorResult:
+ summary: 'Instant vector query: up'
+ value: {"status": "success", "data": {"resultType": "vector", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "value": [1767436620, "1"]}, {"metric": {"__name__": "up", "env": "demo", "instance": "demo.prometheus.io:9093", "job": "alertmanager"}, "value": [1767436620, "1"]}]}}
+ scalarResult:
+ summary: 'Scalar query: scalar(42)'
+ value:
+ data:
+ result:
+ - 1767436620
+ - "42"
+ resultType: scalar
+ status: success
+ matrixResult:
+ summary: 'Range vector query: up[5m]'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767436320, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing instant query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /query_range:
+ get:
+ tags:
+ - query
+ summary: Evaluate a range query
+ operationId: query-range
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: start
+ in: query
+ description: The start time of the query.
+ required: true
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: The end time of the query.
+ required: true
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: step
+ in: query
+ description: The step size of the query.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 15s
+ - name: query
+ in: query
+ description: The query to execute.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ - name: timeout
+ in: query
+ description: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 30s
+ - name: lookback_delta
+ in: query
+ description: Override the lookback period for this query. Optional.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: 5m
+ - name: stats
+ in: query
+ description: When provided, include query statistics in the response. The special value 'all' enables more comprehensive statistics.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: all
+ responses:
+ "200":
+ description: Range query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryRangeOutputBody'
+ examples:
+ matrixResult:
+ summary: 'Range query: rate(prometheus_http_requests_total[5m])'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767433020, "1"], [1767434820, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing range query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Evaluate a range query
+ operationId: query-range-post
+ requestBody:
+ description: Submit a range query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryRangePostInputBody'
+ examples:
+ basicRange:
+ summary: Basic range query
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: up
+ start: "2026-01-02T12:37:00.000Z"
+ step: 15s
+ rateQuery:
+ summary: Rate calculation over time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: rate(prometheus_http_requests_total{handler="/api/v1/query"}[5m])
+ start: "2026-01-02T12:37:00.000Z"
+ step: 30s
+ timeout: 30s
+ required: true
+ responses:
+ "200":
+ description: Range query executed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryRangeOutputBody'
+ examples:
+ matrixResult:
+ summary: 'Range query: rate(prometheus_http_requests_total[5m])'
+ value: {"status": "success", "data": {"resultType": "matrix", "result": [{"metric": {"__name__": "up", "instance": "demo.prometheus.io:9090", "job": "prometheus"}, "values": [[1767433020, "1"], [1767434820, "1"], [1767436620, "1"]]}]}}
+ default:
+ description: Error executing range query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /query_exemplars:
+ get:
+ tags:
+ - query
+ summary: Query exemplars
+ operationId: query-exemplars
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for exemplars query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for exemplars query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: query
+ in: query
+ description: PromQL query to extract exemplars for.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus_http_requests_total
+ responses:
+ "200":
+ description: Exemplars retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsOutputBody'
+ examples:
+ exemplarsResult:
+ summary: Exemplars for a metric with trace IDs
+ value:
+ data:
+ - exemplars:
+ - labels:
+ traceID: abc123def456
+ timestamp: 1.689956451781e+09
+ value: "1.5"
+ seriesLabels:
+ __name__: http_requests_total
+ job: api-server
+ method: GET
+ status: success
+ default:
+ description: Error retrieving exemplars.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Query exemplars
+ operationId: query-exemplars-post
+ requestBody:
+ description: Submit an exemplars query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsPostInputBody'
+ examples:
+ basicExemplar:
+ summary: Query exemplars for a metric
+ value:
+ query: prometheus_http_requests_total
+ exemplarWithTimeRange:
+ summary: Exemplars within specific time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ query: prometheus_http_requests_total{job="prometheus"}
+ start: "2026-01-02T12:37:00.000Z"
+ required: true
+ responses:
+ "200":
+ description: Exemplars query completed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/QueryExemplarsOutputBody'
+ examples:
+ exemplarsResult:
+ summary: Exemplars for a metric with trace IDs
+ value:
+ data:
+ - exemplars:
+ - labels:
+ traceID: abc123def456
+ timestamp: 1.689956451781e+09
+ value: "1.5"
+ seriesLabels:
+ __name__: http_requests_total
+ job: api-server
+ method: GET
+ status: success
+ default:
+ description: Error processing exemplars query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /format_query:
+ get:
+ tags:
+ - query
+ summary: Format a PromQL query
+ operationId: format-query
+ parameters:
+ - name: query
+ in: query
+ description: PromQL expression to format.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: sum(rate(http_requests_total[5m])) by (job)
+ responses:
+ "200":
+ description: Query formatted successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FormatQueryOutputBody'
+ examples:
+ formattedQuery:
+ summary: Formatted PromQL query
+ value:
+ data: sum by(job, status) (rate(http_requests_total[5m]))
+ status: success
+ default:
+ description: Error formatting query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Format a PromQL query
+ operationId: format-query-post
+ requestBody:
+ description: Submit a PromQL query to format. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/FormatQueryPostInputBody'
+ examples:
+ simpleFormat:
+ summary: Format a simple query
+ value:
+ query: up{job="prometheus"}
+ complexFormat:
+ summary: Format a complex query
+ value:
+ query: sum(rate(http_requests_total[5m])) by (job, status)
+ required: true
+ responses:
+ "200":
+ description: Query formatting completed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FormatQueryOutputBody'
+ examples:
+ formattedQuery:
+ summary: Formatted PromQL query
+ value:
+ data: sum by(job, status) (rate(http_requests_total[5m]))
+ status: success
+ default:
+ description: Error formatting query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /parse_query:
+ get:
+ tags:
+ - query
+ summary: Parse a PromQL query
+ operationId: parse-query
+ parameters:
+ - name: query
+ in: query
+ description: PromQL expression to parse.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: up{job="prometheus"}
+ responses:
+ "200":
+ description: Query parsed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ParseQueryOutputBody'
+ examples:
+ parsedQuery:
+ summary: Parsed PromQL expression tree
+ value:
+ data:
+ resultType: vector
+ status: success
+ default:
+ description: Error parsing query.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - query
+ summary: Parse a PromQL query
+ operationId: parse-query-post
+ requestBody:
+ description: Submit a PromQL query to parse. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/ParseQueryPostInputBody'
+ examples:
+ simpleParse:
+ summary: Parse a simple query
+ value:
+ query: up
+ complexParse:
+ summary: Parse a complex query
+ value:
+ query: rate(http_requests_total{job="api"}[5m])
+ required: true
+ responses:
+ "200":
+ description: Query parsed successfully via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ParseQueryOutputBody'
+ examples:
+ parsedQuery:
+ summary: Parsed PromQL expression tree
+ value:
+ data:
+ resultType: vector
+ status: success
+ default:
+ description: Error parsing query via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /labels:
+ get:
+ tags:
+ - labels
+ summary: Get label names
+ operationId: labels
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for label names query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for label names query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of label names to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ responses:
+ "200":
+ description: Label names retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelsOutputBody'
+ examples:
+ labelNames:
+ summary: List of label names
+ value:
+ data:
+ - __name__
+ - active
+ - address
+ - alertmanager
+ - alertname
+ - alertstate
+ - backend
+ - branch
+ - code
+ - collector
+ - component
+ - device
+ - env
+ - endpoint
+ - fstype
+ - handler
+ - instance
+ - job
+ - le
+ - method
+ - mode
+ - name
+ status: success
+ default:
+ description: Error retrieving label names.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - labels
+ summary: Get label names
+ operationId: labels-post
+ requestBody:
+ description: Submit a label names query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/LabelsPostInputBody'
+ examples:
+ allLabels:
+ summary: Get all label names
+ value: {}
+ labelsWithTimeRange:
+ summary: Get label names within time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ start: "2026-01-02T12:37:00.000Z"
+ labelsWithMatch:
+ summary: Get label names matching series selector
+ value:
+ match[]:
+ - up
+ - process_start_time_seconds{job="prometheus"}
+ required: true
+ responses:
+ "200":
+ description: Label names retrieved successfully via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelsOutputBody'
+ examples:
+ labelNames:
+ summary: List of label names
+ value:
+ data:
+ - __name__
+ - active
+ - address
+ - alertmanager
+ - alertname
+ - alertstate
+ - backend
+ - branch
+ - code
+ - collector
+ - component
+ - device
+ - env
+ - endpoint
+ - fstype
+ - handler
+ - instance
+ - job
+ - le
+ - method
+ - mode
+ - name
+ status: success
+ default:
+ description: Error retrieving label names via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /label/{name}/values:
+ get:
+ tags:
+ - labels
+ summary: Get label values
+ operationId: label-values
+ parameters:
+ - name: name
+ in: path
+ description: Label name.
+ required: true
+ schema:
+ type: string
+ - name: start
+ in: query
+ description: Start timestamp for label values query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for label values query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of label values to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 1000
+ responses:
+ "200":
+ description: Label values retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/LabelValuesOutputBody'
+ examples:
+ labelValues:
+ summary: List of values for a label
+ value:
+ data:
+ - alertmanager
+ - blackbox
+ - caddy
+ - cadvisor
+ - grafana
+ - node
+ - prometheus
+ - random
+ status: success
+ default:
+ description: Error retrieving label values.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /series:
+ get:
+ tags:
+ - series
+ summary: Find series by label matchers
+ operationId: series
+ parameters:
+ - name: start
+ in: query
+ description: Start timestamp for series query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for series query.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ - name: match[]
+ in: query
+ description: Series selector argument.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{job="prometheus"}'
+ - name: limit
+ in: query
+ description: Maximum number of series to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ responses:
+ "200":
+ description: Series returned matching the provided label matchers.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesOutputBody'
+ examples:
+ seriesList:
+ summary: List of series matching the selector
+ value:
+ data:
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:8080
+ job: cadvisor
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9100
+ job: node
+ - __name__: up
+ instance: demo.prometheus.io:3000
+ job: grafana
+ - __name__: up
+ instance: demo.prometheus.io:8996
+ job: random
+ status: success
+ default:
+ description: Error retrieving series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - series
+ summary: Find series by label matchers
+ operationId: series-post
+ requestBody:
+ description: Submit a series query. This endpoint accepts the same parameters as the GET version.
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/SeriesPostInputBody'
+ examples:
+ seriesMatch:
+ summary: Find series by label matchers
+ value:
+ match[]:
+ - up
+ seriesWithTimeRange:
+ summary: Find series with time range
+ value:
+ end: "2026-01-02T13:37:00.000Z"
+ match[]:
+ - up
+ - process_cpu_seconds_total{job="prometheus"}
+ start: "2026-01-02T12:37:00.000Z"
+ required: true
+ responses:
+ "200":
+ description: Series returned matching the provided label matchers via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesOutputBody'
+ examples:
+ seriesList:
+ summary: List of series matching the selector
+ value:
+ data:
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:8080
+ job: cadvisor
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ - __name__: up
+ env: demo
+ instance: demo.prometheus.io:9100
+ job: node
+ - __name__: up
+ instance: demo.prometheus.io:3000
+ job: grafana
+ - __name__: up
+ instance: demo.prometheus.io:8996
+ job: random
+ status: success
+ default:
+ description: Error retrieving series via POST.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ delete:
+ tags:
+ - series
+ summary: Delete series
+ description: 'Delete series matching selectors. Note: This is deprecated, use POST /admin/tsdb/delete_series instead.'
+ operationId: delete-series
+ responses:
+ "200":
+ description: Series marked for deletion.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SeriesDeleteOutputBody'
+ examples:
+ seriesDeleted:
+ summary: Series marked for deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /metadata:
+ get:
+ tags:
+ - metadata
+ summary: Get metadata
+ operationId: get-metadata
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of metrics to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: limit_per_metric
+ in: query
+ description: The maximum number of metadata entries per metric.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ - name: metric
+ in: query
+ description: A metric name to filter metadata for.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: http_requests_total
+ responses:
+ "200":
+ description: Metric metadata retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/MetadataOutputBody'
+ examples:
+ metricMetadata:
+ summary: Metadata for metrics
+ value:
+ data:
+ go_gc_stack_starting_size_bytes:
+ - help: The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes.
+ type: gauge
+ unit: ""
+ prometheus_rule_group_iterations_missed_total:
+ - help: The total number of rule group evaluations missed due to slow rule group evaluation.
+ type: counter
+ unit: ""
+ prometheus_sd_updates_total:
+ - help: Total number of update events sent to the SD consumers.
+ type: counter
+ unit: ""
+ status: success
+ default:
+ description: Error retrieving metadata.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /scrape_pools:
+ get:
+ tags:
+ - targets
+ summary: Get scrape pools
+ operationId: get-scrape-pools
+ responses:
+ "200":
+ description: Scrape pools retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ScrapePoolsOutputBody'
+ examples:
+ scrapePoolsList:
+ summary: List of scrape pool names
+ value:
+ data:
+ scrapePools:
+ - alertmanager
+ - blackbox
+ - caddy
+ - cadvisor
+ - grafana
+ - node
+ - prometheus
+ - random
+ status: success
+ default:
+ description: Error retrieving scrape pools.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets:
+ get:
+ tags:
+ - targets
+ summary: Get targets
+ operationId: get-targets
+ parameters:
+ - name: scrapePool
+ in: query
+ description: Filter targets by scrape pool name.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus
+ - name: state
+ in: query
+ description: 'Filter by state: active, dropped, or any.'
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: active
+ responses:
+ "200":
+ description: Target discovery information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetsOutputBody'
+ examples:
+ targetsList:
+ summary: Active and dropped targets
+ value:
+ data:
+ activeTargets:
+ - discoveredLabels:
+ __address__: demo.prometheus.io:9093
+ __meta_filepath: /etc/prometheus/file_sd/alertmanager.yml
+ __metrics_path__: /metrics
+ __scheme__: http
+ env: demo
+ job: alertmanager
+ globalUrl: http://demo.prometheus.io:9093/metrics
+ health: up
+ labels:
+ env: demo
+ instance: demo.prometheus.io:9093
+ job: alertmanager
+ lastError: ""
+ lastScrape: "2026-01-02T13:36:40.200Z"
+ lastScrapeDuration: 0.006576866
+ scrapeInterval: 15s
+ scrapePool: alertmanager
+ scrapeTimeout: 10s
+ scrapeUrl: http://demo.prometheus.io:9093/metrics
+ droppedTargetCounts:
+ alertmanager: 0
+ blackbox: 0
+ caddy: 0
+ cadvisor: 0
+ grafana: 0
+ node: 0
+ prometheus: 0
+ random: 0
+ droppedTargets: []
+ status: success
+ default:
+ description: Error retrieving targets.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets/metadata:
+ get:
+ tags:
+ - targets
+ summary: Get targets metadata
+ operationId: get-targets-metadata
+ parameters:
+ - name: match_target
+ in: query
+ description: Label selector to filter targets.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: '{job="prometheus"}'
+ - name: metric
+ in: query
+ description: Metric name to retrieve metadata for.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: http_requests_total
+ - name: limit
+ in: query
+ description: Maximum number of targets to match.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ responses:
+ "200":
+ description: Target metadata retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetMetadataOutputBody'
+ examples:
+ targetMetadata:
+ summary: Metadata for targets
+ value:
+ data:
+ - help: The current health status of the target
+ metric: up
+ target:
+ instance: localhost:9090
+ job: prometheus
+ type: gauge
+ unit: ""
+ status: success
+ default:
+ description: Error retrieving target metadata.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /targets/relabel_steps:
+ get:
+ tags:
+ - targets
+ summary: Get targets relabel steps
+ operationId: get-targets-relabel-steps
+ parameters:
+ - name: scrapePool
+ in: query
+ description: Name of the scrape pool.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: prometheus
+ - name: labels
+ in: query
+ description: JSON-encoded labels to apply relabel rules to.
+ required: true
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: '{"__address__":"localhost:9090","job":"prometheus"}'
+ responses:
+ "200":
+ description: Relabel steps retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TargetRelabelStepsOutputBody'
+ examples:
+ relabelSteps:
+ summary: Relabel steps for a target
+ value:
+ data:
+ steps:
+ - keep: true
+ output:
+ __address__: localhost:9090
+ instance: localhost:9090
+ job: prometheus
+ rule:
+ action: replace
+ regex: (.*)
+ replacement: $1
+ source_labels:
+ - __address__
+ target_label: instance
+ status: success
+ default:
+ description: Error retrieving relabel steps.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /rules:
+ get:
+ tags:
+ - rules
+ summary: Get alerting and recording rules
+ operationId: rules
+ parameters:
+ - name: type
+ in: query
+ description: 'Filter by rule type: alert or record.'
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: alert
+ - name: rule_name[]
+ in: query
+ description: Filter by rule name.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - HighErrorRate
+ - name: rule_group[]
+ in: query
+ description: Filter by rule group name.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - example_alerts
+ - name: file[]
+ in: query
+ description: Filter by file path.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - /etc/prometheus/rules.yml
+ - name: match[]
+ in: query
+ description: Label matchers to filter rules.
+ required: false
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{severity="critical"}'
+ - name: exclude_alerts
+ in: query
+ description: Exclude active alerts from response.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ - name: group_limit
+ in: query
+ description: Maximum number of rule groups to return.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 100
+ - name: group_next_token
+ in: query
+ description: Pagination token for next page.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: abc123
+ responses:
+ "200":
+ description: Rules retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RulesOutputBody'
+ examples:
+ ruleGroups:
+ summary: Alerting and recording rules
+ value:
+ data:
+ groups:
+ - evaluationTime: 0.000561635
+ file: /etc/prometheus/rules/ansible_managed.yml
+ interval: 15
+ lastEvaluation: "2026-01-02T13:36:56.874Z"
+ limit: 0
+ name: ansible managed alert rules
+ rules:
+ - annotations:
+ description: This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty.
+ summary: Ensure entire alerting pipeline is functional
+ duration: 600
+ evaluationTime: 0.000356688
+ health: ok
+ keepFiringFor: 0
+ labels:
+ severity: warning
+ lastEvaluation: "2026-01-02T13:36:56.874Z"
+ name: Watchdog
+ query: vector(1)
+ state: firing
+ type: alerting
+ status: success
+ default:
+ description: Error retrieving rules.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /alerts:
+ get:
+ tags:
+ - alerts
+ summary: Get active alerts
+ operationId: alerts
+ responses:
+ "200":
+ description: Active alerts retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AlertsOutputBody'
+ examples:
+ activeAlerts:
+ summary: Currently active alerts
+ value:
+ data:
+ alerts:
+ - activeAt: "2026-01-02T13:30:00.000Z"
+ annotations:
+ description: This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty.
+ summary: Ensure entire alerting pipeline is functional
+ labels:
+ alertname: Watchdog
+ severity: warning
+ state: firing
+ value: "1e+00"
+ status: success
+ default:
+ description: Error retrieving alerts.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /alertmanagers:
+ get:
+ tags:
+ - alerts
+ summary: Get Alertmanager discovery
+ operationId: alertmanagers
+ responses:
+ "200":
+ description: Alertmanager targets retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AlertmanagersOutputBody'
+ examples:
+ alertmanagerDiscovery:
+ summary: Alertmanager discovery results
+ value:
+ data:
+ activeAlertmanagers:
+ - url: http://demo.prometheus.io:9093/api/v2/alerts
+ droppedAlertmanagers: []
+ status: success
+ default:
+ description: Error retrieving Alertmanager targets.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/config:
+ get:
+ tags:
+ - status
+ summary: Get status config
+ operationId: get-status-config
+ responses:
+ "200":
+ description: Configuration retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusConfigOutputBody'
+ examples:
+ configYAML:
+ summary: Prometheus configuration
+ value:
+ data:
+ yaml: |
+ global:
+ scrape_interval: 15s
+ scrape_timeout: 10s
+ evaluation_interval: 15s
+ external_labels:
+ environment: demo-prometheus-io
+ alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets:
+ - demo.prometheus.io:9093
+ rule_files:
+ - /etc/prometheus/rules/*.yml
+ status: success
+ default:
+ description: Error retrieving configuration.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/runtimeinfo:
+ get:
+ tags:
+ - status
+ summary: Get status runtimeinfo
+ operationId: get-status-runtimeinfo
+ responses:
+ "200":
+ description: Runtime information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusRuntimeInfoOutputBody'
+ examples:
+ runtimeInfo:
+ summary: Runtime information
+ value:
+ data:
+ CWD: /
+ GODEBUG: ""
+ GOGC: "75"
+ GOMAXPROCS: 2
+ GOMEMLIMIT: 3703818240
+ corruptionCount: 0
+ goroutineCount: 88
+ hostname: demo-prometheus-io
+ lastConfigTime: "2026-01-01T13:37:00.000Z"
+ reloadConfigSuccess: true
+ serverTime: "2026-01-02T13:37:00.000Z"
+ startTime: "2026-01-01T13:37:00.000Z"
+ storageRetention: 31d
+ status: success
+ default:
+ description: Error retrieving runtime information.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/buildinfo:
+ get:
+ tags:
+ - status
+ summary: Get status buildinfo
+ operationId: get-status-buildinfo
+ responses:
+ "200":
+ description: Build information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusBuildInfoOutputBody'
+ examples:
+ buildInfo:
+ summary: Build information
+ value:
+ data:
+ branch: HEAD
+ buildDate: 20251030-07:26:10
+ buildUser: root@08c890a84441
+ goVersion: go1.25.3
+ revision: 0a41f0000705c69ab8e0f9a723fc73e39ed62b07
+ version: 3.7.3
+ status: success
+ default:
+ description: Error retrieving build information.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/flags:
+ get:
+ tags:
+ - status
+ summary: Get status flags
+ operationId: get-status-flags
+ responses:
+ "200":
+ description: Command-line flags retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusFlagsOutputBody'
+ examples:
+ flags:
+ summary: Command-line flags
+ value:
+ data:
+ agent: "false"
+ alertmanager.notification-queue-capacity: "10000"
+ config.file: /etc/prometheus/prometheus.yml
+ enable-feature: exemplar-storage,native-histograms
+ query.max-concurrency: "20"
+ query.timeout: 2m
+ storage.tsdb.path: /prometheus
+ storage.tsdb.retention.time: 15d
+ web.console.libraries: /usr/share/prometheus/console_libraries
+ web.console.templates: /usr/share/prometheus/consoles
+ web.enable-admin-api: "true"
+ web.enable-lifecycle: "true"
+ web.listen-address: 0.0.0.0:9090
+ web.page-title: Prometheus Time Series Collection and Processing Server
+ status: success
+ default:
+ description: Error retrieving flags.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/tsdb:
+ get:
+ tags:
+ - status
+ summary: Get TSDB status
+ operationId: status-tsdb
+ parameters:
+ - name: limit
+ in: query
+ description: The maximum number of items to return per category.
+ required: false
+ explode: false
+ schema:
+ type: integer
+ format: int64
+ examples:
+ example:
+ value: 10
+ responses:
+ "200":
+ description: TSDB status retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusTSDBOutputBody'
+ examples:
+ tsdbStats:
+ summary: TSDB statistics
+ value:
+ data:
+ headStats:
+ chunkCount: 37525
+ maxTime: 1767436620000
+ minTime: 1767362400712
+ numLabelPairs: 2512
+ numSeries: 9925
+ labelValueCountByLabelName:
+ - name: __name__
+ value: 5
+ - name: job
+ value: 3
+ memoryInBytesByLabelName:
+ - name: __name__
+ value: 1024
+ - name: job
+ value: 512
+ seriesCountByLabelValuePair:
+ - name: job=prometheus
+ value: 100
+ - name: instance=localhost:9090
+ value: 100
+ seriesCountByMetricName:
+ - name: up
+ value: 100
+ - name: http_requests_total
+ value: 500
+ status: success
+ default:
+ description: Error retrieving TSDB status.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/tsdb/blocks:
+ get:
+ tags:
+ - status
+ summary: Get TSDB blocks information
+ operationId: status-tsdb-blocks
+ responses:
+ "200":
+ description: TSDB blocks information retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusTSDBBlocksOutputBody'
+ examples:
+ tsdbBlocks:
+ summary: TSDB block information
+ value:
+ data:
+ blocks:
+ - compaction:
+ level: 4
+ sources:
+ - 01KBCJ7TR8A4QAJ3AA1J651P5S
+ - 01KBCS3J0E34567YPB8Y5W0E24
+ - 01KBCZZ9KRTYGG3E7HVQFGC3S3
+ maxTime: 1764763200000
+ minTime: 1764568801099
+ stats:
+ numChunks: 1073962
+ numSamples: 129505582
+ numSeries: 10661
+ ulid: 01KC4D6GXQA4CRHYKV78NEBVAE
+ version: 1
+ status: success
+ default:
+ description: Error retrieving TSDB blocks.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /status/walreplay:
+ get:
+ tags:
+ - status
+ summary: Get status walreplay
+ operationId: get-status-walreplay
+ responses:
+ "200":
+ description: WAL replay status retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/StatusWALReplayOutputBody'
+ examples:
+ walReplay:
+ summary: WAL replay status
+ value:
+ data:
+ current: 3214
+ max: 3214
+ min: 3209
+ status: success
+ default:
+ description: Error retrieving WAL replay status.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/delete_series:
+ put:
+ tags:
+ - admin
+ summary: Delete series matching selectors via PUT
+ description: Deletes data for a selection of series in a time range using PUT method.
+ operationId: deleteSeriesPut
+ parameters:
+ - name: match[]
+ in: query
+ description: Series selectors to identify series to delete.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{__name__=~"test.*"}'
+ - name: start
+ in: query
+ description: Start timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ responses:
+ "200":
+ description: Series deleted successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteSeriesOutputBody'
+ examples:
+ deletionSuccess:
+ summary: Successful series deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Delete series matching selectors
+ description: Deletes data for a selection of series in a time range.
+ operationId: deleteSeriesPost
+ parameters:
+ - name: match[]
+ in: query
+ description: Series selectors to identify series to delete.
+ required: true
+ explode: false
+ schema:
+ type: array
+ items:
+ type: string
+ examples:
+ example:
+ value:
+ - '{__name__=~"test.*"}'
+ - name: start
+ in: query
+ description: Start timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T12:37:00Z"
+ epoch:
+ value: 1767357420
+ - name: end
+ in: query
+ description: End timestamp for deletion.
+ required: false
+ explode: false
+ schema:
+ oneOf:
+ - type: string
+ format: date-time
+ description: RFC3339 timestamp.
+ - type: number
+ format: unixtime
+ description: Unix timestamp in seconds.
+ description: Timestamp in RFC3339 format or Unix timestamp in seconds.
+ examples:
+ RFC3339:
+ value: "2026-01-02T13:37:00Z"
+ epoch:
+ value: 1767361020
+ responses:
+ "200":
+ description: Series deleted successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteSeriesOutputBody'
+ examples:
+ deletionSuccess:
+ summary: Successful series deletion
+ value:
+ status: success
+ default:
+ description: Error deleting series.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/clean_tombstones:
+ put:
+ tags:
+ - admin
+ summary: Clean tombstones in the TSDB via PUT
+ description: Removes deleted data from disk and cleans up existing tombstones using PUT method.
+ operationId: cleanTombstonesPut
+ responses:
+ "200":
+ description: Tombstones cleaned successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CleanTombstonesOutputBody'
+ examples:
+ tombstonesCleaned:
+ summary: Tombstones cleaned successfully
+ value:
+ status: success
+ default:
+ description: Error cleaning tombstones via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Clean tombstones in the TSDB
+ description: Removes deleted data from disk and cleans up existing tombstones.
+ operationId: cleanTombstonesPost
+ responses:
+ "200":
+ description: Tombstones cleaned successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CleanTombstonesOutputBody'
+ examples:
+ tombstonesCleaned:
+ summary: Tombstones cleaned successfully
+ value:
+ status: success
+ default:
+ description: Error cleaning tombstones.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /admin/tsdb/snapshot:
+ put:
+ tags:
+ - admin
+ summary: Create a snapshot of the TSDB via PUT
+ description: Creates a snapshot of all current data using PUT method.
+ operationId: snapshotPut
+ parameters:
+ - name: skip_head
+ in: query
+ description: If true, do not snapshot data in the head block.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ responses:
+ "200":
+ description: Snapshot created successfully via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnapshotOutputBody'
+ examples:
+ snapshotCreated:
+ summary: Snapshot created successfully
+ value:
+ data:
+ name: 20260102T133700Z-a1b2c3d4e5f67890
+ status: success
+ default:
+ description: Error creating snapshot via PUT.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ post:
+ tags:
+ - admin
+ summary: Create a snapshot of the TSDB
+ description: Creates a snapshot of all current data.
+ operationId: snapshotPost
+ parameters:
+ - name: skip_head
+ in: query
+ description: If true, do not snapshot data in the head block.
+ required: false
+ explode: false
+ schema:
+ type: string
+ examples:
+ example:
+ value: "false"
+ responses:
+ "200":
+ description: Snapshot created successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnapshotOutputBody'
+ examples:
+ snapshotCreated:
+ summary: Snapshot created successfully
+ value:
+ data:
+ name: 20260102T133700Z-a1b2c3d4e5f67890
+ status: success
+ default:
+ description: Error creating snapshot.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /read:
+ post:
+ tags:
+ - remote
+ summary: Remote read endpoint
+ description: Prometheus remote read endpoint for federated queries. Accepts and returns Protocol Buffer encoded data.
+ operationId: remoteRead
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /write:
+ post:
+ tags:
+ - remote
+ summary: Remote write endpoint
+ description: Prometheus remote write endpoint for sending metrics. Accepts Protocol Buffer encoded write requests.
+ operationId: remoteWrite
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /otlp/v1/metrics:
+ post:
+ tags:
+ - otlp
+ summary: OTLP metrics write endpoint
+ description: OpenTelemetry Protocol metrics ingestion endpoint. Accepts OTLP/HTTP metrics in Protocol Buffer format.
+ operationId: otlpWrite
+ responses:
+ "204":
+ description: No Content
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /notifications:
+ get:
+ tags:
+ - notifications
+ summary: Get notifications
+ operationId: get-notifications
+ responses:
+ "200":
+ description: Notifications retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/NotificationsOutputBody'
+ examples:
+ notifications:
+ summary: Server notifications
+ value:
+ data:
+ - active: true
+ date: "2026-01-02T16:14:50.046Z"
+ text: Configuration reload has failed.
+ status: success
+ default:
+ description: Error retrieving notifications.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+ /notifications/live:
+ get:
+ tags:
+ - notifications
+ summary: Stream live notifications via Server-Sent Events
+ description: Subscribe to real-time server notifications using SSE. Each event contains a JSON-encoded Notification object in the data field.
+ operationId: notifications-live
+ responses:
+ "200":
+ description: Server-sent events stream established.
+ content:
+ text/event-stream:
+ itemSchema:
+ type: object
+ properties:
+ data:
+ type: string
+ contentSchema:
+ $ref: '#/components/schemas/Notification'
+ description: SSE data field containing JSON-encoded notification.
+ contentMediaType: application/json
+ title: Server Sent Event Message
+ required:
+ - data
+ additionalProperties: false
+ description: A single SSE message. The data field contains a JSON-encoded Notification object.
+ examples:
+ activeNotification:
+ summary: Active notification SSE message
+ description: An SSE message containing an active server notification.
+ value:
+ data: '{"text":"Configuration reload has failed.","date":"2026-01-02T16:14:50.046Z","active":true}'
+ default:
+ description: Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ /features:
+ get:
+ tags:
+ - features
+ summary: Get features
+ operationId: get-features
+ responses:
+ "200":
+ description: Feature flags retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FeaturesOutputBody'
+ examples:
+ enabledFeatures:
+ summary: Enabled feature flags
+ value:
+ data:
+ - exemplar-storage
+ - remote-write-receiver
+ status: success
+ default:
+ description: Error retrieving features.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Error'
+ examples:
+ tsdbNotReady:
+ summary: TSDB not ready
+ value:
+ error: TSDB not ready
+ errorType: internal
+ status: error
+components:
+ schemas:
+ Error:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ errorType:
+ type: string
+ description: Type of error that occurred.
+ example: bad_data
+ error:
+ type: string
+ description: Human-readable error message.
+ example: invalid parameter
+ required:
+ - status
+ - errorType
+ - error
+ additionalProperties: false
+ description: Error response.
+ Labels:
+ type: object
+ additionalProperties: true
+ description: Label set represented as a key-value map.
+ QueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/QueryData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for instant query.
+ QueryRangeOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/QueryData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for range query.
+ QueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The PromQL query to execute.'
+ example: up
+ time:
+ type: string
+ description: 'Form field: The evaluation timestamp (optional, defaults to current time).'
+ example: "2023-07-21T20:10:51.781Z"
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of metrics to return.'
+ example: 100
+ timeout:
+ type: string
+ description: 'Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).'
+ example: 30s
+ lookback_delta:
+ type: string
+ description: 'Form field: Override the lookback period for this query (optional).'
+ example: 5m
+ stats:
+ type: string
+ description: 'Form field: When provided, include query statistics in the response (the special value ''all'' enables more comprehensive statistics).'
+ example: all
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for instant query.
+ QueryRangePostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to execute.'
+ example: rate(http_requests_total[5m])
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:10:30.781Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T20:20:30.781Z"
+ step:
+ type: string
+ description: 'Form field: The step size of the query.'
+ example: 15s
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of metrics to return.'
+ example: 100
+ timeout:
+ type: string
+ description: 'Form field: Evaluation timeout (optional, defaults to and is capped by the value of the -query.timeout flag).'
+ example: 30s
+ lookback_delta:
+ type: string
+ description: 'Form field: Override the lookback period for this query (optional).'
+ example: 5m
+ stats:
+ type: string
+ description: 'Form field: When provided, include query statistics in the response (the special value ''all'' enables more comprehensive statistics).'
+ example: all
+ required:
+ - query
+ - start
+ - end
+ - step
+ additionalProperties: false
+ description: POST request body for range query.
+ QueryExemplarsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ QueryExemplarsPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to execute.'
+ example: http_requests_total
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for exemplars query.
+ FormatQueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: string
+ description: Formatted query string.
+ example: sum by(status) (rate(http_requests_total[5m]))
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for format query endpoint.
+ FormatQueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to format.'
+ example: sum(rate(http_requests_total[5m])) by (status)
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for format query.
+ ParseQueryOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ ParseQueryPostInputBody:
+ type: object
+ properties:
+ query:
+ type: string
+ description: 'Form field: The query to parse.'
+ example: sum(rate(http_requests_total[5m]))
+ required:
+ - query
+ additionalProperties: false
+ description: POST request body for parse query.
+ QueryData:
+ anyOf:
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - vector
+ result:
+ type: array
+ items:
+ anyOf:
+ - $ref: '#/components/schemas/FloatSample'
+ - $ref: '#/components/schemas/HistogramSample'
+ description: Array of samples (either float or histogram).
+ stats:
+ $ref: '#/components/schemas/QueryStats'
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - matrix
+ result:
+ type: array
+ items:
+ anyOf:
+ - $ref: '#/components/schemas/FloatSeries'
+ - $ref: '#/components/schemas/HistogramSeries'
+ description: Array of time series (either float or histogram).
+ stats:
+ $ref: '#/components/schemas/QueryStats'
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - scalar
+ result:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Scalar value as [timestamp, stringValue].
+ stats:
+ $ref: '#/components/schemas/QueryStats'
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ - type: object
+ properties:
+ resultType:
+ type: string
+ enum:
+ - string
+ result:
+ type: array
+ items:
+ type: string
+ maxItems: 2
+ minItems: 2
+ description: String value as [timestamp, stringValue].
+ stats:
+ $ref: '#/components/schemas/QueryStats'
+ required:
+ - resultType
+ - result
+ additionalProperties: false
+ description: Query result data. The structure of 'result' depends on 'resultType'.
+ example:
+ result:
+ - metric:
+ __name__: up
+ job: prometheus
+ value:
+ - 1627845600
+ - "1"
+ resultType: vector
+ QueryStats:
+ type: object
+ properties:
+ timings:
+ type: object
+ properties:
+ evalTotalTime:
+ type: number
+ description: Total evaluation time in seconds.
+ resultSortTime:
+ type: number
+ description: Time spent sorting results in seconds.
+ queryPreparationTime:
+ type: number
+ description: Query preparation time in seconds.
+ innerEvalTime:
+ type: number
+ description: Inner evaluation time in seconds.
+ execQueueTime:
+ type: number
+ description: Execution queue wait time in seconds.
+ execTotalTime:
+ type: number
+ description: Total execution time in seconds.
+ samples:
+ type: object
+ properties:
+ totalQueryableSamples:
+ type: integer
+ description: Total number of samples that were queryable.
+ peakSamples:
+ type: integer
+ description: Peak number of samples in memory.
+ totalQueryableSamplesPerStep:
+ type: array
+ items:
+ type: array
+ items:
+ type: number
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and sample count as [timestamp, count].
+ description: Total queryable samples per step (only included with stats=all).
+ description: Query execution statistics (included when the stats query parameter is provided).
+ FloatSample:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ value:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and float value as [unixTimestamp, stringValue].
+ example:
+ - 1767436620
+ - "1"
+ required:
+ - metric
+ - value
+ additionalProperties: false
+ description: A sample with a float value.
+ HistogramSample:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ histogram:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - $ref: '#/components/schemas/HistogramValue'
+ maxItems: 2
+ minItems: 2
+ description: Timestamp and histogram value as [unixTimestamp, histogramObject].
+ example:
+ - 1767436620
+ - buckets: []
+ count: "60"
+ sum: "120"
+ required:
+ - metric
+ - histogram
+ additionalProperties: false
+ description: A sample with a native histogram value.
+ FloatSeries:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ values:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ maxItems: 2
+ minItems: 2
+ description: Array of [timestamp, stringValue] pairs for float values.
+ required:
+ - metric
+ - values
+ additionalProperties: false
+ description: A time series with float values.
+ HistogramSeries:
+ type: object
+ properties:
+ metric:
+ $ref: '#/components/schemas/Labels'
+ histograms:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - $ref: '#/components/schemas/HistogramValue'
+ maxItems: 2
+ minItems: 2
+ description: Array of [timestamp, histogramObject] pairs for histogram values.
+ required:
+ - metric
+ - histograms
+ additionalProperties: false
+ description: A time series with native histogram values.
+ HistogramValue:
+ type: object
+ properties:
+ count:
+ type: string
+ description: Total count of observations.
+ sum:
+ type: string
+ description: Sum of all observed values.
+ buckets:
+ type: array
+ items:
+ type: array
+ items:
+ oneOf:
+ - type: number
+ - type: string
+ description: Histogram buckets as [boundary_rule, lower, upper, count].
+ required:
+ - count
+ - sum
+ additionalProperties: false
+ description: Native histogram value representation.
+ LabelsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ type: string
+ example:
+ - __name__
+ - job
+ - instance
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of strings.
+ LabelsPostInputBody:
+ type: object
+ properties:
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ match[]:
+ type: array
+ items:
+ type: string
+ description: 'Form field: Series selector argument that selects the series from which to read the label names.'
+ example:
+ - '{job="prometheus"}'
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of label names to return.'
+ example: 100
+ additionalProperties: false
+ description: POST request body for labels query.
+ LabelValuesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ type: string
+ example:
+ - __name__
+ - job
+ - instance
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of strings.
+ SeriesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Labels'
+ example:
+ - __name__: up
+ instance: localhost:9090
+ job: prometheus
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of label sets.
+ SeriesPostInputBody:
+ type: object
+ properties:
+ start:
+ type: string
+ description: 'Form field: The start time of the query.'
+ example: "2023-07-21T20:00:00.000Z"
+ end:
+ type: string
+ description: 'Form field: The end time of the query.'
+ example: "2023-07-21T21:00:00.000Z"
+ match[]:
+ type: array
+ items:
+ type: string
+ description: 'Form field: Series selector argument that selects the series to return.'
+ example:
+ - '{job="prometheus"}'
+ limit:
+ type: integer
+ format: int64
+ description: 'Form field: The maximum number of series to return.'
+ example: 100
+ required:
+ - match[]
+ additionalProperties: false
+ description: POST request body for series query.
+ SeriesDeleteOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+ Metadata:
+ type: object
+ properties:
+ type:
+ type: string
+ description: Metric type (counter, gauge, histogram, summary, or untyped).
+ unit:
+ type: string
+ description: Unit of the metric.
+ help:
+ type: string
+ description: Help text describing the metric.
+ required:
+ - type
+ - unit
+ - help
+ additionalProperties: false
+ description: Metric metadata.
+ MetadataOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: object
+ additionalProperties:
+ type: array
+ items:
+ $ref: '#/components/schemas/Metadata'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for metadata endpoint.
+ MetricMetadata:
+ type: object
+ properties:
+ target:
+ $ref: '#/components/schemas/Labels'
+ metric:
+ type: string
+ description: Metric name.
+ type:
+ type: string
+ description: Metric type (counter, gauge, histogram, summary, or untyped).
+ help:
+ type: string
+ description: Help text describing the metric.
+ unit:
+ type: string
+ description: Unit of the metric.
+ required:
+ - target
+ - type
+ - help
+ - unit
+ additionalProperties: false
+ description: Target metric metadata.
+ Target:
+ type: object
+ properties:
+ discoveredLabels:
+ $ref: '#/components/schemas/Labels'
+ labels:
+ $ref: '#/components/schemas/Labels'
+ scrapePool:
+ type: string
+ description: Name of the scrape pool.
+ scrapeUrl:
+ type: string
+ description: URL of the target.
+ globalUrl:
+ type: string
+ description: Global URL of the target.
+ lastError:
+ type: string
+ description: Last error message from scraping.
+ lastScrape:
+ type: string
+ format: date-time
+ description: Timestamp of the last scrape.
+ lastScrapeDuration:
+ type: number
+ format: double
+ description: Duration of the last scrape in seconds.
+ health:
+ type: string
+ description: Health status of the target (up, down, or unknown).
+ scrapeInterval:
+ type: string
+ description: Scrape interval for this target.
+ scrapeTimeout:
+ type: string
+ description: Scrape timeout for this target.
+ required:
+ - discoveredLabels
+ - labels
+ - scrapePool
+ - scrapeUrl
+ - globalUrl
+ - lastError
+ - lastScrape
+ - lastScrapeDuration
+ - health
+ - scrapeInterval
+ - scrapeTimeout
+ additionalProperties: false
+ description: Scrape target information.
+ DroppedTarget:
+ type: object
+ properties:
+ discoveredLabels:
+ $ref: '#/components/schemas/Labels'
+ scrapePool:
+ type: string
+ description: Name of the scrape pool.
+ required:
+ - discoveredLabels
+ - scrapePool
+ additionalProperties: false
+ description: Dropped target information.
+ TargetDiscovery:
+ type: object
+ properties:
+ activeTargets:
+ type: array
+ items:
+ $ref: '#/components/schemas/Target'
+ droppedTargets:
+ type: array
+ items:
+ $ref: '#/components/schemas/DroppedTarget'
+ droppedTargetCounts:
+ type: object
+ additionalProperties:
+ type: integer
+ format: int64
+ required:
+ - activeTargets
+ - droppedTargets
+ - droppedTargetCounts
+ additionalProperties: false
+ description: Target discovery information including active and dropped targets.
+ TargetsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/TargetDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for targets endpoint.
+ TargetMetadataOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/MetricMetadata'
+ example:
+ - help: The current health status of the target
+ metric: up
+ target:
+ instance: localhost:9090
+ job: prometheus
+ type: gauge
+ unit: ""
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of metric metadata.
+ ScrapePoolsDiscovery:
+ type: object
+ properties:
+ scrapePools:
+ type: array
+ items:
+ type: string
+ required:
+ - scrapePools
+ additionalProperties: false
+ description: List of all configured scrape pools.
+ ScrapePoolsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/ScrapePoolsDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for scrape pools endpoint.
+ Config:
+ type: object
+ properties:
+ source_labels:
+ type: array
+ items:
+ type: string
+ description: Source labels for relabeling.
+ separator:
+ type: string
+ description: Separator for source label values.
+ regex:
+ type: string
+ description: Regular expression for matching.
+ modulus:
+ type: integer
+ format: int64
+ description: Modulus for hash-based relabeling.
+ target_label:
+ type: string
+ description: Target label name.
+ replacement:
+ type: string
+ description: Replacement value.
+ action:
+ type: string
+ description: Relabel action.
+ additionalProperties: false
+ description: Relabel configuration.
+ RelabelStep:
+ type: object
+ properties:
+ rule:
+ $ref: '#/components/schemas/Config'
+ output:
+ $ref: '#/components/schemas/Labels'
+ keep:
+ type: boolean
+ required:
+ - rule
+ - output
+ - keep
+ additionalProperties: false
+ description: Relabel step showing the rule, output, and whether the target was kept.
+ RelabelStepsResponse:
+ type: object
+ properties:
+ steps:
+ type: array
+ items:
+ $ref: '#/components/schemas/RelabelStep'
+ required:
+ - steps
+ additionalProperties: false
+ description: Relabeling steps response.
+ TargetRelabelStepsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RelabelStepsResponse'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for target relabel steps endpoint.
+ RuleGroup:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of the rule group.
+ file:
+ type: string
+ description: File containing the rule group.
+ rules:
+ type: array
+ items:
+ type: object
+ description: Rule definition.
+ description: Rules in this group.
+ interval:
+ type: number
+ format: double
+ description: Evaluation interval in seconds.
+ limit:
+ type: integer
+ format: int64
+ description: Maximum number of alerts for this group.
+ evaluationTime:
+ type: number
+ format: double
+ description: Time taken to evaluate the group in seconds.
+ lastEvaluation:
+ type: string
+ format: date-time
+ description: Timestamp of the last evaluation.
+ required:
+ - name
+ - file
+ - rules
+ - interval
+ - limit
+ - evaluationTime
+ - lastEvaluation
+ additionalProperties: false
+ description: Rule group information.
+ RuleDiscovery:
+ type: object
+ properties:
+ groups:
+ type: array
+ items:
+ $ref: '#/components/schemas/RuleGroup'
+ groupNextToken:
+ type: string
+ description: Pagination token for the next page of groups.
+ required:
+ - groups
+ additionalProperties: false
+ description: Rule discovery information containing all rule groups.
+ RulesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RuleDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for rules endpoint.
+ Alert:
+ type: object
+ properties:
+ labels:
+ $ref: '#/components/schemas/Labels'
+ annotations:
+ $ref: '#/components/schemas/Labels'
+ state:
+ type: string
+ description: State of the alert (pending, firing, or inactive).
+ value:
+ type: string
+ description: Value of the alert expression.
+ activeAt:
+ type: string
+ format: date-time
+ description: Timestamp when the alert became active.
+ keepFiringSince:
+ type: string
+ format: date-time
+ description: Timestamp since the alert has been kept firing.
+ required:
+ - labels
+ - annotations
+ - state
+ - value
+ additionalProperties: false
+ description: Alert information.
+ AlertDiscovery:
+ type: object
+ properties:
+ alerts:
+ type: array
+ items:
+ $ref: '#/components/schemas/Alert'
+ required:
+ - alerts
+ additionalProperties: false
+ description: Alert discovery information containing all active alerts.
+ AlertsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/AlertDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for alerts endpoint.
+ AlertmanagerTarget:
+ type: object
+ properties:
+ url:
+ type: string
+ description: URL of the Alertmanager instance.
+ required:
+ - url
+ additionalProperties: false
+ description: Alertmanager target information.
+ AlertmanagerDiscovery:
+ type: object
+ properties:
+ activeAlertmanagers:
+ type: array
+ items:
+ $ref: '#/components/schemas/AlertmanagerTarget'
+ droppedAlertmanagers:
+ type: array
+ items:
+ $ref: '#/components/schemas/AlertmanagerTarget'
+ required:
+ - activeAlertmanagers
+ - droppedAlertmanagers
+ additionalProperties: false
+ description: Alertmanager discovery information including active and dropped instances.
+ AlertmanagersOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/AlertmanagerDiscovery'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for alertmanagers endpoint.
+ StatusConfigData:
+ type: object
+ properties:
+ yaml:
+ type: string
+ description: Prometheus configuration in YAML format.
+ required:
+ - yaml
+ additionalProperties: false
+ description: Prometheus configuration.
+ StatusConfigOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusConfigData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status config endpoint.
+ RuntimeInfo:
+ type: object
+ properties:
+ startTime:
+ type: string
+ format: date-time
+ CWD:
+ type: string
+ hostname:
+ type: string
+ serverTime:
+ type: string
+ format: date-time
+ reloadConfigSuccess:
+ type: boolean
+ lastConfigTime:
+ type: string
+ format: date-time
+ corruptionCount:
+ type: integer
+ format: int64
+ goroutineCount:
+ type: integer
+ format: int64
+ GOMAXPROCS:
+ type: integer
+ format: int64
+ GOMEMLIMIT:
+ type: integer
+ format: int64
+ GOGC:
+ type: string
+ GODEBUG:
+ type: string
+ storageRetention:
+ type: string
+ required:
+ - startTime
+ - CWD
+ - hostname
+ - serverTime
+ - reloadConfigSuccess
+ - lastConfigTime
+ - corruptionCount
+ - goroutineCount
+ - GOMAXPROCS
+ - GOMEMLIMIT
+ - GOGC
+ - GODEBUG
+ - storageRetention
+ additionalProperties: false
+ description: Prometheus runtime information.
+ StatusRuntimeInfoOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/RuntimeInfo'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status runtime info endpoint.
+ PrometheusVersion:
+ type: object
+ properties:
+ version:
+ type: string
+ revision:
+ type: string
+ branch:
+ type: string
+ buildUser:
+ type: string
+ buildDate:
+ type: string
+ goVersion:
+ type: string
+ required:
+ - version
+ - revision
+ - branch
+ - buildUser
+ - buildDate
+ - goVersion
+ additionalProperties: false
+ description: Prometheus version information.
+ StatusBuildInfoOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/PrometheusVersion'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status build info endpoint.
+ StatusFlagsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: object
+ additionalProperties:
+ type: string
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status flags endpoint.
+ HeadStats:
+ type: object
+ properties:
+ numSeries:
+ type: integer
+ format: int64
+ numLabelPairs:
+ type: integer
+ format: int64
+ chunkCount:
+ type: integer
+ format: int64
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ required:
+ - numSeries
+ - numLabelPairs
+ - chunkCount
+ - minTime
+ - maxTime
+ additionalProperties: false
+ description: TSDB head statistics.
+ TSDBStat:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: integer
+ format: int64
+ required:
+ - name
+ - value
+ additionalProperties: false
+ description: TSDB statistic.
+ TSDBStatus:
+ type: object
+ properties:
+ headStats:
+ $ref: '#/components/schemas/HeadStats'
+ seriesCountByMetricName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ labelValueCountByLabelName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ memoryInBytesByLabelName:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ seriesCountByLabelValuePair:
+ type: array
+ items:
+ $ref: '#/components/schemas/TSDBStat'
+ required:
+ - headStats
+ - seriesCountByMetricName
+ - labelValueCountByLabelName
+ - memoryInBytesByLabelName
+ - seriesCountByLabelValuePair
+ additionalProperties: false
+ description: TSDB status information.
+ StatusTSDBOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/TSDBStatus'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status TSDB endpoint.
+ BlockDesc:
+ type: object
+ properties:
+ ulid:
+ type: string
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ required:
+ - ulid
+ - minTime
+ - maxTime
+ additionalProperties: false
+ description: Block descriptor.
+ BlockStats:
+ type: object
+ properties:
+ numSamples:
+ type: integer
+ format: int64
+ numSeries:
+ type: integer
+ format: int64
+ numChunks:
+ type: integer
+ format: int64
+ numTombstones:
+ type: integer
+ format: int64
+ numFloatSamples:
+ type: integer
+ format: int64
+ numHistogramSamples:
+ type: integer
+ format: int64
+ additionalProperties: false
+ description: Block statistics.
+ BlockMetaCompaction:
+ type: object
+ properties:
+ level:
+ type: integer
+ format: int64
+ sources:
+ type: array
+ items:
+ type: string
+ parents:
+ type: array
+ items:
+ $ref: '#/components/schemas/BlockDesc'
+ failed:
+ type: boolean
+ deletable:
+ type: boolean
+ hints:
+ type: array
+ items:
+ type: string
+ required:
+ - level
+ additionalProperties: false
+ description: Block compaction metadata.
+ BlockMeta:
+ type: object
+ properties:
+ ulid:
+ type: string
+ minTime:
+ type: integer
+ format: int64
+ maxTime:
+ type: integer
+ format: int64
+ stats:
+ $ref: '#/components/schemas/BlockStats'
+ compaction:
+ $ref: '#/components/schemas/BlockMetaCompaction'
+ version:
+ type: integer
+ format: int64
+ required:
+ - ulid
+ - minTime
+ - maxTime
+ - compaction
+ - version
+ additionalProperties: false
+ description: Block metadata.
+ StatusTSDBBlocksData:
+ type: object
+ properties:
+ blocks:
+ type: array
+ items:
+ $ref: '#/components/schemas/BlockMeta'
+ required:
+ - blocks
+ additionalProperties: false
+ description: TSDB blocks information.
+ StatusTSDBBlocksOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusTSDBBlocksData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status TSDB blocks endpoint.
+ StatusWALReplayData:
+ type: object
+ properties:
+ min:
+ type: integer
+ format: int64
+ max:
+ type: integer
+ format: int64
+ current:
+ type: integer
+ format: int64
+ required:
+ - min
+ - max
+ - current
+ additionalProperties: false
+ description: WAL replay status.
+ StatusWALReplayOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/StatusWALReplayData'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for status WAL replay endpoint.
+ DeleteSeriesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ additionalProperties: false
+ description: Response body containing only status.
+ CleanTombstonesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ additionalProperties: false
+ description: Response body containing only status.
+ DataStruct:
+ type: object
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ additionalProperties: false
+ description: Generic data structure with a name field.
+ SnapshotOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ $ref: '#/components/schemas/DataStruct'
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body for snapshot endpoint.
+ Notification:
+ type: object
+ properties:
+ text:
+ type: string
+ date:
+ type: string
+ format: date-time
+ active:
+ type: boolean
+ required:
+ - text
+ - date
+ - active
+ additionalProperties: false
+ description: Server notification.
+ NotificationsOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Notification'
+ example:
+ - active: true
+ date: "2023-07-21T20:00:00.000Z"
+ text: Server is running
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Response body with an array of notifications.
+ FeaturesOutputBody:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - success
+ - error
+ description: Response status.
+ example: success
+ data:
+ description: Response data (structure varies by endpoint).
+ example:
+ result: ok
+ warnings:
+ type: array
+ items:
+ type: string
+ description: Only set if there were warnings while executing the request. There will still be data in the data field.
+ infos:
+ type: array
+ items:
+ type: string
+ description: Only set if there were info-level annotations while executing the request.
+ required:
+ - status
+ - data
+ additionalProperties: false
+ description: Generic response body.
+tags:
+ - name: query
+ summary: Query
+ description: Query and evaluate PromQL expressions.
+ - name: metadata
+ summary: Metadata
+ description: Retrieve metric metadata such as type and unit.
+ - name: labels
+ summary: Labels
+ description: Query label names and values.
+ - name: series
+ summary: Series
+ description: Query and manage time series.
+ - name: targets
+ summary: Targets
+ description: Retrieve target and scrape pool information.
+ - name: rules
+ summary: Rules
+ description: Query recording and alerting rules.
+ - name: alerts
+ summary: Alerts
+ description: Query active alerts and alertmanager discovery.
+ - name: status
+ summary: Status
+ description: Retrieve server status and configuration.
+ - name: admin
+ summary: Admin
+ description: Administrative operations for TSDB management.
+ - name: features
+ summary: Features
+ description: Query enabled features.
+ - name: remote
+ summary: Remote Storage
+ description: Remote read and write endpoints.
+ - name: otlp
+ summary: OTLP
+ description: OpenTelemetry Protocol metrics ingestion.
+ - name: notifications
+ summary: Notifications
+ description: Server notifications and events.
diff --git a/web/api/v1/translate_ast.go b/web/api/v1/translate_ast.go
index 3cce0583f9..3c2bc09943 100644
--- a/web/api/v1/translate_ast.go
+++ b/web/api/v1/translate_ast.go
@@ -47,6 +47,10 @@ func translateAST(node parser.Expr) any {
"labels": sanitizeList(m.MatchingLabels),
"on": m.On,
"include": sanitizeList(m.Include),
+ "fillValues": map[string]*float64{
+ "lhs": m.FillValues.LHS,
+ "rhs": m.FillValues.RHS,
+ },
}
}
diff --git a/web/federate.go b/web/federate.go
index 584b8d7c4a..730c0cf8e2 100644
--- a/web/federate.go
+++ b/web/federate.go
@@ -32,7 +32,6 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/promql"
- "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -64,7 +63,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
return
}
- matcherSets, err := parser.ParseMetricSelectors(req.Form["match[]"])
+ matcherSets, err := h.options.Parser.ParseMetricSelectors(req.Form["match[]"])
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
diff --git a/web/federate_test.go b/web/federate_test.go
index 932639e2e6..1254bf6644 100644
--- a/web/federate_test.go
+++ b/web/federate_test.go
@@ -35,6 +35,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/textparse"
"github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
@@ -42,6 +43,8 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
+var testParser = parser.NewParser(parser.Options{})
+
var scenarios = map[string]struct {
params string
externalLabels labels.Labels
@@ -212,7 +215,6 @@ func TestFederation(t *testing.T) {
test_metric_stale 1+10x99 stale
test_metric_old 1+10x98
`)
- t.Cleanup(func() { storage.Close() })
h := &Handler{
localStorage: &dbAdapter{storage.DB},
@@ -221,6 +223,7 @@ func TestFederation(t *testing.T) {
config: &config.Config{
GlobalConfig: config.GlobalConfig{},
},
+ options: &Options{Parser: testParser},
}
for name, scenario := range scenarios {
@@ -265,6 +268,7 @@ func TestFederation_NotReady(t *testing.T) {
ExternalLabels: scenario.externalLabels,
},
},
+ options: &Options{Parser: testParser},
}
req := httptest.NewRequest(http.MethodGet, "http://example.org/federate?"+scenario.params, nil)
@@ -303,7 +307,6 @@ func normalizeBody(body *bytes.Buffer) string {
func TestFederationWithNativeHistograms(t *testing.T) {
storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
var expVec promql.Vector
@@ -442,6 +445,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
config: &config.Config{
GlobalConfig: config.GlobalConfig{},
},
+ options: &Options{Parser: testParser},
}
req := httptest.NewRequest(http.MethodGet, "http://example.org/federate?match[]=test_metric", nil)
diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json
index f38a2d965f..8f35318090 100644
--- a/web/ui/mantine-ui/package.json
+++ b/web/ui/mantine-ui/package.json
@@ -12,57 +12,57 @@
"test": "vitest"
},
"dependencies": {
- "@codemirror/autocomplete": "^6.19.1",
- "@codemirror/language": "^6.11.3",
- "@codemirror/lint": "^6.9.2",
- "@codemirror/state": "^6.5.2",
- "@codemirror/view": "^6.38.6",
- "@floating-ui/dom": "^1.7.4",
- "@lezer/common": "^1.3.0",
+ "@codemirror/autocomplete": "^6.20.0",
+ "@codemirror/language": "^6.12.1",
+ "@codemirror/lint": "^6.9.3",
+ "@codemirror/state": "^6.5.4",
+ "@codemirror/view": "^6.39.12",
+ "@floating-ui/dom": "^1.7.5",
+ "@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
- "@mantine/code-highlight": "^8.3.6",
- "@mantine/core": "^8.3.6",
- "@mantine/dates": "^8.3.6",
- "@mantine/hooks": "^8.3.6",
- "@mantine/notifications": "^8.3.6",
+ "@mantine/code-highlight": "^8.3.14",
+ "@mantine/core": "^8.3.14",
+ "@mantine/dates": "^8.3.14",
+ "@mantine/hooks": "^8.3.14",
+ "@mantine/notifications": "^8.3.14",
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.309.1",
- "@reduxjs/toolkit": "^2.10.1",
- "@tabler/icons-react": "^3.35.0",
- "@tanstack/react-query": "^5.90.7",
+ "@reduxjs/toolkit": "^2.11.2",
+ "@tabler/icons-react": "^3.36.1",
+ "@tanstack/react-query": "^5.90.20",
"@testing-library/jest-dom": "^6.9.1",
- "@testing-library/react": "^16.3.0",
- "@types/lodash": "^4.17.20",
+ "@testing-library/react": "^16.3.2",
+ "@types/lodash": "^4.17.23",
"@types/sanitize-html": "^2.16.0",
- "@uiw/react-codemirror": "^4.25.3",
+ "@uiw/react-codemirror": "^4.25.4",
"clsx": "^2.1.1",
"dayjs": "^1.11.19",
"highlight.js": "^11.11.1",
- "lodash": "^4.17.21",
- "react": "^19.2.0",
- "react-dom": "^19.2.0",
- "react-infinite-scroll-component": "^6.1.0",
+ "lodash": "^4.17.23",
+ "react": "^19.2.4",
+ "react-dom": "^19.2.4",
+ "react-infinite-scroll-component": "^6.1.1",
"react-redux": "^9.2.0",
- "react-router-dom": "^7.9.5",
+ "react-router-dom": "^7.13.0",
"sanitize-html": "^2.17.0",
"uplot": "^1.6.32",
"uplot-react": "^1.2.4",
- "use-query-params": "^2.2.1"
+ "use-query-params": "^2.2.2"
},
"devDependencies": {
"@eslint/compat": "^1.4.1",
- "@eslint/eslintrc": "^3.3.1",
- "@eslint/js": "^9.39.1",
- "@types/react": "^19.2.2",
- "@types/react-dom": "^19.2.2",
- "@typescript-eslint/eslint-plugin": "^8.46.3",
- "@typescript-eslint/parser": "^8.46.3",
+ "@eslint/eslintrc": "^3.3.3",
+ "@eslint/js": "^9.39.2",
+ "@types/react": "^19.2.13",
+ "@types/react-dom": "^19.2.3",
+ "@typescript-eslint/eslint-plugin": "^8.54.0",
+ "@typescript-eslint/parser": "^8.54.0",
"@vitejs/plugin-react": "^4.7.0",
- "eslint": "^9.39.1",
+ "eslint": "^9.39.2",
"eslint-plugin-react-hooks": "^5.2.0",
- "eslint-plugin-react-refresh": "^0.4.24",
+ "eslint-plugin-react-refresh": "^0.5.0",
"globals": "^16.5.0",
"jsdom": "^25.0.1",
"postcss": "^8.5.6",
diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx
index e70b7a3f3e..5c10357561 100644
--- a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx
+++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx
@@ -8,6 +8,7 @@ import {
MatchErrorType,
computeVectorVectorBinOp,
filteredSampleValue,
+ MaybeFilledInstantSample,
} from "../../../../promql/binOp";
import { formatNode, labelNameList } from "../../../../promql/format";
import {
@@ -177,11 +178,10 @@ const explanationText = (node: BinaryExpr): React.ReactNode => {
) : (
-
- group_{manySide}({labelNameList(matching.include)})
-
- : {matching.card} match. Each series from the {oneSide}-hand side is
- allowed to match with multiple series from the {manySide}-hand side.
+ group_{manySide}
+ ({labelNameList(matching.include)}) : {matching.card} match. Each
+ series from the {oneSide}-hand side is allowed to match with
+ multiple series from the {manySide}-hand side.
{matching.include.length !== 0 && (
<>
{" "}
@@ -192,6 +192,55 @@ const explanationText = (node: BinaryExpr): React.ReactNode => {
)}
)}
+ {(matching.fillValues.lhs !== null ||
+ matching.fillValues.rhs !== null) &&
+ (matching.fillValues.lhs === matching.fillValues.rhs ? (
+
+ fill (
+
+ {matching.fillValues.lhs}
+
+ ) : For series on either side missing a match, fill in the sample
+ value{" "}
+
+ {matching.fillValues.lhs}
+
+ .
+
+ ) : (
+ <>
+ {matching.fillValues.lhs !== null && (
+
+ fill_left (
+
+ {matching.fillValues.lhs}
+
+ ) : For series on the left-hand side missing a match, fill in
+ the sample value{" "}
+
+ {matching.fillValues.lhs}
+
+ .
+
+ )}
+
+ {matching.fillValues.rhs !== null && (
+
+ fill_right
+ (
+
+ {matching.fillValues.rhs}
+
+ ) : For series on the right-hand side missing a match, fill in
+ the sample value{" "}
+
+ {matching.fillValues.rhs}
+
+ .
+
+ )}
+ >
+ ))}
{node.bool && (
bool : Instead of
@@ -239,7 +288,12 @@ const explainError = (
matching: {
...(binOp.matching
? binOp.matching
- : { labels: [], on: false, include: [] }),
+ : {
+ labels: [],
+ on: false,
+ include: [],
+ fillValues: { lhs: null, rhs: null },
+ }),
card:
err.dupeSide === "left"
? vectorMatchCardinality.manyToOne
@@ -403,7 +457,7 @@ const VectorVectorBinaryExprExplainView: FC<
);
const matchGroupTable = (
- series: InstantSample[],
+ series: MaybeFilledInstantSample[],
seriesCount: number,
color: string,
colorOffset?: number
@@ -458,6 +512,11 @@ const VectorVectorBinaryExprExplainView: FC<
)}
format={true}
/>
+ {s.filled && (
+
+ no match, filling in default value
+
+ )}
{showSampleValues && (
diff --git a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
index 4c3209e53a..2193dba267 100644
--- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
+++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
@@ -11,7 +11,6 @@ import {
useComputedColorScheme,
} from "@mantine/core";
import {
- CompleteStrategy,
PromQLExtension,
newCompleteStrategy,
} from "@prometheus-io/codemirror-promql";
@@ -36,12 +35,9 @@ import {
bracketMatching,
indentOnInput,
syntaxHighlighting,
- syntaxTree,
} from "@codemirror/language";
import classes from "./ExpressionInput.module.css";
import {
- CompletionContext,
- CompletionResult,
autocompletion,
closeBrackets,
closeBracketsKeymap,
@@ -71,50 +67,10 @@ import MetricsExplorer from "./MetricsExplorer/MetricsExplorer";
import ErrorBoundary from "../../components/ErrorBoundary";
import { useAppSelector } from "../../state/hooks";
import { inputIconStyle, menuIconStyle } from "../../styles";
+import { HistoryCompleteStrategy } from "./HistoryCompleteStrategy";
const promqlExtension = new PromQLExtension();
-// Autocompletion strategy that wraps the main one and enriches
-// it with past query items.
-export class HistoryCompleteStrategy implements CompleteStrategy {
- private complete: CompleteStrategy;
- private queryHistory: string[];
- constructor(complete: CompleteStrategy, queryHistory: string[]) {
- this.complete = complete;
- this.queryHistory = queryHistory;
- }
-
- promQL(
- context: CompletionContext
- ): Promise | CompletionResult | null {
- return Promise.resolve(this.complete.promQL(context)).then((res) => {
- const { state, pos } = context;
- const tree = syntaxTree(state).resolve(pos, -1);
- const start = res != null ? res.from : tree.from;
-
- if (start !== 0) {
- return res;
- }
-
- const historyItems: CompletionResult = {
- from: start,
- to: pos,
- options: this.queryHistory.map((q) => ({
- label: q.length < 80 ? q : q.slice(0, 76).concat("..."),
- detail: "past query",
- apply: q,
- info: q.length < 80 ? undefined : q,
- })),
- validFor: /^[a-zA-Z0-9_:]+$/,
- };
-
- if (res !== null) {
- historyItems.options = historyItems.options.concat(res.options);
- }
- return historyItems;
- });
- }
-}
interface ExpressionInputProps {
initialExpr: string;
diff --git a/web/ui/mantine-ui/src/pages/query/HistoryCompleteStrategy.tsx b/web/ui/mantine-ui/src/pages/query/HistoryCompleteStrategy.tsx
new file mode 100644
index 0000000000..e56f645fc8
--- /dev/null
+++ b/web/ui/mantine-ui/src/pages/query/HistoryCompleteStrategy.tsx
@@ -0,0 +1,45 @@
+// Autocompletion strategy that wraps the main one and enriches
+// it with past query items.
+import {CompleteStrategy} from "@prometheus-io/codemirror-promql";
+import {CompletionContext, CompletionResult} from "@codemirror/autocomplete";
+import {syntaxTree} from "@codemirror/language";
+
+export class HistoryCompleteStrategy implements CompleteStrategy {
+ private complete: CompleteStrategy;
+ private queryHistory: string[];
+ constructor(complete: CompleteStrategy, queryHistory: string[]) {
+ this.complete = complete;
+ this.queryHistory = queryHistory;
+ }
+
+ promQL(
+ context: CompletionContext
+ ): Promise | CompletionResult | null {
+ return Promise.resolve(this.complete.promQL(context)).then((res) => {
+ const { state, pos } = context;
+ const tree = syntaxTree(state).resolve(pos, -1);
+ const start = res != null ? res.from : tree.from;
+
+ if (start !== 0) {
+ return res;
+ }
+
+ const historyItems: CompletionResult = {
+ from: start,
+ to: pos,
+ options: this.queryHistory.map((q) => ({
+ label: q.length < 80 ? q : q.slice(0, 76).concat("..."),
+ detail: "past query",
+ apply: q,
+ info: q.length < 80 ? undefined : q,
+ })),
+ validFor: /^[a-zA-Z0-9_:]+$/,
+ };
+
+ if (res !== null) {
+ historyItems.options = historyItems.options.concat(res.options);
+ }
+ return historyItems;
+ });
+ }
+}
\ No newline at end of file
diff --git a/web/ui/mantine-ui/src/pages/query/urlStateEncoding.test.ts b/web/ui/mantine-ui/src/pages/query/urlStateEncoding.test.ts
new file mode 100644
index 0000000000..aef8369cd5
--- /dev/null
+++ b/web/ui/mantine-ui/src/pages/query/urlStateEncoding.test.ts
@@ -0,0 +1,648 @@
+import {
+ parseTime,
+ formatTime,
+ decodePanelOptionsFromURLParams,
+ encodePanelOptionsToURLParams,
+} from "./urlStateEncoding";
+import { GraphDisplayMode, Panel } from "../../state/queryPageSlice";
+
+describe("parseTime", () => {
+ test("parses ISO date string correctly", () => {
+ expect(parseTime("2024-01-15 12:30:45")).toBe(1705321845000);
+ });
+
+ test("parses date-only string correctly", () => {
+ expect(parseTime("2024-01-01 00:00:00")).toBe(1704067200000);
+ });
+
+ test("parses date with different time values", () => {
+ expect(parseTime("2024-06-15 23:59:59")).toBe(1718495999000);
+ });
+});
+
+describe("formatTime", () => {
+ test("formats timestamp to expected string format", () => {
+ expect(formatTime(1705321845000)).toBe("2024-01-15 12:30:45");
+ });
+
+ test("formats midnight correctly", () => {
+ expect(formatTime(1704067200000)).toBe("2024-01-01 00:00:00");
+ });
+
+ test("formats end of day correctly", () => {
+ expect(formatTime(1718495999000)).toBe("2024-06-15 23:59:59");
+ });
+});
+
+describe("parseTime and formatTime roundtrip", () => {
+ test("roundtrip preserves time", () => {
+ const original = "2024-03-20 15:45:30";
+ const timestamp = parseTime(original);
+ expect(formatTime(timestamp)).toBe(original);
+ });
+});
+
+describe("decodePanelOptionsFromURLParams", () => {
+ test("returns empty array for empty query string", () => {
+ expect(decodePanelOptionsFromURLParams("")).toEqual([]);
+ });
+
+ test("returns empty array when no expr parameter exists", () => {
+ expect(decodePanelOptionsFromURLParams("?foo=bar")).toEqual([]);
+ });
+
+ test("decodes single panel with expr only", () => {
+ const panels = decodePanelOptionsFromURLParams("g0.expr=up");
+ expect(panels).toHaveLength(1);
+ expect(panels[0].expr).toBe("up");
+ });
+
+ test("decodes URL-encoded expression", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=rate(http_requests_total%5B5m%5D)"
+ );
+ expect(panels).toHaveLength(1);
+ expect(panels[0].expr).toBe("rate(http_requests_total[5m])");
+ });
+
+ test("decodes multiple panels", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g1.expr=node_cpu_seconds_total"
+ );
+ expect(panels).toHaveLength(2);
+ expect(panels[0].expr).toBe("up");
+ expect(panels[1].expr).toBe("node_cpu_seconds_total");
+ });
+
+ test("decodes show_tree parameter", () => {
+ const panelsWithTree = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.show_tree=1"
+ );
+ expect(panelsWithTree[0].showTree).toBe(true);
+
+ const panelsWithoutTree = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.show_tree=0"
+ );
+ expect(panelsWithoutTree[0].showTree).toBe(false);
+ });
+
+ describe("tab parameter", () => {
+ test("decodes numeric tab value 0 as graph", () => {
+ const panels = decodePanelOptionsFromURLParams("g0.expr=up&g0.tab=0");
+ expect(panels[0].visualizer.activeTab).toBe("graph");
+ });
+
+ test("decodes numeric tab value 1 as table", () => {
+ const panels = decodePanelOptionsFromURLParams("g0.expr=up&g0.tab=1");
+ expect(panels[0].visualizer.activeTab).toBe("table");
+ });
+
+ test("decodes string tab value graph", () => {
+ const panels = decodePanelOptionsFromURLParams("g0.expr=up&g0.tab=graph");
+ expect(panels[0].visualizer.activeTab).toBe("graph");
+ });
+
+ test("decodes string tab value table", () => {
+ const panels = decodePanelOptionsFromURLParams("g0.expr=up&g0.tab=table");
+ expect(panels[0].visualizer.activeTab).toBe("table");
+ });
+
+ test("decodes string tab value explain", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.tab=explain"
+ );
+ expect(panels[0].visualizer.activeTab).toBe("explain");
+ });
+ });
+
+ describe("display_mode parameter", () => {
+ test("decodes lines display mode", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.display_mode=lines"
+ );
+ expect(panels[0].visualizer.displayMode).toBe(GraphDisplayMode.Lines);
+ });
+
+ test("decodes stacked display mode", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.display_mode=stacked"
+ );
+ expect(panels[0].visualizer.displayMode).toBe(GraphDisplayMode.Stacked);
+ });
+
+ test("decodes heatmap display mode", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.display_mode=heatmap"
+ );
+ expect(panels[0].visualizer.displayMode).toBe(GraphDisplayMode.Heatmap);
+ });
+ });
+
+ describe("legacy stacked parameter", () => {
+ test("decodes stacked=1 as stacked display mode", () => {
+ const panels = decodePanelOptionsFromURLParams("g0.expr=up&g0.stacked=1");
+ expect(panels[0].visualizer.displayMode).toBe(GraphDisplayMode.Stacked);
+ });
+
+ test("decodes stacked=0 as lines display mode", () => {
+ const panels = decodePanelOptionsFromURLParams("g0.expr=up&g0.stacked=0");
+ expect(panels[0].visualizer.displayMode).toBe(GraphDisplayMode.Lines);
+ });
+ });
+
+ test("decodes y_axis_min parameter", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.y_axis_min=10.5"
+ );
+ expect(panels[0].visualizer.yAxisMin).toBe(10.5);
+ });
+
+ test("decodes empty y_axis_min as null", () => {
+ const panels = decodePanelOptionsFromURLParams("g0.expr=up&g0.y_axis_min=");
+ expect(panels[0].visualizer.yAxisMin).toBeNull();
+ });
+
+ test("decodes show_exemplars parameter", () => {
+ const panelsWithExemplars = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.show_exemplars=1"
+ );
+ expect(panelsWithExemplars[0].visualizer.showExemplars).toBe(true);
+
+ const panelsWithoutExemplars = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.show_exemplars=0"
+ );
+ expect(panelsWithoutExemplars[0].visualizer.showExemplars).toBe(false);
+ });
+
+ test("decodes range_input parameter", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.range_input=2h"
+ );
+ expect(panels[0].visualizer.range).toBe(7200000); // 2 hours in ms
+ });
+
+ test("decodes end_input parameter", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.end_input=2024-01-15%2012%3A30%3A45"
+ );
+ expect(panels[0].visualizer.endTime).toBe(1705321845000);
+ });
+
+ test("decodes moment_input parameter", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.moment_input=2024-01-15%2012%3A30%3A45"
+ );
+ expect(panels[0].visualizer.endTime).toBe(1705321845000);
+ });
+
+ describe("legacy step_input parameter", () => {
+ test("decodes positive step_input as custom resolution", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.step_input=15"
+ );
+ expect(panels[0].visualizer.resolution).toEqual({
+ type: "custom",
+ step: 15000,
+ });
+ });
+
+ test("ignores non-positive step_input", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.step_input=0"
+ );
+ expect(panels[0].visualizer.resolution).toEqual({
+ type: "auto",
+ density: "medium",
+ });
+ });
+ });
+
+ describe("resolution parameters", () => {
+ test("decodes auto resolution with low density", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.res_type=auto&g0.res_density=low"
+ );
+ expect(panels[0].visualizer.resolution).toEqual({
+ type: "auto",
+ density: "low",
+ });
+ });
+
+ test("decodes auto resolution with medium density", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.res_type=auto&g0.res_density=medium"
+ );
+ expect(panels[0].visualizer.resolution).toEqual({
+ type: "auto",
+ density: "medium",
+ });
+ });
+
+ test("decodes auto resolution with high density", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.res_type=auto&g0.res_density=high"
+ );
+ expect(panels[0].visualizer.resolution).toEqual({
+ type: "auto",
+ density: "high",
+ });
+ });
+
+ test("decodes fixed resolution", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.res_type=fixed&g0.res_step=30"
+ );
+ expect(panels[0].visualizer.resolution).toEqual({
+ type: "fixed",
+ step: 30000,
+ });
+ });
+
+ test("decodes custom resolution", () => {
+ const panels = decodePanelOptionsFromURLParams(
+ "g0.expr=up&g0.res_type=custom&g0.res_step=60"
+ );
+ expect(panels[0].visualizer.resolution).toEqual({
+ type: "custom",
+ step: 60000,
+ });
+ });
+ });
+
+ test("decodes complex panel with all parameters", () => {
+ const queryString =
+ "g0.expr=rate(http_requests_total%5B5m%5D)" +
+ "&g0.show_tree=1" +
+ "&g0.tab=graph" +
+ "&g0.display_mode=stacked" +
+ "&g0.y_axis_min=0" +
+ "&g0.show_exemplars=1" +
+ "&g0.range_input=1h" +
+ "&g0.end_input=2024-01-15%2012%3A30%3A45" +
+ "&g0.res_type=fixed" +
+ "&g0.res_step=15";
+
+ const panels = decodePanelOptionsFromURLParams(queryString);
+ expect(panels).toHaveLength(1);
+ expect(panels[0].expr).toBe("rate(http_requests_total[5m])");
+ expect(panels[0].showTree).toBe(true);
+ expect(panels[0].visualizer.activeTab).toBe("graph");
+ expect(panels[0].visualizer.displayMode).toBe(GraphDisplayMode.Stacked);
+ expect(panels[0].visualizer.yAxisMin).toBe(0);
+ expect(panels[0].visualizer.showExemplars).toBe(true);
+ expect(panels[0].visualizer.range).toBe(3600000);
+ expect(panels[0].visualizer.endTime).toBe(1705321845000);
+ expect(panels[0].visualizer.resolution).toEqual({
+ type: "fixed",
+ step: 15000,
+ });
+ });
+});
+
+describe("encodePanelOptionsToURLParams", () => {
+ const createPanel = (overrides: Partial = {}): Panel => ({
+ id: "test-id",
+ expr: "up",
+ showTree: false,
+ showMetricsExplorer: false,
+ visualizer: {
+ activeTab: "table",
+ endTime: null,
+ range: 3600000,
+ resolution: { type: "auto", density: "medium" },
+ displayMode: GraphDisplayMode.Lines,
+ showExemplars: false,
+ yAxisMin: null,
+ },
+ ...overrides,
+ });
+
+ test("encodes single panel with basic settings", () => {
+ const panel = createPanel();
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.get("g0.expr")).toBe("up");
+ expect(params.get("g0.show_tree")).toBe("0");
+ expect(params.get("g0.tab")).toBe("table");
+ expect(params.get("g0.range_input")).toBe("1h");
+ expect(params.get("g0.display_mode")).toBe("lines");
+ expect(params.get("g0.show_exemplars")).toBe("0");
+ });
+
+ test("encodes multiple panels", () => {
+ const panel1 = createPanel({ expr: "up" });
+ const panel2 = createPanel({ expr: "node_cpu_seconds_total" });
+ const params = encodePanelOptionsToURLParams([panel1, panel2]);
+
+ expect(params.get("g0.expr")).toBe("up");
+ expect(params.get("g1.expr")).toBe("node_cpu_seconds_total");
+ });
+
+ test("encodes show_tree as 1 when true", () => {
+ const panel = createPanel({ showTree: true });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.get("g0.show_tree")).toBe("1");
+ });
+
+ test("encodes different tab values", () => {
+ const graphPanel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ activeTab: "graph",
+ },
+ });
+ const tablePanel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ activeTab: "table",
+ },
+ });
+ const explainPanel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ activeTab: "explain",
+ },
+ });
+
+ expect(encodePanelOptionsToURLParams([graphPanel]).get("g0.tab")).toBe(
+ "graph"
+ );
+ expect(encodePanelOptionsToURLParams([tablePanel]).get("g0.tab")).toBe(
+ "table"
+ );
+ expect(encodePanelOptionsToURLParams([explainPanel]).get("g0.tab")).toBe(
+ "explain"
+ );
+ });
+
+ test("encodes endTime when set", () => {
+ const panel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ endTime: 1705321845000,
+ },
+ });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.get("g0.end_input")).toBe("2024-01-15 12:30:45");
+ expect(params.get("g0.moment_input")).toBe("2024-01-15 12:30:45");
+ });
+
+ test("does not encode endTime when null", () => {
+ const panel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ endTime: null,
+ },
+ });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.has("g0.end_input")).toBe(false);
+ expect(params.has("g0.moment_input")).toBe(false);
+ });
+
+ test("encodes range_input in Prometheus duration format", () => {
+ const panel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ range: 7200000, // 2 hours
+ },
+ });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.get("g0.range_input")).toBe("2h");
+ });
+
+ describe("resolution encoding", () => {
+ test("encodes auto resolution with density", () => {
+ const panel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ resolution: { type: "auto", density: "high" },
+ },
+ });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.get("g0.res_type")).toBe("auto");
+ expect(params.get("g0.res_density")).toBe("high");
+ });
+
+ test("encodes fixed resolution with step", () => {
+ const panel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ resolution: { type: "fixed", step: 30000 },
+ },
+ });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.get("g0.res_type")).toBe("fixed");
+ expect(params.get("g0.res_step")).toBe("30");
+ });
+
+ test("encodes custom resolution with step", () => {
+ const panel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ resolution: { type: "custom", step: 60000 },
+ },
+ });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.get("g0.res_type")).toBe("custom");
+ expect(params.get("g0.res_step")).toBe("60");
+ });
+ });
+
+ test("encodes display_mode", () => {
+ const linesPanel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ displayMode: GraphDisplayMode.Lines,
+ },
+ });
+ const stackedPanel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ displayMode: GraphDisplayMode.Stacked,
+ },
+ });
+ const heatmapPanel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ displayMode: GraphDisplayMode.Heatmap,
+ },
+ });
+
+ expect(
+ encodePanelOptionsToURLParams([linesPanel]).get("g0.display_mode")
+ ).toBe("lines");
+ expect(
+ encodePanelOptionsToURLParams([stackedPanel]).get("g0.display_mode")
+ ).toBe("stacked");
+ expect(
+ encodePanelOptionsToURLParams([heatmapPanel]).get("g0.display_mode")
+ ).toBe("heatmap");
+ });
+
+ test("encodes y_axis_min when set", () => {
+ const panel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ yAxisMin: 10.5,
+ },
+ });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.get("g0.y_axis_min")).toBe("10.5");
+ });
+
+ test("does not encode y_axis_min when null", () => {
+ const panel = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ yAxisMin: null,
+ },
+ });
+ const params = encodePanelOptionsToURLParams([panel]);
+
+ expect(params.has("g0.y_axis_min")).toBe(false);
+ });
+
+ test("encodes show_exemplars", () => {
+ const panelWithExemplars = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ showExemplars: true,
+ },
+ });
+ const panelWithoutExemplars = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ showExemplars: false,
+ },
+ });
+
+ expect(
+ encodePanelOptionsToURLParams([panelWithExemplars]).get(
+ "g0.show_exemplars"
+ )
+ ).toBe("1");
+ expect(
+ encodePanelOptionsToURLParams([panelWithoutExemplars]).get(
+ "g0.show_exemplars"
+ )
+ ).toBe("0");
+ });
+
+ test("encodes empty panels array", () => {
+ const params = encodePanelOptionsToURLParams([]);
+ expect(params.toString()).toBe("");
+ });
+});
+
+describe("encode and decode roundtrip", () => {
+ const createPanel = (overrides: Partial = {}): Panel => ({
+ id: "test-id",
+ expr: "up",
+ showTree: false,
+ showMetricsExplorer: false,
+ visualizer: {
+ activeTab: "table",
+ endTime: null,
+ range: 3600000,
+ resolution: { type: "auto", density: "medium" },
+ displayMode: GraphDisplayMode.Lines,
+ showExemplars: false,
+ yAxisMin: null,
+ },
+ ...overrides,
+ });
+
+ test("roundtrip preserves basic panel settings", () => {
+ const original = createPanel({
+ expr: "rate(http_requests_total[5m])",
+ showTree: true,
+ });
+ const encoded = encodePanelOptionsToURLParams([original]);
+ const decoded = decodePanelOptionsFromURLParams(encoded.toString());
+
+ expect(decoded).toHaveLength(1);
+ expect(decoded[0].expr).toBe(original.expr);
+ expect(decoded[0].showTree).toBe(original.showTree);
+ });
+
+ test("roundtrip preserves visualizer settings", () => {
+ const original = createPanel({
+ visualizer: {
+ activeTab: "graph",
+ endTime: 1705321845000,
+ range: 7200000,
+ resolution: { type: "fixed", step: 30000 },
+ displayMode: GraphDisplayMode.Stacked,
+ showExemplars: true,
+ yAxisMin: 0,
+ },
+ });
+ const encoded = encodePanelOptionsToURLParams([original]);
+ const decoded = decodePanelOptionsFromURLParams(encoded.toString());
+
+ expect(decoded).toHaveLength(1);
+ expect(decoded[0].visualizer.activeTab).toBe(original.visualizer.activeTab);
+ expect(decoded[0].visualizer.endTime).toBe(original.visualizer.endTime);
+ expect(decoded[0].visualizer.range).toBe(original.visualizer.range);
+ expect(decoded[0].visualizer.resolution).toEqual(
+ original.visualizer.resolution
+ );
+ expect(decoded[0].visualizer.displayMode).toBe(
+ original.visualizer.displayMode
+ );
+ expect(decoded[0].visualizer.showExemplars).toBe(
+ original.visualizer.showExemplars
+ );
+ expect(decoded[0].visualizer.yAxisMin).toBe(original.visualizer.yAxisMin);
+ });
+
+ test("roundtrip preserves multiple panels", () => {
+ const panels = [
+ createPanel({ expr: "up" }),
+ createPanel({ expr: "node_cpu_seconds_total", showTree: true }),
+ createPanel({
+ expr: "rate(http_requests_total[5m])",
+ visualizer: {
+ ...createPanel().visualizer,
+ activeTab: "graph",
+ displayMode: GraphDisplayMode.Heatmap,
+ },
+ }),
+ ];
+ const encoded = encodePanelOptionsToURLParams(panels);
+ const decoded = decodePanelOptionsFromURLParams(encoded.toString());
+
+ expect(decoded).toHaveLength(3);
+ expect(decoded[0].expr).toBe("up");
+ expect(decoded[1].expr).toBe("node_cpu_seconds_total");
+ expect(decoded[1].showTree).toBe(true);
+ expect(decoded[2].expr).toBe("rate(http_requests_total[5m])");
+ expect(decoded[2].visualizer.displayMode).toBe(GraphDisplayMode.Heatmap);
+ });
+
+ test("roundtrip preserves auto resolution with all densities", () => {
+ for (const density of ["low", "medium", "high"] as const) {
+ const original = createPanel({
+ visualizer: {
+ ...createPanel().visualizer,
+ resolution: { type: "auto", density },
+ },
+ });
+ const encoded = encodePanelOptionsToURLParams([original]);
+ const decoded = decodePanelOptionsFromURLParams(encoded.toString());
+
+ expect(decoded[0].visualizer.resolution).toEqual({
+ type: "auto",
+ density,
+ });
+ }
+ });
+});
diff --git a/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts b/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts
index 18b63d9ed4..a20a6fae36 100644
--- a/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts
+++ b/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts
@@ -64,7 +64,7 @@ export const decodePanelOptionsFromURLParams = (query: string): Panel[] => {
value === "1" ? GraphDisplayMode.Stacked : GraphDisplayMode.Lines;
});
decodeSetting("y_axis_min", (value) => {
- panel.visualizer.yAxisMin = value === null ? null : parseFloat(value);
+ panel.visualizer.yAxisMin = value === "" ? null : parseFloat(value);
});
decodeSetting("show_exemplars", (value) => {
panel.visualizer.showExemplars = value === "1";
@@ -174,11 +174,11 @@ export const encodePanelOptionsToURLParams = (
}
addParam(idx, "display_mode", p.visualizer.displayMode);
- addParam(
- idx,
- "y_axis_min",
- p.visualizer.yAxisMin === null ? "" : p.visualizer.yAxisMin.toString()
- );
+
+ if (p.visualizer.yAxisMin !== null) {
+ addParam(idx, "y_axis_min", p.visualizer.yAxisMin.toString());
+ }
+
addParam(idx, "show_exemplars", p.visualizer.showExemplars ? "1" : "0");
});
diff --git a/web/ui/mantine-ui/src/promql/ast.ts b/web/ui/mantine-ui/src/promql/ast.ts
index 94872c6db0..9f8c5cb102 100644
--- a/web/ui/mantine-ui/src/promql/ast.ts
+++ b/web/ui/mantine-ui/src/promql/ast.ts
@@ -104,11 +104,16 @@ export interface LabelMatcher {
value: string;
}
+export interface FillValues {
+ lhs: number | null;
+ rhs: number | null;
+}
export interface VectorMatching {
card: vectorMatchCardinality;
labels: string[];
on: boolean;
include: string[];
+ fillValues: FillValues;
}
export type StartOrEnd = "start" | "end" | null;
diff --git a/web/ui/mantine-ui/src/promql/binOp.test.ts b/web/ui/mantine-ui/src/promql/binOp.test.ts
index 72ef16947b..76dd24fa79 100644
--- a/web/ui/mantine-ui/src/promql/binOp.test.ts
+++ b/web/ui/mantine-ui/src/promql/binOp.test.ts
@@ -81,6 +81,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -247,6 +248,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1", "label2"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -413,6 +415,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: ["same"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -579,6 +582,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricB,
rhs: testMetricC,
@@ -701,6 +705,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricB,
rhs: testMetricC,
@@ -791,6 +796,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricB,
rhs: testMetricC,
@@ -905,6 +911,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricC,
rhs: testMetricB,
@@ -1019,6 +1026,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricC,
rhs: testMetricB,
@@ -1107,6 +1115,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -1223,6 +1232,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -1409,6 +1419,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -1596,6 +1607,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -1763,6 +1775,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -1929,6 +1942,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -2022,6 +2036,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricB,
rhs: testMetricC,
@@ -2105,6 +2120,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricB,
rhs: testMetricC,
@@ -2147,6 +2163,437 @@ const testCases: TestCase[] = [
numGroups: 2,
},
},
+ {
+ // metric_a - fill(0) metric_b
+ desc: "subtraction with fill(0) but no missing series",
+ op: binaryOperatorType.sub,
+ matching: {
+ card: vectorMatchCardinality.oneToOne,
+ on: false,
+ include: [],
+ labels: [],
+ fillValues: { lhs: 0, rhs: 0 },
+ },
+ lhs: testMetricA,
+ rhs: testMetricB,
+ result: {
+ groups: {
+ [fnv1a(["a", "x", "same"])]: {
+ groupLabels: { label1: "a", label2: "x", same: "same" },
+ lhs: [
+ {
+ metric: {
+ __name__: "metric_a",
+ label1: "a",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "1"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "a",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "10"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "a", label2: "x", same: "same" },
+ value: [0, "-9"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ [fnv1a(["a", "y", "same"])]: {
+ groupLabels: { label1: "a", label2: "y", same: "same" },
+ lhs: [
+ {
+ metric: {
+ __name__: "metric_a",
+ label1: "a",
+ label2: "y",
+ same: "same",
+ },
+ value: [0, "2"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "a",
+ label2: "y",
+ same: "same",
+ },
+ value: [0, "20"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "a", label2: "y", same: "same" },
+ value: [0, "-18"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ [fnv1a(["b", "x", "same"])]: {
+ groupLabels: { label1: "b", label2: "x", same: "same" },
+ lhs: [
+ {
+ metric: {
+ __name__: "metric_a",
+ label1: "b",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "3"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "b",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "30"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "b", label2: "x", same: "same" },
+ value: [0, "-27"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ [fnv1a(["b", "y", "same"])]: {
+ groupLabels: { label1: "b", label2: "y", same: "same" },
+ lhs: [
+ {
+ metric: {
+ __name__: "metric_a",
+ label1: "b",
+ label2: "y",
+ same: "same",
+ },
+ value: [0, "4"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "b",
+ label2: "y",
+ same: "same",
+ },
+ value: [0, "40"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "b", label2: "y", same: "same" },
+ value: [0, "-36"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ },
+ numGroups: 4,
+ },
+ },
+ {
+ // metric_a[0..2] - fill_left(23) fill_right(42) metric_b[1...3]
+ desc: "subtraction with different fill values and missing series on each side",
+ op: binaryOperatorType.sub,
+ matching: {
+ card: vectorMatchCardinality.oneToOne,
+ on: false,
+ include: [],
+ labels: [],
+ fillValues: { lhs: 23, rhs: 42 },
+ },
+ lhs: testMetricA.slice(0, 3),
+ rhs: testMetricB.slice(1, 4),
+ result: {
+ groups: {
+ [fnv1a(["a", "x", "same"])]: {
+ groupLabels: { label1: "a", label2: "x", same: "same" },
+ lhs: [
+ {
+ metric: {
+ __name__: "metric_a",
+ label1: "a",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "1"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: {
+ label1: "a",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "42"],
+ filled: true,
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "a", label2: "x", same: "same" },
+ value: [0, "-41"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ [fnv1a(["a", "y", "same"])]: {
+ groupLabels: { label1: "a", label2: "y", same: "same" },
+ lhs: [
+ {
+ metric: {
+ __name__: "metric_a",
+ label1: "a",
+ label2: "y",
+ same: "same",
+ },
+ value: [0, "2"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "a",
+ label2: "y",
+ same: "same",
+ },
+ value: [0, "20"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "a", label2: "y", same: "same" },
+ value: [0, "-18"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ [fnv1a(["b", "x", "same"])]: {
+ groupLabels: { label1: "b", label2: "x", same: "same" },
+ lhs: [
+ {
+ metric: {
+ __name__: "metric_a",
+ label1: "b",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "3"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "b",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "30"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "b", label2: "x", same: "same" },
+ value: [0, "-27"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ [fnv1a(["b", "y", "same"])]: {
+ groupLabels: { label1: "b", label2: "y", same: "same" },
+ lhs: [
+ {
+ metric: {
+ label1: "b",
+ label2: "y",
+ same: "same",
+ },
+ filled: true,
+ value: [0, "23"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "b",
+ label2: "y",
+ same: "same",
+ },
+ value: [0, "40"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "b", label2: "y", same: "same" },
+ value: [0, "-17"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ },
+ numGroups: 4,
+ },
+ },
+ {
+ // metric_b[0...1] - on(label1) group_left fill(0) metric_c
+ desc: "many-to-one matching with matching labels specified, group_left, and fill specified",
+ op: binaryOperatorType.sub,
+ matching: {
+ card: vectorMatchCardinality.manyToOne,
+ on: true,
+ include: [],
+ labels: ["label1"],
+ fillValues: { lhs: 0, rhs: 0 },
+ },
+ lhs: testMetricB.slice(0, 2),
+ rhs: testMetricC,
+ result: {
+ groups: {
+ [fnv1a(["a"])]: {
+ groupLabels: { label1: "a" },
+ lhs: [
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "a",
+ label2: "x",
+ same: "same",
+ },
+ value: [0, "10"],
+ },
+ {
+ metric: {
+ __name__: "metric_b",
+ label1: "a",
+ label2: "y",
+ same: "same",
+ },
+ value: [0, "20"],
+ },
+ ],
+ lhsCount: 2,
+ rhs: [
+ {
+ metric: { __name__: "metric_c", label1: "a" },
+ value: [0, "100"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "a", label2: "x", same: "same" },
+ value: [0, "-90"],
+ },
+ manySideIdx: 0,
+ },
+ {
+ sample: {
+ metric: { label1: "a", label2: "y", same: "same" },
+ value: [0, "-80"],
+ },
+ manySideIdx: 1,
+ },
+ ],
+ error: null,
+ },
+ [fnv1a(["b"])]: {
+ groupLabels: { label1: "b" },
+ lhs: [
+ {
+ metric: {
+ label1: "b",
+ },
+ filled: true,
+ value: [0, "0"],
+ },
+ ],
+ lhsCount: 1,
+ rhs: [
+ {
+ metric: { __name__: "metric_c", label1: "b" },
+ value: [0, "200"],
+ },
+ ],
+ rhsCount: 1,
+ result: [
+ {
+ sample: {
+ metric: { label1: "b" },
+ value: [0, "-200"],
+ },
+ manySideIdx: 0,
+ },
+ ],
+ error: null,
+ },
+ },
+ numGroups: 2,
+ },
+ },
{
// metric_a and metric b
desc: "and operator with no matching labels and matching groups",
@@ -2156,6 +2603,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA,
rhs: testMetricB,
@@ -2342,6 +2790,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA.slice(0, 3),
rhs: testMetricB.slice(1, 4),
@@ -2474,6 +2923,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA.slice(0, 3),
rhs: testMetricB.slice(1, 4),
@@ -2568,6 +3018,7 @@ const testCases: TestCase[] = [
on: true,
include: [],
labels: ["label1"],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA.slice(0, 3),
rhs: testMetricB.slice(1, 4),
@@ -2700,6 +3151,7 @@ const testCases: TestCase[] = [
on: false,
include: [],
labels: [],
+ fillValues: { lhs: null, rhs: null },
},
lhs: testMetricA.slice(0, 3),
rhs: testMetricB.slice(1, 4),
@@ -2886,6 +3338,7 @@ describe("binOp", () => {
on: true,
labels: ["label1"],
include: [],
+ fillValues: { lhs: null, rhs: null },
};
const result = resultMetric(lhs, rhs, op, matching);
@@ -2911,6 +3364,7 @@ describe("binOp", () => {
on: true,
labels: ["label1"],
include: [],
+ fillValues: { lhs: null, rhs: null },
};
const result = resultMetric(lhs, rhs, op, matching);
@@ -2931,6 +3385,7 @@ describe("binOp", () => {
on: true,
labels: ["label1"],
include: ["label2"],
+ fillValues: { lhs: null, rhs: null },
};
const result = resultMetric(lhs, rhs, op, matching);
diff --git a/web/ui/mantine-ui/src/promql/binOp.ts b/web/ui/mantine-ui/src/promql/binOp.ts
index dbfa64be2c..9ebee90f64 100644
--- a/web/ui/mantine-ui/src/promql/binOp.ts
+++ b/web/ui/mantine-ui/src/promql/binOp.ts
@@ -45,13 +45,18 @@ export type VectorMatchError =
| MultipleMatchesOnBothSidesError
| MultipleMatchesOnOneSideError;
+export type MaybeFilledInstantSample = InstantSample & {
+ // If the sample was filled in via a fill(...) modifier, this is true.
+ filled?: boolean;
+};
+
// A single match group as produced by a vector-to-vector binary operation, with all of its
// left-hand side and right-hand side series, as well as a result and error, if applicable.
export type BinOpMatchGroup = {
groupLabels: Metric;
- rhs: InstantSample[];
+ rhs: MaybeFilledInstantSample[];
rhsCount: number; // Number of samples before applying limits.
- lhs: InstantSample[];
+ lhs: MaybeFilledInstantSample[];
lhsCount: number; // Number of samples before applying limits.
result: {
sample: InstantSample;
@@ -338,6 +343,26 @@ export const computeVectorVectorBinOp = (
groups[sig].lhsCount++;
});
+ // Check for any LHS / RHS with no series and fill in default values, if specified.
+ Object.values(groups).forEach((mg) => {
+ if (mg.lhs.length === 0 && matching.fillValues.lhs !== null) {
+ mg.lhs.push({
+ metric: mg.groupLabels,
+ value: [0, formatPrometheusFloat(matching.fillValues.lhs as number)],
+ filled: true,
+ });
+ mg.lhsCount = 1;
+ }
+ if (mg.rhs.length === 0 && matching.fillValues.rhs !== null) {
+ mg.rhs.push({
+ metric: mg.groupLabels,
+ value: [0, formatPrometheusFloat(matching.fillValues.rhs as number)],
+ filled: true,
+ });
+ mg.rhsCount = 1;
+ }
+ });
+
// Annotate the match groups with errors (if any) and populate the results.
Object.values(groups).forEach((mg) => {
switch (matching.card) {
diff --git a/web/ui/mantine-ui/src/promql/format.tsx b/web/ui/mantine-ui/src/promql/format.tsx
index 75b1965b35..8602c65a82 100644
--- a/web/ui/mantine-ui/src/promql/format.tsx
+++ b/web/ui/mantine-ui/src/promql/format.tsx
@@ -265,6 +265,7 @@ const formatNodeInternal = (
case nodeType.binaryExpr: {
let matching = <>>;
let grouping = <>>;
+ let fill = <>>;
const vm = node.matching;
if (vm !== null) {
if (
@@ -305,6 +306,45 @@ const formatNodeInternal = (
>
);
}
+
+ const lfill = vm.fillValues.lhs;
+ const rfill = vm.fillValues.rhs;
+ if (lfill !== null || rfill !== null) {
+ if (lfill === rfill) {
+ fill = (
+ <>
+ {" "}
+ fill
+ (
+ {lfill}
+ )
+ >
+ );
+ } else {
+ fill = (
+ <>
+ {lfill !== null && (
+ <>
+ {" "}
+ fill_left
+ (
+ {lfill}
+ )
+ >
+ )}
+ {rfill !== null && (
+ <>
+ {" "}
+ fill_right
+ (
+ {rfill}
+ )
+ >
+ )}
+ >
+ );
+ }
+ }
}
return (
@@ -327,7 +367,8 @@ const formatNodeInternal = (
>
)}
{matching}
- {grouping}{" "}
+ {grouping}
+ {fill}{" "}
{showChildren &&
formatNode(
maybeParenthesizeBinopChild(node.op, node.rhs),
diff --git a/web/ui/mantine-ui/src/promql/functionDocs.tsx b/web/ui/mantine-ui/src/promql/functionDocs.tsx
index a9d9ca53a9..4cc70a39e6 100644
--- a/web/ui/mantine-ui/src/promql/functionDocs.tsx
+++ b/web/ui/mantine-ui/src/promql/functionDocs.tsx
@@ -1756,6 +1756,12 @@ const funcDocs: Record = {
.
+
+ Note that if there are any time series in v that match the data-label-selector (or the
+ default target_info if that argument is not specified), they will be treated as info series and
+ will be returned unchanged.
+
+
Limitations
diff --git a/web/ui/mantine-ui/src/promql/serialize.ts b/web/ui/mantine-ui/src/promql/serialize.ts
index 584e1ae9ff..50c32c49e4 100644
--- a/web/ui/mantine-ui/src/promql/serialize.ts
+++ b/web/ui/mantine-ui/src/promql/serialize.ts
@@ -135,6 +135,7 @@ const serializeNode = (
case nodeType.binaryExpr: {
let matching = "";
let grouping = "";
+ let fill = "";
const vm = node.matching;
if (vm !== null) {
if (
@@ -152,11 +153,26 @@ const serializeNode = (
) {
grouping = ` group_${vm.card === vectorMatchCardinality.manyToOne ? "left" : "right"}(${labelNameList(vm.include)})`;
}
+
+ const lfill = vm.fillValues.lhs;
+ const rfill = vm.fillValues.rhs;
+ if (lfill !== null || rfill !== null) {
+ if (lfill === rfill) {
+ fill = ` fill(${lfill})`;
+ } else {
+ if (lfill !== null) {
+ fill += ` fill_left(${lfill})`;
+ }
+ if (rfill !== null) {
+ fill += ` fill_right(${rfill})`;
+ }
+ }
+ }
}
return `${serializeNode(maybeParenthesizeBinopChild(node.op, node.lhs), childIndent, pretty)}${childSeparator}${ind}${
node.op
- }${node.bool ? " bool" : ""}${matching}${grouping}${childSeparator}${serializeNode(
+ }${node.bool ? " bool" : ""}${matching}${grouping}${fill}${childSeparator}${serializeNode(
maybeParenthesizeBinopChild(node.op, node.rhs),
childIndent,
pretty
diff --git a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts
index a3734d311f..f9ff039882 100644
--- a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts
+++ b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts
@@ -658,6 +658,7 @@ describe("serializeNode and formatNode", () => {
labels: [],
on: false,
include: [],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -677,6 +678,7 @@ describe("serializeNode and formatNode", () => {
labels: [],
on: true,
include: [],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -696,6 +698,7 @@ describe("serializeNode and formatNode", () => {
labels: ["label1", "label2"],
on: true,
include: [],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -715,6 +718,7 @@ describe("serializeNode and formatNode", () => {
labels: ["label1", "label2"],
on: false,
include: [],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -735,6 +739,7 @@ describe("serializeNode and formatNode", () => {
labels: [],
on: false,
include: [],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -755,6 +760,7 @@ describe("serializeNode and formatNode", () => {
labels: [],
on: false,
include: ["__name__"],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -774,6 +780,7 @@ describe("serializeNode and formatNode", () => {
labels: ["label1", "label2"],
on: true,
include: [],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -793,6 +800,7 @@ describe("serializeNode and formatNode", () => {
labels: ["label1", "label2"],
on: true,
include: ["label3"],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -812,6 +820,7 @@ describe("serializeNode and formatNode", () => {
labels: ["label1", "label2"],
on: true,
include: [],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -831,6 +840,7 @@ describe("serializeNode and formatNode", () => {
labels: ["label1", "label2"],
on: true,
include: ["label3"],
+ fillValues: { lhs: null, rhs: null },
},
bool: false,
},
@@ -864,6 +874,7 @@ describe("serializeNode and formatNode", () => {
labels: ["label1", "label2"],
on: true,
include: ["label3"],
+ fillValues: { lhs: null, rhs: null },
},
bool: true,
},
@@ -911,6 +922,7 @@ describe("serializeNode and formatNode", () => {
include: ["c", "ü"],
labels: ["b", "ö"],
on: true,
+ fillValues: { lhs: null, rhs: null },
},
op: binaryOperatorType.div,
rhs: {
@@ -948,6 +960,7 @@ describe("serializeNode and formatNode", () => {
include: [],
labels: ["e", "ö"],
on: false,
+ fillValues: { lhs: null, rhs: null },
},
op: binaryOperatorType.add,
rhs: {
diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod
index a3abc881e2..693b168206 100644
--- a/web/ui/mantine-ui/src/promql/tools/go.mod
+++ b/web/ui/mantine-ui/src/promql/tools/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools
-go 1.24.0
+go 1.25.5
require (
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
@@ -9,18 +9,34 @@ require (
)
require (
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dennwc/varint v1.0.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
+ github.com/klauspost/compress v1.18.4 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
+ github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
- github.com/prometheus/common v0.67.4 // indirect
+ github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/prometheus/sigv4 v0.4.1 // indirect
+ github.com/rogpeppe/go-internal v1.14.1 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
- golang.org/x/sys v0.37.0 // indirect
- golang.org/x/text v0.30.0 // indirect
- google.golang.org/protobuf v1.36.10 // indirect
+ golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/oauth2 v0.35.0 // indirect
+ golang.org/x/sys v0.41.0 // indirect
+ golang.org/x/text v0.34.0 // indirect
+ google.golang.org/api v0.266.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
+ google.golang.org/protobuf v1.36.11 // indirect
+ k8s.io/client-go v0.35.0 // indirect
)
+
+replace cloud.google.com/go => cloud.google.com/go v0.123.0
diff --git a/web/ui/mantine-ui/src/promql/tools/go.sum b/web/ui/mantine-ui/src/promql/tools/go.sum
index 40c792d93d..0e069f5a8c 100644
--- a/web/ui/mantine-ui/src/promql/tools/go.sum
+++ b/web/ui/mantine-ui/src/promql/tools/go.sum
@@ -1,45 +1,47 @@
-cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
-cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
+cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs=
+cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
-github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
-github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
-github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
-github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
+github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
+github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
+github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
-github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
-github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
+github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
+github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -57,8 +59,8 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
-github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
+github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
@@ -67,16 +69,16 @@ github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
-github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
-github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
-github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
+github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao=
+github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8=
+github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc=
+github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
-github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
+github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -96,77 +98,77 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
-github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca h1:BOxmsLoL2ymn8lXJtorca7N/m+2vDQUDoEtPjf0iAxA=
-github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca/go.mod h1:gndBHh3ZdjBozGcGrjUYjN3UJLRS3l2drALtu4lUt+k=
+github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 h1:vwqZvuobg82U0gcG2eVrFH27806bUbNr32SvfRbvdsg=
+github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562/go.mod h1:PmAYDB13uBFBG9qE1qxZZgZWhg7Rg6SfKM5DMK7hjyI=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
-github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
-github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
+github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/prometheus v0.308.1 h1:ApMNI/3/es3Ze90Z7CMb+wwU2BsSYur0m5VKeqHj7h4=
github.com/prometheus/prometheus v0.308.1/go.mod h1:aHjYCDz9zKRyoUXvMWvu13K9XHOkBB12XrEqibs3e0A=
-github.com/prometheus/sigv4 v0.3.0 h1:QIG7nTbu0JTnNidGI1Uwl5AGVIChWUACxn2B/BQ1kms=
-github.com/prometheus/sigv4 v0.3.0/go.mod h1:fKtFYDus2M43CWKMNtGvFNHGXnAJJEGZbiYCmVp/F8I=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/prometheus/sigv4 v0.4.1 h1:EIc3j+8NBea9u1iV6O5ZAN8uvPq2xOIUPcqCTivHuXs=
+github.com/prometheus/sigv4 v0.4.1/go.mod h1:eu+ZbRvsc5TPiHwqh77OWuCnWK73IdkETYY46P4dXOU=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
-golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
-golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
-golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
-golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
-golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
-golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
-golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
-golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
-golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
-golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
-golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
-golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
-golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
-google.golang.org/api v0.252.0 h1:xfKJeAJaMwb8OC9fesr369rjciQ704AjU/psjkKURSI=
-google.golang.org/api v0.252.0/go.mod h1:dnHOv81x5RAmumZ7BWLShB/u7JZNeyalImxHmtTHxqw=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
-google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
-google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
-google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
-google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
+golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
+golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
+golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
+golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
+golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
+golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
+google.golang.org/api v0.266.0 h1:hco+oNCf9y7DmLeAtHJi/uBAY7n/7XC9mZPxu1ROiyk=
+google.golang.org/api v0.266.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
-k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
-k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
-k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
+k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
+k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
-k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
diff --git a/web/ui/mantine-ui/src/state/settingsSlice.ts b/web/ui/mantine-ui/src/state/settingsSlice.ts
index 8b4a33bf76..a3e133380a 100644
--- a/web/ui/mantine-ui/src/state/settingsSlice.ts
+++ b/web/ui/mantine-ui/src/state/settingsSlice.ts
@@ -102,7 +102,7 @@ export const initialState: Settings = {
),
showAnnotations: initializeFromLocalStorage(
localStorageKeyShowAnnotations,
- true
+ false
),
showQueryWarnings: initializeFromLocalStorage(
localStorageKeyShowQueryWarnings,
diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json
index 06b75f735c..5208513eab 100644
--- a/web/ui/module/codemirror-promql/package.json
+++ b/web/ui/module/codemirror-promql/package.json
@@ -30,18 +30,18 @@
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.309.1",
- "lru-cache": "^11.2.2"
+ "lru-cache": "^11.2.5"
},
"devDependencies": {
- "@codemirror/autocomplete": "^6.19.1",
- "@codemirror/language": "^6.11.3",
- "@codemirror/lint": "^6.9.2",
- "@codemirror/state": "^6.5.2",
- "@codemirror/view": "^6.38.6",
- "@lezer/common": "^1.3.0",
+ "@codemirror/autocomplete": "^6.20.0",
+ "@codemirror/language": "^6.12.1",
+ "@codemirror/lint": "^6.9.3",
+ "@codemirror/state": "^6.5.4",
+ "@codemirror/view": "^6.39.12",
+ "@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
- "@lezer/lr": "^1.4.3",
- "eslint-plugin-prettier": "^5.5.4",
+ "@lezer/lr": "^1.4.8",
+ "eslint-plugin-prettier": "^5.5.5",
"isomorphic-fetch": "^3.0.0",
"nock": "^14.0.10"
},
diff --git a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts
index d356268d74..3670fffff7 100644
--- a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts
+++ b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts
@@ -39,6 +39,10 @@ export const binOpModifierTerms = [
{ label: 'ignoring', info: 'Ignore specified labels for matching', type: 'keyword' },
{ label: 'group_left', info: 'Allow many-to-one matching', type: 'keyword' },
{ label: 'group_right', info: 'Allow one-to-many matching', type: 'keyword' },
+ { label: 'bool', info: 'Return boolean result (0 or 1) instead of filtering', type: 'keyword' },
+ { label: 'fill', info: 'Fill in missing series on both sides', type: 'keyword' },
+ { label: 'fill_left', info: 'Fill in missing series on the left side', type: 'keyword' },
+ { label: 'fill_right', info: 'Fill in missing series on the right side', type: 'keyword' },
];
export const atModifierTerms = [
diff --git a/web/ui/module/codemirror-promql/src/parser/vector.test.ts b/web/ui/module/codemirror-promql/src/parser/vector.test.ts
index f628206538..c6eeb930ab 100644
--- a/web/ui/module/codemirror-promql/src/parser/vector.test.ts
+++ b/web/ui/module/codemirror-promql/src/parser/vector.test.ts
@@ -15,29 +15,31 @@ import { buildVectorMatching } from './vector';
import { createEditorState } from '../test/utils-test';
import { BinaryExpr } from '@prometheus-io/lezer-promql';
import { syntaxTree } from '@codemirror/language';
-import { VectorMatchCardinality } from '../types';
+import { VectorMatchCardinality, VectorMatching } from '../types';
+
+const noFill = { fill: { lhs: null, rhs: null } };
describe('buildVectorMatching test', () => {
- const testCases = [
+ const testCases: { binaryExpr: string; expectedVectorMatching: VectorMatching }[] = [
{
binaryExpr: 'foo * bar',
- expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [] },
+ expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [], ...noFill },
},
{
binaryExpr: 'foo * sum',
- expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [] },
+ expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [], ...noFill },
},
{
binaryExpr: 'foo == 1',
- expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [] },
+ expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [], ...noFill },
},
{
binaryExpr: 'foo == bool 1',
- expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [] },
+ expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [], ...noFill },
},
{
binaryExpr: '2.5 / bar',
- expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [] },
+ expectedVectorMatching: { card: VectorMatchCardinality.CardOneToOne, matchingLabels: [], on: false, include: [], ...noFill },
},
{
binaryExpr: 'foo and bar',
@@ -46,6 +48,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: [],
on: false,
include: [],
+ ...noFill,
},
},
{
@@ -55,6 +58,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: [],
on: false,
include: [],
+ ...noFill,
},
},
{
@@ -64,6 +68,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: [],
on: false,
include: [],
+ ...noFill,
},
},
{
@@ -75,6 +80,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: [],
on: false,
include: [],
+ ...noFill,
},
},
{
@@ -86,6 +92,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: [],
on: false,
include: [],
+ ...noFill,
},
},
{
@@ -95,6 +102,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: true,
include: [],
+ ...noFill,
},
},
{
@@ -104,6 +112,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: true,
include: [],
+ ...noFill,
},
},
{
@@ -113,6 +122,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: true,
include: [],
+ ...noFill,
},
},
{
@@ -122,6 +132,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: [],
on: true,
include: [],
+ ...noFill,
},
},
{
@@ -131,6 +142,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: false,
include: [],
+ ...noFill,
},
},
{
@@ -140,6 +152,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: [],
on: false,
include: [],
+ ...noFill,
},
},
{
@@ -149,6 +162,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['bar'],
on: true,
include: [],
+ ...noFill,
},
},
{
@@ -158,6 +172,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: true,
include: ['bar'],
+ ...noFill,
},
},
{
@@ -167,6 +182,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: false,
include: ['blub'],
+ ...noFill,
},
},
{
@@ -176,6 +192,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: false,
include: ['bar'],
+ ...noFill,
},
},
{
@@ -185,6 +202,7 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: true,
include: ['bar', 'foo'],
+ ...noFill,
},
},
{
@@ -194,6 +212,57 @@ describe('buildVectorMatching test', () => {
matchingLabels: ['test', 'blub'],
on: false,
include: ['bar', 'foo'],
+ ...noFill,
+ },
+ },
+ {
+ binaryExpr: 'foo + fill(23) bar',
+ expectedVectorMatching: {
+ card: VectorMatchCardinality.CardOneToOne,
+ matchingLabels: [],
+ on: false,
+ include: [],
+ fill: { lhs: 23, rhs: 23 },
+ },
+ },
+ {
+ binaryExpr: 'foo + fill_left(23) bar',
+ expectedVectorMatching: {
+ card: VectorMatchCardinality.CardOneToOne,
+ matchingLabels: [],
+ on: false,
+ include: [],
+ fill: { lhs: 23, rhs: null },
+ },
+ },
+ {
+ binaryExpr: 'foo + fill_right(23) bar',
+ expectedVectorMatching: {
+ card: VectorMatchCardinality.CardOneToOne,
+ matchingLabels: [],
+ on: false,
+ include: [],
+ fill: { lhs: null, rhs: 23 },
+ },
+ },
+ {
+ binaryExpr: 'foo + fill_left(23) fill_right(42) bar',
+ expectedVectorMatching: {
+ card: VectorMatchCardinality.CardOneToOne,
+ matchingLabels: [],
+ on: false,
+ include: [],
+ fill: { lhs: 23, rhs: 42 },
+ },
+ },
+ {
+ binaryExpr: 'foo + fill_right(23) fill_left(42) bar',
+ expectedVectorMatching: {
+ card: VectorMatchCardinality.CardOneToOne,
+ matchingLabels: [],
+ on: false,
+ include: [],
+ fill: { lhs: 42, rhs: 23 },
},
},
];
@@ -203,7 +272,7 @@ describe('buildVectorMatching test', () => {
const node = syntaxTree(state).topNode.getChild(BinaryExpr);
expect(node).toBeTruthy();
if (node) {
- expect(value.expectedVectorMatching).toEqual(buildVectorMatching(state, node));
+ expect(buildVectorMatching(state, node)).toEqual(value.expectedVectorMatching);
}
});
});
diff --git a/web/ui/module/codemirror-promql/src/parser/vector.ts b/web/ui/module/codemirror-promql/src/parser/vector.ts
index c47ca1fb76..9fc31bf5c6 100644
--- a/web/ui/module/codemirror-promql/src/parser/vector.ts
+++ b/web/ui/module/codemirror-promql/src/parser/vector.ts
@@ -24,6 +24,11 @@ import {
On,
Or,
Unless,
+ NumberDurationLiteral,
+ FillModifier,
+ FillClause,
+ FillLeftClause,
+ FillRightClause,
} from '@prometheus-io/lezer-promql';
import { VectorMatchCardinality, VectorMatching } from '../types';
import { containsAtLeastOneChild } from './path-finder';
@@ -37,6 +42,10 @@ export function buildVectorMatching(state: EditorState, binaryNode: SyntaxNode):
matchingLabels: [],
on: false,
include: [],
+ fill: {
+ lhs: null,
+ rhs: null,
+ },
};
const modifierClause = binaryNode.getChild(MatchingModifierClause);
if (modifierClause) {
@@ -60,6 +69,32 @@ export function buildVectorMatching(state: EditorState, binaryNode: SyntaxNode):
}
}
+ const fillModifier = binaryNode.getChild(FillModifier);
+ if (fillModifier) {
+ const fill = fillModifier.getChild(FillClause);
+ const fillLeft = fillModifier.getChild(FillLeftClause);
+ const fillRight = fillModifier.getChild(FillRightClause);
+
+ const getFillValue = (node: SyntaxNode) => {
+ const valueNode = node.getChild(NumberDurationLiteral);
+ return valueNode ? parseFloat(state.sliceDoc(valueNode.from, valueNode.to)) : null;
+ };
+
+ if (fill) {
+ const value = getFillValue(fill);
+ result.fill.lhs = value;
+ result.fill.rhs = value;
+ }
+
+ if (fillLeft) {
+ result.fill.lhs = getFillValue(fillLeft);
+ }
+
+ if (fillRight) {
+ result.fill.rhs = getFillValue(fillRight);
+ }
+ }
+
const isSetOperator = containsAtLeastOneChild(binaryNode, And, Or, Unless);
if (isSetOperator && result.card === VectorMatchCardinality.CardOneToOne) {
result.card = VectorMatchCardinality.CardManyToMany;
diff --git a/web/ui/module/codemirror-promql/src/types/vector.ts b/web/ui/module/codemirror-promql/src/types/vector.ts
index 4e7a4f4c45..709b0b76d6 100644
--- a/web/ui/module/codemirror-promql/src/types/vector.ts
+++ b/web/ui/module/codemirror-promql/src/types/vector.ts
@@ -18,6 +18,11 @@ export enum VectorMatchCardinality {
CardManyToMany = 'many-to-many',
}
+export interface FillValues {
+ lhs: number | null;
+ rhs: number | null;
+}
+
export interface VectorMatching {
// The cardinality of the two Vectors.
card: VectorMatchCardinality;
@@ -30,4 +35,6 @@ export interface VectorMatching {
// Include contains additional labels that should be included in
// the result from the side with the lower cardinality.
include: string[];
+ // Fill contains optional fill values for missing elements.
+ fill: FillValues;
}
diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index eccae9a163..7a969b57e4 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -33,7 +33,7 @@
"devDependencies": {
"@lezer/generator": "^1.8.0",
"@lezer/highlight": "^1.2.3",
- "@lezer/lr": "^1.4.3",
+ "@lezer/lr": "^1.4.8",
"@rollup/plugin-node-resolve": "^16.0.3"
},
"peerDependencies": {
diff --git a/web/ui/module/lezer-promql/src/promql.grammar b/web/ui/module/lezer-promql/src/promql.grammar
index 5fe8d4d025..9308ad01be 100644
--- a/web/ui/module/lezer-promql/src/promql.grammar
+++ b/web/ui/module/lezer-promql/src/promql.grammar
@@ -101,11 +101,30 @@ MatchingModifierClause {
((GroupLeft | GroupRight) (!group GroupingLabels)?)?
}
+FillClause {
+ Fill "(" NumberDurationLiteral ")"
+}
+
+FillLeftClause {
+ FillLeft "(" NumberDurationLiteral ")"
+}
+
+FillRightClause {
+ FillRight "(" NumberDurationLiteral ")"
+}
+
+FillModifier {
+ (FillClause | FillLeftClause | FillRightClause) |
+ (FillLeftClause FillRightClause) |
+ (FillRightClause FillLeftClause)
+}
+
BoolModifier { Bool }
binModifiers {
BoolModifier?
MatchingModifierClause?
+ FillModifier?
}
GroupingLabels {
@@ -366,7 +385,10 @@ NumberDurationLiteralInDurationContext {
Start,
End,
Smoothed,
- Anchored
+ Anchored,
+ Fill,
+ FillLeft,
+ FillRight
}
@external propSource promQLHighLight from "./highlight"
diff --git a/web/ui/module/lezer-promql/src/tokens.js b/web/ui/module/lezer-promql/src/tokens.js
index 523c306ae9..6fd681f1f8 100644
--- a/web/ui/module/lezer-promql/src/tokens.js
+++ b/web/ui/module/lezer-promql/src/tokens.js
@@ -12,82 +12,88 @@
// limitations under the License.
import {
- And,
- Avg,
- Atan2,
- Bool,
- Bottomk,
- By,
- Count,
- CountValues,
- End,
- Group,
- GroupLeft,
- GroupRight,
- Ignoring,
- inf,
- Max,
- Min,
- nan,
- Offset,
- On,
- Or,
- Quantile,
- LimitK,
- LimitRatio,
- Start,
- Stddev,
- Stdvar,
- Sum,
- Topk,
- Unless,
- Without,
- Smoothed,
- Anchored,
-} from './parser.terms.js';
+ And,
+ Avg,
+ Atan2,
+ Bool,
+ Bottomk,
+ By,
+ Count,
+ CountValues,
+ End,
+ Group,
+ GroupLeft,
+ GroupRight,
+ Ignoring,
+ inf,
+ Max,
+ Min,
+ nan,
+ Offset,
+ On,
+ Or,
+ Quantile,
+ LimitK,
+ LimitRatio,
+ Start,
+ Stddev,
+ Stdvar,
+ Sum,
+ Topk,
+ Unless,
+ Without,
+ Smoothed,
+ Anchored,
+ Fill,
+ FillLeft,
+ FillRight,
+} from "./parser.terms.js";
const keywordTokens = {
- inf: inf,
- nan: nan,
- bool: Bool,
- ignoring: Ignoring,
- on: On,
- group_left: GroupLeft,
- group_right: GroupRight,
- offset: Offset,
+ inf: inf,
+ nan: nan,
+ bool: Bool,
+ ignoring: Ignoring,
+ on: On,
+ group_left: GroupLeft,
+ group_right: GroupRight,
+ offset: Offset,
};
export const specializeIdentifier = (value, stack) => {
- return keywordTokens[value.toLowerCase()] || -1;
+ return keywordTokens[value.toLowerCase()] || -1;
};
const contextualKeywordTokens = {
- avg: Avg,
- atan2: Atan2,
- bottomk: Bottomk,
- count: Count,
- count_values: CountValues,
- group: Group,
- max: Max,
- min: Min,
- quantile: Quantile,
- limitk: LimitK,
- limit_ratio: LimitRatio,
- stddev: Stddev,
- stdvar: Stdvar,
- sum: Sum,
- topk: Topk,
- by: By,
- without: Without,
- and: And,
- or: Or,
- unless: Unless,
- start: Start,
- end: End,
- smoothed: Smoothed,
- anchored: Anchored,
+ avg: Avg,
+ atan2: Atan2,
+ bottomk: Bottomk,
+ count: Count,
+ count_values: CountValues,
+ group: Group,
+ max: Max,
+ min: Min,
+ quantile: Quantile,
+ limitk: LimitK,
+ limit_ratio: LimitRatio,
+ stddev: Stddev,
+ stdvar: Stdvar,
+ sum: Sum,
+ topk: Topk,
+ by: By,
+ without: Without,
+ and: And,
+ or: Or,
+ unless: Unless,
+ start: Start,
+ end: End,
+ smoothed: Smoothed,
+ anchored: Anchored,
+ fill: Fill,
+ fill_left: FillLeft,
+ fill_right: FillRight,
};
export const extendIdentifier = (value, stack) => {
- return contextualKeywordTokens[value.toLowerCase()] || -1;
+ return contextualKeywordTokens[value.toLowerCase()] || -1;
};
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 764fd87820..7669399b66 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -13,11 +13,11 @@
],
"devDependencies": {
"@types/jest": "^29.5.14",
- "@typescript-eslint/eslint-plugin": "^8.46.3",
- "@typescript-eslint/parser": "^8.46.3",
+ "@typescript-eslint/eslint-plugin": "^8.54.0",
+ "@typescript-eslint/parser": "^8.54.0",
"eslint-config-prettier": "^10.1.8",
- "prettier": "^3.6.2",
- "ts-jest": "^29.4.5",
+ "prettier": "^3.8.1",
+ "ts-jest": "^29.4.6",
"typescript": "^5.9.3",
"vite": "^6.4.1"
}
@@ -26,57 +26,57 @@
"name": "@prometheus-io/mantine-ui",
"version": "0.309.1",
"dependencies": {
- "@codemirror/autocomplete": "^6.19.1",
- "@codemirror/language": "^6.11.3",
- "@codemirror/lint": "^6.9.2",
- "@codemirror/state": "^6.5.2",
- "@codemirror/view": "^6.38.6",
- "@floating-ui/dom": "^1.7.4",
- "@lezer/common": "^1.3.0",
+ "@codemirror/autocomplete": "^6.20.0",
+ "@codemirror/language": "^6.12.1",
+ "@codemirror/lint": "^6.9.3",
+ "@codemirror/state": "^6.5.4",
+ "@codemirror/view": "^6.39.12",
+ "@floating-ui/dom": "^1.7.5",
+ "@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
- "@mantine/code-highlight": "^8.3.6",
- "@mantine/core": "^8.3.6",
- "@mantine/dates": "^8.3.6",
- "@mantine/hooks": "^8.3.6",
- "@mantine/notifications": "^8.3.6",
+ "@mantine/code-highlight": "^8.3.14",
+ "@mantine/core": "^8.3.14",
+ "@mantine/dates": "^8.3.14",
+ "@mantine/hooks": "^8.3.14",
+ "@mantine/notifications": "^8.3.14",
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.309.1",
- "@reduxjs/toolkit": "^2.10.1",
- "@tabler/icons-react": "^3.35.0",
- "@tanstack/react-query": "^5.90.7",
+ "@reduxjs/toolkit": "^2.11.2",
+ "@tabler/icons-react": "^3.36.1",
+ "@tanstack/react-query": "^5.90.20",
"@testing-library/jest-dom": "^6.9.1",
- "@testing-library/react": "^16.3.0",
- "@types/lodash": "^4.17.20",
+ "@testing-library/react": "^16.3.2",
+ "@types/lodash": "^4.17.23",
"@types/sanitize-html": "^2.16.0",
- "@uiw/react-codemirror": "^4.25.3",
+ "@uiw/react-codemirror": "^4.25.4",
"clsx": "^2.1.1",
"dayjs": "^1.11.19",
"highlight.js": "^11.11.1",
- "lodash": "^4.17.21",
- "react": "^19.2.0",
- "react-dom": "^19.2.0",
- "react-infinite-scroll-component": "^6.1.0",
+ "lodash": "^4.17.23",
+ "react": "^19.2.4",
+ "react-dom": "^19.2.4",
+ "react-infinite-scroll-component": "^6.1.1",
"react-redux": "^9.2.0",
- "react-router-dom": "^7.9.5",
+ "react-router-dom": "^7.13.0",
"sanitize-html": "^2.17.0",
"uplot": "^1.6.32",
"uplot-react": "^1.2.4",
- "use-query-params": "^2.2.1"
+ "use-query-params": "^2.2.2"
},
"devDependencies": {
"@eslint/compat": "^1.4.1",
- "@eslint/eslintrc": "^3.3.1",
- "@eslint/js": "^9.39.1",
- "@types/react": "^19.2.2",
- "@types/react-dom": "^19.2.2",
- "@typescript-eslint/eslint-plugin": "^8.46.3",
- "@typescript-eslint/parser": "^8.46.3",
+ "@eslint/eslintrc": "^3.3.3",
+ "@eslint/js": "^9.39.2",
+ "@types/react": "^19.2.13",
+ "@types/react-dom": "^19.2.3",
+ "@typescript-eslint/eslint-plugin": "^8.54.0",
+ "@typescript-eslint/parser": "^8.54.0",
"@vitejs/plugin-react": "^4.7.0",
- "eslint": "^9.39.1",
+ "eslint": "^9.39.2",
"eslint-plugin-react-hooks": "^5.2.0",
- "eslint-plugin-react-refresh": "^0.4.24",
+ "eslint-plugin-react-refresh": "^0.5.0",
"globals": "^16.5.0",
"jsdom": "^25.0.1",
"postcss": "^8.5.6",
@@ -86,24 +86,108 @@
"vitest": "^3.2.4"
}
},
+ "mantine-ui/node_modules/@mantine/code-highlight": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-8.3.14.tgz",
+ "integrity": "sha512-7ywMnadaw4O/QG9sQOCIWPZKh6Q97ibyZgkH2cjVNvVbChmZKXIlcHW/QbQJUS84Bs/eGDhnkxwnq78v9w16gQ==",
+ "license": "MIT",
+ "dependencies": {
+ "clsx": "^2.1.1"
+ },
+ "peerDependencies": {
+ "@mantine/core": "8.3.14",
+ "@mantine/hooks": "8.3.14",
+ "react": "^18.x || ^19.x",
+ "react-dom": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/core": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.3.14.tgz",
+ "integrity": "sha512-ZOxggx65Av1Ii1NrckCuqzluRpmmG+8DyEw24wDom3rmwsPg9UV+0le2QTyI5Eo60LzPfUju1KuEPiUzNABIPg==",
+ "license": "MIT",
+ "dependencies": {
+ "@floating-ui/react": "^0.27.16",
+ "clsx": "^2.1.1",
+ "react-number-format": "^5.4.4",
+ "react-remove-scroll": "^2.7.1",
+ "react-textarea-autosize": "8.5.9",
+ "type-fest": "^4.41.0"
+ },
+ "peerDependencies": {
+ "@mantine/hooks": "8.3.14",
+ "react": "^18.x || ^19.x",
+ "react-dom": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/dates": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-8.3.14.tgz",
+ "integrity": "sha512-NdStRo2ZQ55MoMF5B9vjhpBpHRDHF1XA9Dkb1kKSdNuLlaFXKlvoaZxj/3LfNPpn7Nqlns78nWt4X8/cgC2YIg==",
+ "license": "MIT",
+ "dependencies": {
+ "clsx": "^2.1.1"
+ },
+ "peerDependencies": {
+ "@mantine/core": "8.3.14",
+ "@mantine/hooks": "8.3.14",
+ "dayjs": ">=1.0.0",
+ "react": "^18.x || ^19.x",
+ "react-dom": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/hooks": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.3.14.tgz",
+ "integrity": "sha512-0SbHnGEuHcF2QyjzBBcqidpjNmIb6n7TC3obnhkBToYhUTbMcJSK/8ei/yHtAelridJH4CPeohRlQdc0HajHyQ==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/notifications": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.3.14.tgz",
+ "integrity": "sha512-+ia97wrcU9Zfv+jXYvgr2GdISqKTHbQE9nnEIZvGUBPAqKr9b2JAsaXQS/RsAdoXUI+kKDEtH2fyVYS7zrSi/Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@mantine/store": "8.3.14",
+ "react-transition-group": "4.4.5"
+ },
+ "peerDependencies": {
+ "@mantine/core": "8.3.14",
+ "@mantine/hooks": "8.3.14",
+ "react": "^18.x || ^19.x",
+ "react-dom": "^18.x || ^19.x"
+ }
+ },
+ "mantine-ui/node_modules/@mantine/store": {
+ "version": "8.3.14",
+ "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.3.14.tgz",
+ "integrity": "sha512-bgW+fYHDOp7Pk4+lcEm3ZF7dD/sIMKHyR985cOqSHAYJPRcVFb+zcEK/SWoFZqlyA4qh08CNrASOaod8N0XKfA==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^18.x || ^19.x"
+ }
+ },
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.309.1",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.309.1",
- "lru-cache": "^11.2.2"
+ "lru-cache": "^11.2.5"
},
"devDependencies": {
- "@codemirror/autocomplete": "^6.19.1",
- "@codemirror/language": "^6.11.3",
- "@codemirror/lint": "^6.9.2",
- "@codemirror/state": "^6.5.2",
- "@codemirror/view": "^6.38.6",
- "@lezer/common": "^1.3.0",
+ "@codemirror/autocomplete": "^6.20.0",
+ "@codemirror/language": "^6.12.1",
+ "@codemirror/lint": "^6.9.3",
+ "@codemirror/state": "^6.5.4",
+ "@codemirror/view": "^6.39.12",
+ "@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
- "@lezer/lr": "^1.4.3",
- "eslint-plugin-prettier": "^5.5.4",
+ "@lezer/lr": "^1.4.8",
+ "eslint-plugin-prettier": "^5.5.5",
"isomorphic-fetch": "^3.0.0",
"nock": "^14.0.10"
},
@@ -126,7 +210,7 @@
"devDependencies": {
"@lezer/generator": "^1.8.0",
"@lezer/highlight": "^1.2.3",
- "@lezer/lr": "^1.4.3",
+ "@lezer/lr": "^1.4.8",
"@rollup/plugin-node-resolve": "^16.0.3"
},
"peerDependencies": {
@@ -727,9 +811,10 @@
"peer": true
},
"node_modules/@codemirror/autocomplete": {
- "version": "6.19.1",
- "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.19.1.tgz",
- "integrity": "sha512-q6NenYkEy2fn9+JyjIxMWcNjzTL/IhwqfzOut1/G3PrIFkrbl4AL7Wkse5tLrQUUyqGoAKU5+Pi5jnnXxH5HGw==",
+ "version": "6.20.0",
+ "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.20.0.tgz",
+ "integrity": "sha512-bOwvTOIJcG5FVo5gUUupiwYh8MioPLQ4UcqbcRf7UQ98X90tCa9E1kZ3Z7tqwpZxYyOvh1YTYbmZE9RTfTp5hg==",
+ "license": "MIT",
"dependencies": {
"@codemirror/language": "^6.0.0",
"@codemirror/state": "^6.0.0",
@@ -750,23 +835,24 @@
}
},
"node_modules/@codemirror/language": {
- "version": "6.11.3",
- "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.11.3.tgz",
- "integrity": "sha512-9HBM2XnwDj7fnu0551HkGdrUrrqmYq/WC5iv6nbY2WdicXdGbhR/gfbZOH73Aqj4351alY1+aoG9rCNfiwS1RA==",
+ "version": "6.12.1",
+ "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.12.1.tgz",
+ "integrity": "sha512-Fa6xkSiuGKc8XC8Cn96T+TQHYj4ZZ7RdFmXA3i9xe/3hLHfwPZdM+dqfX0Cp0zQklBKhVD8Yzc8LS45rkqcwpQ==",
"license": "MIT",
"dependencies": {
"@codemirror/state": "^6.0.0",
"@codemirror/view": "^6.23.0",
- "@lezer/common": "^1.1.0",
+ "@lezer/common": "^1.5.0",
"@lezer/highlight": "^1.0.0",
"@lezer/lr": "^1.0.0",
"style-mod": "^4.0.0"
}
},
"node_modules/@codemirror/lint": {
- "version": "6.9.2",
- "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.9.2.tgz",
- "integrity": "sha512-sv3DylBiIyi+xKwRCJAAsBZZZWo82shJ/RTMymLabAdtbkV5cSKwWDeCgtUq3v8flTaXS2y1kKkICuRYtUswyQ==",
+ "version": "6.9.3",
+ "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.9.3.tgz",
+ "integrity": "sha512-y3YkYhdnhjDBAe0VIA0c4wVoFOvnp8CnAvfLqi0TqotIv92wIlAAP7HELOpLBsKwjAX6W92rSflA6an/2zBvXw==",
+ "license": "MIT",
"dependencies": {
"@codemirror/state": "^6.0.0",
"@codemirror/view": "^6.35.0",
@@ -785,9 +871,9 @@
}
},
"node_modules/@codemirror/state": {
- "version": "6.5.2",
- "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.2.tgz",
- "integrity": "sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA==",
+ "version": "6.5.4",
+ "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.4.tgz",
+ "integrity": "sha512-8y7xqG/hpB53l25CIoit9/ngxdfoG+fx+V3SHBrinnhOtLvKHRyAJJuHzkWrR4YXXLX8eXBsejgAAxHUOdW1yw==",
"license": "MIT",
"dependencies": {
"@marijn/find-cluster-break": "^1.0.0"
@@ -806,9 +892,10 @@
}
},
"node_modules/@codemirror/view": {
- "version": "6.38.6",
- "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.38.6.tgz",
- "integrity": "sha512-qiS0z1bKs5WOvHIAC0Cybmv4AJSkAXgX5aD6Mqd2epSLlVJsQl8NG23jCVouIgkh4All/mrbdsf2UOLFnJw0tw==",
+ "version": "6.39.12",
+ "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.39.12.tgz",
+ "integrity": "sha512-f+/VsHVn/kOA9lltk/GFzuYwVVAKmOnNjxbrhkk3tPHntFqjWeI2TbIXx006YkBkqC10wZ4NsnWXCQiFPeAISQ==",
+ "license": "MIT",
"dependencies": {
"@codemirror/state": "^6.5.0",
"crelt": "^1.0.6",
@@ -1242,9 +1329,9 @@
}
},
"node_modules/@eslint-community/eslint-utils": {
- "version": "4.9.0",
- "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz",
- "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==",
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz",
+ "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1274,9 +1361,9 @@
}
},
"node_modules/@eslint-community/regexpp": {
- "version": "4.12.1",
- "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz",
- "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==",
+ "version": "4.12.2",
+ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz",
+ "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==",
"dev": true,
"license": "MIT",
"engines": {
@@ -1342,9 +1429,9 @@
}
},
"node_modules/@eslint/eslintrc": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz",
- "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==",
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz",
+ "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1354,7 +1441,7 @@
"globals": "^14.0.0",
"ignore": "^5.2.0",
"import-fresh": "^3.2.1",
- "js-yaml": "^4.1.0",
+ "js-yaml": "^4.1.1",
"minimatch": "^3.1.2",
"strip-json-comments": "^3.1.1"
},
@@ -1378,10 +1465,11 @@
}
},
"node_modules/@eslint/js": {
- "version": "9.39.1",
- "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.1.tgz",
- "integrity": "sha512-S26Stp4zCy88tH94QbBv3XCuzRQiZ9yXofEILmglYTh/Ug/a9/umqvgFtYBAo3Lp0nsI/5/qH1CCrbdK3AP1Tw==",
+ "version": "9.39.2",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz",
+ "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -1412,21 +1500,21 @@
}
},
"node_modules/@floating-ui/core": {
- "version": "1.7.3",
- "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz",
- "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==",
+ "version": "1.7.4",
+ "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.4.tgz",
+ "integrity": "sha512-C3HlIdsBxszvm5McXlB8PeOEWfBhcGBTZGkGlWc2U0KFY5IwG5OQEuQ8rq52DZmcHDlPLd+YFBK+cZcytwIFWg==",
"license": "MIT",
"dependencies": {
"@floating-ui/utils": "^0.2.10"
}
},
"node_modules/@floating-ui/dom": {
- "version": "1.7.4",
- "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz",
- "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==",
+ "version": "1.7.5",
+ "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.5.tgz",
+ "integrity": "sha512-N0bD2kIPInNHUHehXhMke1rBGs1dwqvC9O9KYMyyjK7iXt7GAhnro7UlcuYcGdS/yYOlq0MAVgrow8IbWJwyqg==",
"license": "MIT",
"dependencies": {
- "@floating-ui/core": "^1.7.3",
+ "@floating-ui/core": "^1.7.4",
"@floating-ui/utils": "^0.2.10"
}
},
@@ -1570,9 +1658,9 @@
}
},
"node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": {
- "version": "3.14.1",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
- "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
"dev": true,
"license": "MIT",
"peer": true,
@@ -2043,9 +2131,10 @@
}
},
"node_modules/@lezer/common": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.3.0.tgz",
- "integrity": "sha512-L9X8uHCYU310o99L3/MpJKYxPzXPOS7S0NmBaM7UO/x2Kb2WbmMLSkfvdr1KxRIFYOpbY0Jhn7CfLSUDzL8arQ=="
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.5.1.tgz",
+ "integrity": "sha512-6YRVG9vBkaY7p1IVxL4s44n5nUnaNnGM2/AckNgYOnxTG2kWh1vR8BMxPseWPjRNpb5VtXnMpeYAEAADoRV1Iw==",
+ "license": "MIT"
},
"node_modules/@lezer/generator": {
"version": "1.8.0",
@@ -2070,91 +2159,14 @@
}
},
"node_modules/@lezer/lr": {
- "version": "1.4.3",
- "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.3.tgz",
- "integrity": "sha512-yenN5SqAxAPv/qMnpWW0AT7l+SxVrgG+u0tNsRQWqbrz66HIl8DnEbBObvy21J5K7+I1v7gsAnlE2VQ5yYVSeA==",
+ "version": "1.4.8",
+ "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.8.tgz",
+ "integrity": "sha512-bPWa0Pgx69ylNlMlPvBPryqeLYQjyJjqPx+Aupm5zydLIF3NE+6MMLT8Yi23Bd9cif9VS00aUebn+6fDIGBcDA==",
+ "license": "MIT",
"dependencies": {
"@lezer/common": "^1.0.0"
}
},
- "node_modules/@mantine/code-highlight": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-8.3.6.tgz",
- "integrity": "sha512-9jPrhchbfNCA73V3hMjXVcCBYL82/UOA9LiEs5LSwxr1q4JYBEBU8znMmVuxZlXA234Ci234AqxGNXdu9f+p4w==",
- "dependencies": {
- "clsx": "^2.1.1"
- },
- "peerDependencies": {
- "@mantine/core": "8.3.6",
- "@mantine/hooks": "8.3.6",
- "react": "^18.x || ^19.x",
- "react-dom": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/core": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.3.6.tgz",
- "integrity": "sha512-paTl+0x+O/QtgMtqVJaG8maD8sfiOdgPmLOyG485FmeGZ1L3KMdEkhxZtmdGlDFsLXhmMGQ57ducT90bvhXX5A==",
- "dependencies": {
- "@floating-ui/react": "^0.27.16",
- "clsx": "^2.1.1",
- "react-number-format": "^5.4.4",
- "react-remove-scroll": "^2.7.1",
- "react-textarea-autosize": "8.5.9",
- "type-fest": "^4.41.0"
- },
- "peerDependencies": {
- "@mantine/hooks": "8.3.6",
- "react": "^18.x || ^19.x",
- "react-dom": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/dates": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-8.3.6.tgz",
- "integrity": "sha512-lSi1zvyL86SKeePH0J3vOjAR7ZIVNOrZm6ja7jAH6IBdcpQOKH8TXbrcAi5okEStvmvkne7pVaGu0VkdE8KnAw==",
- "dependencies": {
- "clsx": "^2.1.1"
- },
- "peerDependencies": {
- "@mantine/core": "8.3.6",
- "@mantine/hooks": "8.3.6",
- "dayjs": ">=1.0.0",
- "react": "^18.x || ^19.x",
- "react-dom": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/hooks": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.3.6.tgz",
- "integrity": "sha512-liHfaWXHAkLjJy+Bkr29UsCwAoDQ/a64WrM67lksx8F0qqyjR5RQH8zVlhuOjdpQnwtlUkE/YiTvbJiPcoI0bw==",
- "peerDependencies": {
- "react": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/notifications": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.3.6.tgz",
- "integrity": "sha512-d3A96lyrFOVXtrwASEXALfzooKnnA60T2LclMXFF/4k27Ay5Hwza4D+ylqgxf0RkPfF9J6LhBXk72OjL5RH5Kg==",
- "dependencies": {
- "@mantine/store": "8.3.6",
- "react-transition-group": "4.4.5"
- },
- "peerDependencies": {
- "@mantine/core": "8.3.6",
- "@mantine/hooks": "8.3.6",
- "react": "^18.x || ^19.x",
- "react-dom": "^18.x || ^19.x"
- }
- },
- "node_modules/@mantine/store": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.3.6.tgz",
- "integrity": "sha512-fo86wF6nL8RPukY8cseAFQKk+bRVv3Ga/WmHJMYRsCbNleZOEZMXXUf/OVhmr1D3t+xzCzAlJe/sQ8MIS+c+pA==",
- "peerDependencies": {
- "react": "^18.x || ^19.x"
- }
- },
"node_modules/@marijn/find-cluster-break": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz",
@@ -2199,41 +2211,6 @@
"@nexucis/fuzzy": "^0.5.1"
}
},
- "node_modules/@nodelib/fs.scandir": {
- "version": "2.1.5",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
- "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
- "dev": true,
- "dependencies": {
- "@nodelib/fs.stat": "2.0.5",
- "run-parallel": "^1.1.9"
- },
- "engines": {
- "node": ">= 8"
- }
- },
- "node_modules/@nodelib/fs.stat": {
- "version": "2.0.5",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
- "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
- "dev": true,
- "engines": {
- "node": ">= 8"
- }
- },
- "node_modules/@nodelib/fs.walk": {
- "version": "1.2.8",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
- "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
- "dev": true,
- "dependencies": {
- "@nodelib/fs.scandir": "2.1.5",
- "fastq": "^1.6.0"
- },
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/@open-draft/deferred-promise": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz",
@@ -2285,13 +2262,14 @@
"link": true
},
"node_modules/@reduxjs/toolkit": {
- "version": "2.10.1",
- "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.10.1.tgz",
- "integrity": "sha512-/U17EXQ9Do9Yx4DlNGU6eVNfZvFJfYpUtRRdLf19PbPjdWBxNlxGZXywQZ1p1Nz8nMkWplTI7iD/23m07nolDA==",
+ "version": "2.11.2",
+ "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.11.2.tgz",
+ "integrity": "sha512-Kd6kAHTA6/nUpp8mySPqj3en3dm0tdMIgbttnQ1xFMVpufoj+ADi8pXLBsd4xzTRHQa7t/Jv8W5UnCuW4kuWMQ==",
+ "license": "MIT",
"dependencies": {
"@standard-schema/spec": "^1.0.0",
"@standard-schema/utils": "^0.3.0",
- "immer": "^10.2.0",
+ "immer": "^11.0.0",
"redux": "^5.0.1",
"redux-thunk": "^3.1.0",
"reselect": "^5.1.0"
@@ -2688,12 +2666,12 @@
}
},
"node_modules/@tabler/icons-react": {
- "version": "3.35.0",
- "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.35.0.tgz",
- "integrity": "sha512-XG7t2DYf3DyHT5jxFNp5xyLVbL4hMJYJhiSdHADzAjLRYfL7AnjlRfiHDHeXxkb2N103rEIvTsBRazxXtAUz2g==",
+ "version": "3.36.1",
+ "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.36.1.tgz",
+ "integrity": "sha512-/8nOXeNeMoze9xY/QyEKG65wuvRhkT3q9aytaur6Gj8bYU2A98YVJyLc9MRmc5nVvpy+bRlrrwK/Ykr8WGyUWg==",
"license": "MIT",
"dependencies": {
- "@tabler/icons": "3.35.0"
+ "@tabler/icons": ""
},
"funding": {
"type": "github",
@@ -2704,20 +2682,22 @@
}
},
"node_modules/@tanstack/query-core": {
- "version": "5.90.7",
- "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.7.tgz",
- "integrity": "sha512-6PN65csiuTNfBMXqQUxQhCNdtm1rV+9kC9YwWAIKcaxAauq3Wu7p18j3gQY3YIBJU70jT/wzCCZ2uqto/vQgiQ==",
+ "version": "5.90.20",
+ "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.20.tgz",
+ "integrity": "sha512-OMD2HLpNouXEfZJWcKeVKUgQ5n+n3A2JFmBaScpNDUqSrQSjiveC7dKMe53uJUg1nDG16ttFPz2xfilz6i2uVg==",
+ "license": "MIT",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/tannerlinsley"
}
},
"node_modules/@tanstack/react-query": {
- "version": "5.90.7",
- "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.7.tgz",
- "integrity": "sha512-wAHc/cgKzW7LZNFloThyHnV/AX9gTg3w5yAv0gvQHPZoCnepwqCMtzbuPbb2UvfvO32XZ46e8bPOYbfZhzVnnQ==",
+ "version": "5.90.20",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.20.tgz",
+ "integrity": "sha512-vXBxa+qeyveVO7OA0jX1z+DeyCA4JKnThKv411jd5SORpBKgkcVnYKCiBgECvADvniBX7tobwBmg01qq9JmMJw==",
+ "license": "MIT",
"dependencies": {
- "@tanstack/query-core": "5.90.7"
+ "@tanstack/query-core": "5.90.20"
},
"funding": {
"type": "github",
@@ -2772,9 +2752,9 @@
"license": "MIT"
},
"node_modules/@testing-library/react": {
- "version": "16.3.0",
- "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.0.tgz",
- "integrity": "sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw==",
+ "version": "16.3.2",
+ "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.2.tgz",
+ "integrity": "sha512-XU5/SytQM+ykqMnAnvB2umaJNIOsLF3PVv//1Ew4CTcpz0/BRyy/af40qqrt7SjKpDdT1saBMc42CUok5gaw+g==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.12.5"
@@ -2963,9 +2943,9 @@
"dev": true
},
"node_modules/@types/lodash": {
- "version": "4.17.20",
- "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz",
- "integrity": "sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==",
+ "version": "4.17.23",
+ "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.23.tgz",
+ "integrity": "sha512-RDvF6wTulMPjrNdCoYRC8gNR880JNGT8uB+REUpC2Ns4pRqQJhGz90wh7rgdXDPpCczF3VGktDuFGVnz8zP7HA==",
"license": "MIT"
},
"node_modules/@types/node": {
@@ -2978,19 +2958,21 @@
}
},
"node_modules/@types/react": {
- "version": "19.2.2",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.2.tgz",
- "integrity": "sha512-6mDvHUFSjyT2B2yeNx2nUgMxh9LtOWvkhIU3uePn2I2oyNymUAX1NIsdgviM4CH+JSrp2D2hsMvJOkxY+0wNRA==",
+ "version": "19.2.13",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.13.tgz",
+ "integrity": "sha512-KkiJeU6VbYbUOp5ITMIc7kBfqlYkKA5KhEHVrGMmUUMt7NeaZg65ojdPk+FtNrBAOXNVM5QM72jnADjM+XVRAQ==",
"devOptional": true,
+ "license": "MIT",
"dependencies": {
- "csstype": "^3.0.2"
+ "csstype": "^3.2.2"
}
},
"node_modules/@types/react-dom": {
- "version": "19.2.2",
- "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.2.tgz",
- "integrity": "sha512-9KQPoO6mZCi7jcIStSnlOWn2nEF3mNmyr3rIAsGnAbQKYbRLyqmeSc39EVgtxXVia+LMT8j3knZLAZAh+xLmrw==",
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz",
+ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
"devOptional": true,
+ "license": "MIT",
"peerDependencies": {
"@types/react": "^19.2.0"
}
@@ -3042,20 +3024,20 @@
"license": "MIT"
},
"node_modules/@typescript-eslint/eslint-plugin": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.46.3.tgz",
- "integrity": "sha512-sbaQ27XBUopBkRiuY/P9sWGOWUW4rl8fDoHIUmLpZd8uldsTyB4/Zg6bWTegPoTLnKj9Hqgn3QD6cjPNB32Odw==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.54.0.tgz",
+ "integrity": "sha512-hAAP5io/7csFStuOmR782YmTthKBJ9ND3WVL60hcOjvtGFb+HJxH4O5huAcmcZ9v9G8P+JETiZ/G1B8MALnWZQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@eslint-community/regexpp": "^4.10.0",
- "@typescript-eslint/scope-manager": "8.46.3",
- "@typescript-eslint/type-utils": "8.46.3",
- "@typescript-eslint/utils": "8.46.3",
- "@typescript-eslint/visitor-keys": "8.46.3",
- "graphemer": "^1.4.0",
- "ignore": "^7.0.0",
+ "@eslint-community/regexpp": "^4.12.2",
+ "@typescript-eslint/scope-manager": "8.54.0",
+ "@typescript-eslint/type-utils": "8.54.0",
+ "@typescript-eslint/utils": "8.54.0",
+ "@typescript-eslint/visitor-keys": "8.54.0",
+ "ignore": "^7.0.5",
"natural-compare": "^1.4.0",
- "ts-api-utils": "^2.1.0"
+ "ts-api-utils": "^2.4.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3065,7 +3047,7 @@
"url": "https://opencollective.com/typescript-eslint"
},
"peerDependencies": {
- "@typescript-eslint/parser": "^8.46.3",
+ "@typescript-eslint/parser": "^8.54.0",
"eslint": "^8.57.0 || ^9.0.0",
"typescript": ">=4.8.4 <6.0.0"
}
@@ -3081,16 +3063,17 @@
}
},
"node_modules/@typescript-eslint/parser": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.46.3.tgz",
- "integrity": "sha512-6m1I5RmHBGTnUGS113G04DMu3CpSdxCAU/UvtjNWL4Nuf3MW9tQhiJqRlHzChIkhy6kZSAQmc+I1bcGjE3yNKg==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.54.0.tgz",
+ "integrity": "sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/scope-manager": "8.46.3",
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/typescript-estree": "8.46.3",
- "@typescript-eslint/visitor-keys": "8.46.3",
- "debug": "^4.3.4"
+ "@typescript-eslint/scope-manager": "8.54.0",
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/typescript-estree": "8.54.0",
+ "@typescript-eslint/visitor-keys": "8.54.0",
+ "debug": "^4.4.3"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3105,14 +3088,15 @@
}
},
"node_modules/@typescript-eslint/project-service": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.46.3.tgz",
- "integrity": "sha512-Fz8yFXsp2wDFeUElO88S9n4w1I4CWDTXDqDr9gYvZgUpwXQqmZBr9+NTTql5R3J7+hrJZPdpiWaB9VNhAKYLuQ==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.54.0.tgz",
+ "integrity": "sha512-YPf+rvJ1s7MyiWM4uTRhE4DvBXrEV+d8oC3P9Y2eT7S+HBS0clybdMIPnhiATi9vZOYDc7OQ1L/i6ga6NFYK/g==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/tsconfig-utils": "^8.46.3",
- "@typescript-eslint/types": "^8.46.3",
- "debug": "^4.3.4"
+ "@typescript-eslint/tsconfig-utils": "^8.54.0",
+ "@typescript-eslint/types": "^8.54.0",
+ "debug": "^4.4.3"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3126,13 +3110,14 @@
}
},
"node_modules/@typescript-eslint/scope-manager": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.46.3.tgz",
- "integrity": "sha512-FCi7Y1zgrmxp3DfWfr+3m9ansUUFoy8dkEdeQSgA9gbm8DaHYvZCdkFRQrtKiedFf3Ha6VmoqoAaP68+i+22kg==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.54.0.tgz",
+ "integrity": "sha512-27rYVQku26j/PbHYcVfRPonmOlVI6gihHtXFbTdB5sb6qA0wdAQAbyXFVarQ5t4HRojIz64IV90YtsjQSSGlQg==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/visitor-keys": "8.46.3"
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/visitor-keys": "8.54.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3143,10 +3128,11 @@
}
},
"node_modules/@typescript-eslint/tsconfig-utils": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.46.3.tgz",
- "integrity": "sha512-GLupljMniHNIROP0zE7nCcybptolcH8QZfXOpCfhQDAdwJ/ZTlcaBOYebSOZotpti/3HrHSw7D3PZm75gYFsOA==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.54.0.tgz",
+ "integrity": "sha512-dRgOyT2hPk/JwxNMZDsIXDgyl9axdJI3ogZ2XWhBPsnZUv+hPesa5iuhdYt2gzwA9t8RE5ytOJ6xB0moV0Ujvw==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -3159,16 +3145,17 @@
}
},
"node_modules/@typescript-eslint/type-utils": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.46.3.tgz",
- "integrity": "sha512-ZPCADbr+qfz3aiTTYNNkCbUt+cjNwI/5McyANNrFBpVxPt7GqpEYz5ZfdwuFyGUnJ9FdDXbGODUu6iRCI6XRXw==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.54.0.tgz",
+ "integrity": "sha512-hiLguxJWHjjwL6xMBwD903ciAwd7DmK30Y9Axs/etOkftC3ZNN9K44IuRD/EB08amu+Zw6W37x9RecLkOo3pMA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/typescript-estree": "8.46.3",
- "@typescript-eslint/utils": "8.46.3",
- "debug": "^4.3.4",
- "ts-api-utils": "^2.1.0"
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/typescript-estree": "8.54.0",
+ "@typescript-eslint/utils": "8.54.0",
+ "debug": "^4.4.3",
+ "ts-api-utils": "^2.4.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3183,10 +3170,11 @@
}
},
"node_modules/@typescript-eslint/types": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.46.3.tgz",
- "integrity": "sha512-G7Ok9WN/ggW7e/tOf8TQYMaxgID3Iujn231hfi0Pc7ZheztIJVpO44ekY00b7akqc6nZcvregk0Jpah3kep6hA==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.54.0.tgz",
+ "integrity": "sha512-PDUI9R1BVjqu7AUDsRBbKMtwmjWcn4J3le+5LpcFgWULN3LvHC5rkc9gCVxbrsrGmO1jfPybN5s6h4Jy+OnkAA==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -3196,21 +3184,21 @@
}
},
"node_modules/@typescript-eslint/typescript-estree": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.46.3.tgz",
- "integrity": "sha512-f/NvtRjOm80BtNM5OQtlaBdM5BRFUv7gf381j9wygDNL+qOYSNOgtQ/DCndiYi80iIOv76QqaTmp4fa9hwI0OA==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.54.0.tgz",
+ "integrity": "sha512-BUwcskRaPvTk6fzVWgDPdUndLjB87KYDrN5EYGetnktoeAvPtO4ONHlAZDnj5VFnUANg0Sjm7j4usBlnoVMHwA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/project-service": "8.46.3",
- "@typescript-eslint/tsconfig-utils": "8.46.3",
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/visitor-keys": "8.46.3",
- "debug": "^4.3.4",
- "fast-glob": "^3.3.2",
- "is-glob": "^4.0.3",
- "minimatch": "^9.0.4",
- "semver": "^7.6.0",
- "ts-api-utils": "^2.1.0"
+ "@typescript-eslint/project-service": "8.54.0",
+ "@typescript-eslint/tsconfig-utils": "8.54.0",
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/visitor-keys": "8.54.0",
+ "debug": "^4.4.3",
+ "minimatch": "^9.0.5",
+ "semver": "^7.7.3",
+ "tinyglobby": "^0.2.15",
+ "ts-api-utils": "^2.4.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3228,6 +3216,7 @@
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
@@ -3237,6 +3226,7 @@
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
"integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
+ "license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
@@ -3248,15 +3238,16 @@
}
},
"node_modules/@typescript-eslint/utils": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.46.3.tgz",
- "integrity": "sha512-VXw7qmdkucEx9WkmR3ld/u6VhRyKeiF1uxWwCy/iuNfokjJ7VhsgLSOTjsol8BunSw190zABzpwdNsze2Kpo4g==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.54.0.tgz",
+ "integrity": "sha512-9Cnda8GS57AQakvRyG0PTejJNlA2xhvyNtEVIMlDWOOeEyBkYWhGPnfrIAnqxLMTSTo6q8g12XVjjev5l1NvMA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@eslint-community/eslint-utils": "^4.7.0",
- "@typescript-eslint/scope-manager": "8.46.3",
- "@typescript-eslint/types": "8.46.3",
- "@typescript-eslint/typescript-estree": "8.46.3"
+ "@eslint-community/eslint-utils": "^4.9.1",
+ "@typescript-eslint/scope-manager": "8.54.0",
+ "@typescript-eslint/types": "8.54.0",
+ "@typescript-eslint/typescript-estree": "8.54.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -3271,12 +3262,13 @@
}
},
"node_modules/@typescript-eslint/visitor-keys": {
- "version": "8.46.3",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.46.3.tgz",
- "integrity": "sha512-uk574k8IU0rOF/AjniX8qbLSGURJVUCeM5e4MIMKBFFi8weeiLrG1fyQejyLXQpRZbU/1BuQasleV/RfHC3hHg==",
+ "version": "8.54.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.54.0.tgz",
+ "integrity": "sha512-VFlhGSl4opC0bprJiItPQ1RfUhGDIBokcPwaFH4yiBCaNPeld/9VeXbiPO1cLyorQi1G1vL+ecBk1x8o1axORA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.46.3",
+ "@typescript-eslint/types": "8.54.0",
"eslint-visitor-keys": "^4.2.1"
},
"engines": {
@@ -3288,9 +3280,10 @@
}
},
"node_modules/@uiw/codemirror-extensions-basic-setup": {
- "version": "4.25.3",
- "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.25.3.tgz",
- "integrity": "sha512-F1doRyD50CWScwGHG2bBUtUpwnOv/zqSnzkZqJcX5YAHQx6Z1CuX8jdnFMH6qktRrPU1tfpNYftTWu3QIoHiMA==",
+ "version": "4.25.4",
+ "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.25.4.tgz",
+ "integrity": "sha512-YzNwkm0AbPv1EXhCHYR5v0nqfemG2jEB0Z3Att4rBYqKrlG7AA9Rhjc3IyBaOzsBu18wtrp9/+uhTyu7TXSRng==",
+ "license": "MIT",
"dependencies": {
"@codemirror/autocomplete": "^6.0.0",
"@codemirror/commands": "^6.0.0",
@@ -3314,15 +3307,16 @@
}
},
"node_modules/@uiw/react-codemirror": {
- "version": "4.25.3",
- "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.25.3.tgz",
- "integrity": "sha512-1wtBZTXPIp8u6F/xjHvsUAYlEeF5Dic4xZBnqJyLzv7o7GjGYEUfSz9Z7bo9aK9GAx2uojG/AuBMfhA4uhvIVQ==",
+ "version": "4.25.4",
+ "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.25.4.tgz",
+ "integrity": "sha512-ipO067oyfUw+DVaXhQCxkB0ZD9b7RnY+ByrprSYSKCHaULvJ3sqWYC/Zen6zVQ8/XC4o5EPBfatGiX20kC7XGA==",
+ "license": "MIT",
"dependencies": {
"@babel/runtime": "^7.18.6",
"@codemirror/commands": "^6.1.0",
"@codemirror/state": "^6.1.1",
"@codemirror/theme-one-dark": "^6.0.0",
- "@uiw/codemirror-extensions-basic-setup": "4.25.3",
+ "@uiw/codemirror-extensions-basic-setup": "4.25.4",
"codemirror": "^6.0.0"
},
"funding": {
@@ -3782,9 +3776,9 @@
}
},
"node_modules/brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -3881,6 +3875,20 @@
"node": ">=8"
}
},
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
@@ -4117,11 +4125,16 @@
"license": "MIT"
},
"node_modules/cookie": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz",
- "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
+ "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
+ "license": "MIT",
"engines": {
"node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
}
},
"node_modules/create-jest": {
@@ -4200,9 +4213,9 @@
}
},
"node_modules/csstype": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
- "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
"license": "MIT"
},
"node_modules/data-urls": {
@@ -4407,6 +4420,21 @@
"url": "https://github.com/fb55/domutils?sponsor=1"
}
},
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/electron-to-chromium": {
"version": "1.5.228",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.228.tgz",
@@ -4459,6 +4487,26 @@
"is-arrayish": "^0.2.1"
}
},
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/es-module-lexer": {
"version": "1.7.0",
"resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz",
@@ -4466,6 +4514,35 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/esbuild": {
"version": "0.25.0",
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.0.tgz",
@@ -4530,10 +4607,11 @@
}
},
"node_modules/eslint": {
- "version": "9.39.1",
- "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.1.tgz",
- "integrity": "sha512-BhHmn2yNOFA9H9JmmIVKJmd288g9hrVRDkdoIgRCRuSySRUHH7r/DI6aAXW9T1WwUuY3DFgrcaqB+deURBLR5g==",
+ "version": "9.39.2",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz",
+ "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -4541,7 +4619,7 @@
"@eslint/config-helpers": "^0.4.2",
"@eslint/core": "^0.17.0",
"@eslint/eslintrc": "^3.3.1",
- "@eslint/js": "9.39.1",
+ "@eslint/js": "9.39.2",
"@eslint/plugin-kit": "^0.4.1",
"@humanfs/node": "^0.16.6",
"@humanwhocodes/module-importer": "^1.0.1",
@@ -4605,14 +4683,14 @@
}
},
"node_modules/eslint-plugin-prettier": {
- "version": "5.5.4",
- "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.5.4.tgz",
- "integrity": "sha512-swNtI95SToIz05YINMA6Ox5R057IMAmWZ26GqPxusAp1TZzj+IdY9tXNWWD3vkF/wEqydCONcwjTFpxybBqZsg==",
+ "version": "5.5.5",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.5.5.tgz",
+ "integrity": "sha512-hscXkbqUZ2sPithAuLm5MXL+Wph+U7wHngPBv9OMWwlP8iaflyxpjTYZkmdgB4/vPIhemRlBEoLrH7UC1n7aUw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "prettier-linter-helpers": "^1.0.0",
- "synckit": "^0.11.7"
+ "prettier-linter-helpers": "^1.0.1",
+ "synckit": "^0.11.12"
},
"engines": {
"node": "^14.18.0 || >=16.0.0"
@@ -4649,12 +4727,13 @@
}
},
"node_modules/eslint-plugin-react-refresh": {
- "version": "0.4.24",
- "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.24.tgz",
- "integrity": "sha512-nLHIW7TEq3aLrEYWpVaJ1dRgFR+wLDPN8e8FpYAql/bMV2oBEfC37K0gLEGgv9fy66juNShSMV8OkTqzltcG/w==",
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.5.0.tgz",
+ "integrity": "sha512-ZYvmh7VfVgqR/7wR71I3Zl6hK/C5CcxdWYKZSpHawS5JCNgE4efhQWg/+/WPpgGAp9Ngp/rRZYyaIwmPQBq/lA==",
"dev": true,
+ "license": "MIT",
"peerDependencies": {
- "eslint": ">=8.40"
+ "eslint": ">=9"
}
},
"node_modules/eslint-scope": {
@@ -4852,34 +4931,6 @@
"dev": true,
"license": "Apache-2.0"
},
- "node_modules/fast-glob": {
- "version": "3.3.3",
- "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz",
- "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==",
- "dev": true,
- "dependencies": {
- "@nodelib/fs.stat": "^2.0.2",
- "@nodelib/fs.walk": "^1.2.3",
- "glob-parent": "^5.1.2",
- "merge2": "^1.3.0",
- "micromatch": "^4.0.8"
- },
- "engines": {
- "node": ">=8.6.0"
- }
- },
- "node_modules/fast-glob/node_modules/glob-parent": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
- "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
- "dev": true,
- "dependencies": {
- "is-glob": "^4.0.1"
- },
- "engines": {
- "node": ">= 6"
- }
- },
"node_modules/fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
@@ -4894,15 +4945,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/fastq": {
- "version": "1.19.1",
- "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz",
- "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==",
- "dev": true,
- "dependencies": {
- "reusify": "^1.0.4"
- }
- },
"node_modules/fb-watchman": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz",
@@ -4976,14 +5018,16 @@
"dev": true
},
"node_modules/form-data": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
- "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
"dev": true,
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
"mime-types": "^2.1.12"
},
"engines": {
@@ -5044,6 +5088,31 @@
"node": "6.* || 8.* || >= 10.*"
}
},
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
"node_modules/get-nonce": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz",
@@ -5063,6 +5132,20 @@
"node": ">=8.0.0"
}
},
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/get-stream": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
@@ -5125,6 +5208,19 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
@@ -5132,13 +5228,6 @@
"dev": true,
"license": "ISC"
},
- "node_modules/graphemer": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
- "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/handlebars": {
"version": "4.7.8",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
@@ -5170,6 +5259,35 @@
"node": ">=8"
}
},
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
"node_modules/hasown": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
@@ -5295,9 +5413,10 @@
}
},
"node_modules/immer": {
- "version": "10.2.0",
- "resolved": "https://registry.npmjs.org/immer/-/immer-10.2.0.tgz",
- "integrity": "sha512-d/+XTN3zfODyjr89gM3mPq1WNX2B8pYsu7eORitdwyA2sBubnTl3laYlBk4sXY5FUa5qTZGBDPJICVbvqzjlbw==",
+ "version": "11.1.3",
+ "resolved": "https://registry.npmjs.org/immer/-/immer-11.1.3.tgz",
+ "integrity": "sha512-6jQTc5z0KJFtr1UgFpIL3N9XSC3saRaI9PwWtzM2pSqkNGtiNkYY2OSwkOGDK2XcTRcLb1pi/aNkKZz0nxVH4Q==",
+ "license": "MIT",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/immer"
@@ -6544,9 +6663,9 @@
"license": "MIT"
},
"node_modules/js-yaml": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
- "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -6726,9 +6845,9 @@
}
},
"node_modules/lodash": {
- "version": "4.17.21",
- "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
- "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+ "version": "4.17.23",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
+ "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
"license": "MIT"
},
"node_modules/lodash.memoize": {
@@ -6764,10 +6883,10 @@
"license": "MIT"
},
"node_modules/lru-cache": {
- "version": "11.2.2",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.2.tgz",
- "integrity": "sha512-F9ODfyqML2coTIsQpSkRHnLSZMtkU8Q+mSfcaIyKwy58u+8k5nvAYeiNhsyMARvzNcXJ9QfWVrcPsC9e9rAxtg==",
- "license": "ISC",
+ "version": "11.2.5",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.5.tgz",
+ "integrity": "sha512-vFrFJkWtJvJnD5hg+hJvVE8Lh/TcMzKnTgCWmtBipwI5yLX/iX+5UB2tfuyODF5E7k9xEzMdYgGqaSb1c0c5Yw==",
+ "license": "BlueOak-1.0.0",
"engines": {
"node": "20 || >=22"
}
@@ -6827,6 +6946,16 @@
"tmpl": "1.0.5"
}
},
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
@@ -6835,15 +6964,6 @@
"license": "MIT",
"peer": true
},
- "node_modules/merge2": {
- "version": "1.4.1",
- "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
- "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
- "dev": true,
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/micromatch": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
@@ -7551,9 +7671,9 @@
}
},
"node_modules/prettier": {
- "version": "3.7.4",
- "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz",
- "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==",
+ "version": "3.8.1",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.1.tgz",
+ "integrity": "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==",
"dev": true,
"license": "MIT",
"bin": {
@@ -7567,9 +7687,9 @@
}
},
"node_modules/prettier-linter-helpers": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz",
- "integrity": "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==",
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.1.tgz",
+ "integrity": "sha512-SxToR7P8Y2lWmv/kTzVLC1t/GDI2WGjMwNhLLE9qtH8Q13C+aEmuRlzDst4Up4s0Wc8sF2M+J57iB3cMLqftfg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -7675,49 +7795,31 @@
"license": "MIT",
"peer": true
},
- "node_modules/queue-microtask": {
- "version": "1.2.3",
- "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
- "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ]
- },
"node_modules/react": {
- "version": "19.2.0",
- "resolved": "https://registry.npmjs.org/react/-/react-19.2.0.tgz",
- "integrity": "sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==",
+ "version": "19.2.4",
+ "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz",
+ "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==",
+ "license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react-dom": {
- "version": "19.2.0",
- "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.0.tgz",
- "integrity": "sha512-UlbRu4cAiGaIewkPyiRGJk0imDN2T3JjieT6spoL2UeSf5od4n5LB/mQ4ejmxhCFT1tYe8IvaFulzynWovsEFQ==",
+ "version": "19.2.4",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz",
+ "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==",
+ "license": "MIT",
"dependencies": {
"scheduler": "^0.27.0"
},
"peerDependencies": {
- "react": "^19.2.0"
+ "react": "^19.2.4"
}
},
"node_modules/react-infinite-scroll-component": {
- "version": "6.1.0",
- "resolved": "https://registry.npmjs.org/react-infinite-scroll-component/-/react-infinite-scroll-component-6.1.0.tgz",
- "integrity": "sha512-SQu5nCqy8DxQWpnUVLx7V7b7LcA37aM7tvoWjTLZp1dk6EJibM5/4EJKzOnl07/BsM1Y40sKLuqjCwwH/xV0TQ==",
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/react-infinite-scroll-component/-/react-infinite-scroll-component-6.1.1.tgz",
+ "integrity": "sha512-R8YoOyiNDynSWmfVme5LHslsKrP+/xcRUWR2ies8UgUab9dtyw5ECnMCVPPmnmjjF4MWQmfVdRwRWcWaDgeyMA==",
"license": "MIT",
"dependencies": {
"throttle-debounce": "^2.1.0"
@@ -7823,9 +7925,10 @@
}
},
"node_modules/react-router": {
- "version": "7.9.5",
- "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.9.5.tgz",
- "integrity": "sha512-JmxqrnBZ6E9hWmf02jzNn9Jm3UqyeimyiwzD69NjxGySG6lIz/1LVPsoTCwN7NBX2XjCEa1LIX5EMz1j2b6u6A==",
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.13.0.tgz",
+ "integrity": "sha512-PZgus8ETambRT17BUm/LL8lX3Of+oiLaPuVTRH3l1eLvSPpKO3AvhAEb5N7ihAFZQrYDqkvvWfFh9p0z9VsjLw==",
+ "license": "MIT",
"dependencies": {
"cookie": "^1.0.1",
"set-cookie-parser": "^2.6.0"
@@ -7844,11 +7947,12 @@
}
},
"node_modules/react-router-dom": {
- "version": "7.9.5",
- "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.9.5.tgz",
- "integrity": "sha512-mkEmq/K8tKN63Ae2M7Xgz3c9l9YNbY+NHH6NNeUmLA3kDkhKXRsNb/ZpxaEunvGo2/3YXdk5EJU3Hxp3ocaBPw==",
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.13.0.tgz",
+ "integrity": "sha512-5CO/l5Yahi2SKC6rGZ+HDEjpjkGaG/ncEP7eWFTvFxbHP8yeeI0PxTDjimtpXYlR3b3i9/WIL4VJttPrESIf2g==",
+ "license": "MIT",
"dependencies": {
- "react-router": "7.9.5"
+ "react-router": "7.13.0"
},
"engines": {
"node": ">=20.0.0"
@@ -8026,16 +8130,6 @@
"node": ">=10"
}
},
- "node_modules/reusify": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
- "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==",
- "dev": true,
- "engines": {
- "iojs": ">=1.0.0",
- "node": ">=0.10.0"
- }
- },
"node_modules/rollup": {
"version": "4.34.9",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.34.9.tgz",
@@ -8082,29 +8176,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/run-parallel": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
- "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ],
- "dependencies": {
- "queue-microtask": "^1.2.2"
- }
- },
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
@@ -8157,15 +8228,16 @@
}
},
"node_modules/serialize-query-params": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/serialize-query-params/-/serialize-query-params-2.0.2.tgz",
- "integrity": "sha512-1chMo1dST4pFA9RDXAtF0Rbjaut4is7bzFbI1Z26IuMub68pNCILku85aYmeFhvnY//BXUPUhoRMjYcsT93J/Q==",
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/serialize-query-params/-/serialize-query-params-2.0.4.tgz",
+ "integrity": "sha512-y9WzzDj3BsGgKLCh0ugiinufS//YqOfao/yVJjkXA4VLuyNCfHOLU/cbulGPxs3aeCqhvROw7qPL04JSZnCo0w==",
"license": "ISC"
},
"node_modules/set-cookie-parser": {
"version": "2.7.2",
"resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz",
- "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw=="
+ "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==",
+ "license": "MIT"
},
"node_modules/shebang-command": {
"version": "2.0.0",
@@ -8479,9 +8551,9 @@
"license": "MIT"
},
"node_modules/synckit": {
- "version": "0.11.11",
- "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.11.11.tgz",
- "integrity": "sha512-MeQTA1r0litLUf0Rp/iisCaL8761lKAZHaimlbGK4j0HysC4PLfqygQj9srcs0m2RdtDYnF8UuYyKpbjHYp7Jw==",
+ "version": "0.11.12",
+ "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.11.12.tgz",
+ "integrity": "sha512-Bh7QjT8/SuKUIfObSXNHNSK6WHo6J1tHCqJsuaFDP7gP0fkzSfTxI8y85JrppZ0h8l0maIgc2tfuZQ6/t3GtnQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -8681,10 +8753,11 @@
}
},
"node_modules/ts-api-utils": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz",
- "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==",
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.4.0.tgz",
+ "integrity": "sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=18.12"
},
@@ -8948,12 +9021,12 @@
}
},
"node_modules/use-query-params": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/use-query-params/-/use-query-params-2.2.1.tgz",
- "integrity": "sha512-i6alcyLB8w9i3ZK3caNftdb+UnbfBRNPDnc89CNQWkGRmDrm/gfydHvMBfVsQJRq3NoHOM2dt/ceBWG2397v1Q==",
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/use-query-params/-/use-query-params-2.2.2.tgz",
+ "integrity": "sha512-OwGab8u8/x2xZp9uSyBsx0kXlkR9IR436zbygsYVGikPYY3OJosvve6IJVGwIJPcfyb/YHwvPrUNu65/JR++Kw==",
"license": "ISC",
"dependencies": {
- "serialize-query-params": "^2.0.2"
+ "serialize-query-params": "^2.0.3"
},
"peerDependencies": {
"@reach/router": "^1.2.1",
diff --git a/web/ui/package.json b/web/ui/package.json
index e634652b41..172e646aeb 100644
--- a/web/ui/package.json
+++ b/web/ui/package.json
@@ -16,11 +16,11 @@
],
"devDependencies": {
"@types/jest": "^29.5.14",
- "@typescript-eslint/eslint-plugin": "^8.46.3",
- "@typescript-eslint/parser": "^8.46.3",
+ "@typescript-eslint/eslint-plugin": "^8.54.0",
+ "@typescript-eslint/parser": "^8.54.0",
"eslint-config-prettier": "^10.1.8",
- "prettier": "^3.6.2",
- "ts-jest": "^29.4.5",
+ "prettier": "^3.8.1",
+ "ts-jest": "^29.4.6",
"typescript": "^5.9.3",
"vite": "^6.4.1"
}
diff --git a/web/ui/react-app/src/globals.ts b/web/ui/react-app/src/globals.ts
index d2a5f1d50a..7a59bdbffd 100644
--- a/web/ui/react-app/src/globals.ts
+++ b/web/ui/react-app/src/globals.ts
@@ -1,6 +1,5 @@
import jquery from 'jquery';
+import moment from 'moment';
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-(window as any).jQuery = jquery;
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-(window as any).moment = require('moment');
+window.jQuery = jquery;
+window.moment = moment;
diff --git a/web/ui/react-app/src/pages/graph/HistogramChart.test.tsx b/web/ui/react-app/src/pages/graph/HistogramChart.test.tsx
index 27018c50ca..e9529282b1 100644
--- a/web/ui/react-app/src/pages/graph/HistogramChart.test.tsx
+++ b/web/ui/react-app/src/pages/graph/HistogramChart.test.tsx
@@ -68,7 +68,6 @@ describe('HistogramChart', () => {
scale: 'linear' as 'linear' | 'exponential',
};
-
beforeEach(() => {
mockFormat.mockClear();
mockResolvedOptions.mockClear();
@@ -163,7 +162,9 @@ describe('HistogramChart', () => {
describe('Exponential Scale', () => {
beforeEach(() => {
- wrapper = mount( );
+ wrapper = mount(
+
+ );
});
it('renders the correct number of buckets', () => {
@@ -225,17 +226,24 @@ describe('HistogramChart', () => {
expect(b4.find('.histogram-bucket').prop('style')).toHaveProperty('height', `${b4Height}%`);
expect(parseFloat(b4.prop('style')?.left as string)).toBeGreaterThan(0);
expect(parseFloat(b4.prop('style')?.width as string)).toBeGreaterThan(0);
- expect(parseFloat(b4.prop('style')?.left as string) + parseFloat(b4.prop('style')?.width as string)).toBeLessThanOrEqual(100.01);
+ expect(
+ parseFloat(b4.prop('style')?.left as string) + parseFloat(b4.prop('style')?.width as string)
+ ).toBeLessThanOrEqual(100.01);
});
it('handles zero-crossing bucket correctly in exponential scale', () => {
- wrapper = mount( );
+ wrapper = mount(
+
+ );
const buckets = wrapper.find('.histogram-bucket-slot');
const countMax = 15;
const b2 = buckets.at(1);
const b2Height = (5 / countMax) * 100;
- expect(b2.find('.histogram-bucket').prop('style')).toHaveProperty('height', expect.stringContaining(b2Height.toFixed(1)));
+ expect(b2.find('.histogram-bucket').prop('style')).toHaveProperty(
+ 'height',
+ expect.stringContaining(b2Height.toFixed(1))
+ );
expect(parseFloat(b2.prop('style')?.left as string)).toBeGreaterThanOrEqual(0);
expect(parseFloat(b2.prop('style')?.width as string)).toBeGreaterThan(0);
});
diff --git a/web/ui/react-app/src/pages/graph/HistorgramHelpers.test.tsx b/web/ui/react-app/src/pages/graph/HistorgramHelpers.test.tsx
index ea70a17d08..480fb3716f 100644
--- a/web/ui/react-app/src/pages/graph/HistorgramHelpers.test.tsx
+++ b/web/ui/react-app/src/pages/graph/HistorgramHelpers.test.tsx
@@ -37,34 +37,31 @@ describe('HistogramHelpers', () => {
];
const bucketsStartingWithZeroCross: Bucket[] = [
- [0, '-1', '1', '5'],
- [0, '1', '10', '20'],
- [0, '10', '100', '8'],
+ [0, '-1', '1', '5'],
+ [0, '1', '10', '20'],
+ [0, '10', '100', '8'],
];
- const bucketsEndingWithZeroCross: Bucket[] = [
- [0, '-100', '-10', '10'],
- [0, '-10', '-1', '15'],
- [0, '-1', '1', '5'],
+ const bucketsEndingWithZeroCross: Bucket[] = [
+ [0, '-100', '-10', '10'],
+ [0, '-10', '-1', '15'],
+ [0, '-1', '1', '5'],
];
- const singleZeroBucket: Bucket[] = [
- [0, '0', '0', '10'],
- ];
+ const singleZeroBucket: Bucket[] = [[0, '0', '0', '10']];
- const emptyBuckets: Bucket[] = [];
+ const emptyBuckets: Bucket[] = [];
- const bucketsWithZeroFallback: Bucket[] = [
- [0, '1', '10', '5'],
- [0, '10', '100', '15'],
- [0, '0', '0', '2']
- ];
-
- const bucketsNegThenPosNoCross: Bucket[] = [
- [0, '-10', '-1', '15'],
- [0, '5', '10', '20'],
- ];
+ const bucketsWithZeroFallback: Bucket[] = [
+ [0, '1', '10', '5'],
+ [0, '10', '100', '15'],
+ [0, '0', '0', '2'],
+ ];
+ const bucketsNegThenPosNoCross: Bucket[] = [
+ [0, '-10', '-1', '15'],
+ [0, '5', '10', '20'],
+ ];
describe('calculateDefaultExpBucketWidth', () => {
it('calculates width for a standard positive bucket', () => {
@@ -75,29 +72,30 @@ describe('HistogramHelpers', () => {
it('calculates width for a standard negative bucket', () => {
const lastBucket = bucketsAllNegative[bucketsAllNegative.length - 1];
- const expectedAbs = Math.abs(Math.log(Math.abs(parseFloat(lastBucket[2]))) - Math.log(Math.abs(parseFloat(lastBucket[1]))));
+ const expectedAbs = Math.abs(
+ Math.log(Math.abs(parseFloat(lastBucket[2]))) - Math.log(Math.abs(parseFloat(lastBucket[1])))
+ );
expect(calculateDefaultExpBucketWidth(lastBucket, bucketsAllNegative)).toBeCloseTo(expectedAbs);
});
it('uses the previous bucket if the last bucket is [0, 0]', () => {
- const lastBucket = bucketsWithZeroFallback[bucketsWithZeroFallback.length - 1];
- const expected = Math.log(100) - Math.log(10);
- expect(calculateDefaultExpBucketWidth(lastBucket, bucketsWithZeroFallback)).toBeCloseTo(expected);
+ const lastBucket = bucketsWithZeroFallback[bucketsWithZeroFallback.length - 1];
+ const expected = Math.log(100) - Math.log(10);
+ expect(calculateDefaultExpBucketWidth(lastBucket, bucketsWithZeroFallback)).toBeCloseTo(expected);
});
it('throws an error if only a single [0, 0] bucket exists', () => {
- const lastBucket = singleZeroBucket[0];
- expect(() => calculateDefaultExpBucketWidth(lastBucket, singleZeroBucket)).toThrow(
- 'Only one bucket in histogram ([-0, 0]). Cannot calculate defaultExpBucketWidth.'
- );
+ const lastBucket = singleZeroBucket[0];
+ expect(() => calculateDefaultExpBucketWidth(lastBucket, singleZeroBucket)).toThrow(
+ 'Only one bucket in histogram ([-0, 0]). Cannot calculate defaultExpBucketWidth.'
+ );
});
});
-
describe('findMinPositive', () => {
- it('returns the first positive left bound when all are positive', () => {
- expect(findMinPositive(bucketsAllPositive)).toEqual(1);
- });
+ it('returns the first positive left bound when all are positive', () => {
+ expect(findMinPositive(bucketsAllPositive)).toEqual(1);
+ });
it('returns the left bound when it is the first positive value', () => {
expect(findMinPositive(bucketsNegThenPosNoCross)).toBe(5);
@@ -108,43 +106,42 @@ describe('HistogramHelpers', () => {
});
it('returns the right bound when the first bucket crosses zero', () => {
- expect(findMinPositive(bucketsStartingWithZeroCross)).toBe(1);
+ expect(findMinPositive(bucketsStartingWithZeroCross)).toBe(1);
});
it('returns the right bound when the last bucket crosses zero', () => {
expect(findMinPositive(bucketsEndingWithZeroCross)).toBe(1);
});
- it('returns 0 when all buckets are negative', () => {
- expect(findMinPositive(bucketsAllNegative)).toBe(0);
- });
+ it('returns 0 when all buckets are negative', () => {
+ expect(findMinPositive(bucketsAllNegative)).toBe(0);
+ });
it('returns 0 for empty buckets', () => {
expect(findMinPositive(emptyBuckets)).toBe(0);
});
- it('returns 0 for only zero bucket', () => {
- expect(findMinPositive(singleZeroBucket)).toBe(0);
- });
+ it('returns 0 for only zero bucket', () => {
+ expect(findMinPositive(singleZeroBucket)).toBe(0);
+ });
it('returns 0 when buckets is undefined', () => {
expect(findMinPositive(undefined as any)).toBe(0);
});
- it('returns the correct positive bound with exact zero bucket present', () => {
- expect(findMinPositive(bucketsWithExactZeroBucket)).toBe(1);
- });
+ it('returns the correct positive bound with exact zero bucket present', () => {
+ expect(findMinPositive(bucketsWithExactZeroBucket)).toBe(1);
+ });
});
-
describe('findMaxNegative', () => {
- it('returns 0 when all buckets are positive', () => {
- expect(findMaxNegative(bucketsAllPositive)).toBe(0);
- });
+ it('returns 0 when all buckets are positive', () => {
+ expect(findMaxNegative(bucketsAllPositive)).toBe(0);
+ });
- it('returns the right bound of the last negative bucket when all are negative', () => {
- expect(findMaxNegative(bucketsAllNegative)).toEqual(-1);
- });
+ it('returns the right bound of the last negative bucket when all are negative', () => {
+ expect(findMaxNegative(bucketsAllNegative)).toEqual(-1);
+ });
it('returns the right bound of the bucket before the middle zero-crossing bucket', () => {
expect(findMaxNegative(bucketsCrossingZeroMid)).toEqual(-1);
@@ -155,7 +152,7 @@ describe('HistogramHelpers', () => {
});
it('returns the right bound of the bucket before the last zero-crossing bucket', () => {
- expect(findMaxNegative(bucketsEndingWithZeroCross)).toEqual(-1);
+ expect(findMaxNegative(bucketsEndingWithZeroCross)).toEqual(-1);
});
it('returns 0 for empty buckets', () => {
@@ -171,23 +168,28 @@ describe('HistogramHelpers', () => {
});
it('returns the right bound of the bucket before an exact zero bucket', () => {
- expect(findMaxNegative(bucketsWithExactZeroBucket)).toEqual(-1);
+ expect(findMaxNegative(bucketsWithExactZeroBucket)).toEqual(-1);
});
});
-
describe('findZeroBucket', () => {
it('returns the index of bucket strictly containing zero', () => {
expect(findZeroBucket(bucketsCrossingZeroMid)).toBe(2);
});
it('returns the index of bucket with zero as left boundary', () => {
- const buckets: Bucket[] = [[0, '-5','-1', '10'], [0, '0', '5', '15']];
+ const buckets: Bucket[] = [
+ [0, '-5', '-1', '10'],
+ [0, '0', '5', '15'],
+ ];
expect(findZeroBucket(buckets)).toBe(1);
});
it('returns the index of bucket with zero as right boundary', () => {
- const buckets: Bucket[] = [[0, '-5', '0', '10'], [0, '1', '5', '15']];
+ const buckets: Bucket[] = [
+ [0, '-5', '0', '10'],
+ [0, '1', '5', '15'],
+ ];
expect(findZeroBucket(buckets)).toBe(0);
});
@@ -208,49 +210,51 @@ describe('HistogramHelpers', () => {
});
it('returns 0 if the first bucket crosses zero', () => {
- expect(findZeroBucket(bucketsStartingWithZeroCross)).toBe(0);
+ expect(findZeroBucket(bucketsStartingWithZeroCross)).toBe(0);
});
- it('returns the last index if the last bucket crosses zero', () => {
- expect(findZeroBucket(bucketsEndingWithZeroCross)).toBe(2);
- });
+ it('returns the last index if the last bucket crosses zero', () => {
+ expect(findZeroBucket(bucketsEndingWithZeroCross)).toBe(2);
+ });
it('returns -1 when buckets array is empty', () => {
expect(findZeroBucket(emptyBuckets)).toBe(-1);
});
});
-
describe('findZeroAxisLeft', () => {
it('calculates correctly for linear scale crossing zero', () => {
- const rangeMin = -100; const rangeMax = 100;
+ const rangeMin = -100;
+ const rangeMax = 100;
const expected = '50%';
const result = findZeroAxisLeft('linear', rangeMin, rangeMax, 1, -1, 2, 0, 0, 0);
expect(result).toEqual(expected);
});
- it('calculates correctly for asymmetric linear scale crossing zero', () => {
- const rangeMin = -10; const rangeMax = 90;
- const expectedNumber = ((0 - rangeMin) / (rangeMax - rangeMin)) * 100;
- const resultString = findZeroAxisLeft('linear', rangeMin, rangeMax, 1, -1, 0, 0, 0, 0);
- expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
- });
+ it('calculates correctly for asymmetric linear scale crossing zero', () => {
+ const rangeMin = -10;
+ const rangeMax = 90;
+ const expectedNumber = ((0 - rangeMin) / (rangeMax - rangeMin)) * 100;
+ const resultString = findZeroAxisLeft('linear', rangeMin, rangeMax, 1, -1, 0, 0, 0, 0);
+ expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
+ });
it('calculates correctly for linear scale all positive (off-scale left)', () => {
- const rangeMin = 10; const rangeMax = 100;
+ const rangeMin = 10;
+ const rangeMax = 100;
const expectedNumber = ((0 - rangeMin) / (rangeMax - rangeMin)) * 100;
const resultString = findZeroAxisLeft('linear', rangeMin, rangeMax, 10, 0, -1, 0, 0, 0);
expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
});
it('calculates correctly for linear scale all negative (off-scale right)', () => {
- const rangeMin = -100; const rangeMax = -10;
+ const rangeMin = -100;
+ const rangeMax = -10;
const expectedNumber = ((0 - rangeMin) / (rangeMax - rangeMin)) * 100;
const resultString = findZeroAxisLeft('linear', rangeMin, rangeMax, 0, -10, -1, 0, 0, 0);
expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
});
-
const expMinPos = 1;
const expMaxNeg = -1;
const expZeroIdx = 2;
@@ -264,22 +268,46 @@ describe('HistogramHelpers', () => {
});
it('returns 100% for exponential scale when minPositive is 0', () => {
- expect(findZeroAxisLeft('exponential', -100, -1, 0, -1, -1, expNegWidth, expNegWidth + defaultExpBW, defaultExpBW)).toEqual('100%');
+ expect(
+ findZeroAxisLeft('exponential', -100, -1, 0, -1, -1, expNegWidth, expNegWidth + defaultExpBW, defaultExpBW)
+ ).toEqual('100%');
});
it('calculates position between buckets when zeroBucketIdx is -1 (exponential)', () => {
- const minPos = 5; const maxNeg = -1; const zeroIdx = -1;
+ const minPos = 5;
+ const maxNeg = -1;
+ const zeroIdx = -1;
const negW = Math.log(Math.abs(-1)) - Math.log(Math.abs(-10));
const posW = Math.log(10) - Math.log(5);
const totalW = Math.abs(negW) + posW + defaultExpBW;
const expectedNumber = (Math.abs(negW) / totalW) * 100;
- const resultString = findZeroAxisLeft('exponential', -10, 10, minPos, maxNeg, zeroIdx, Math.abs(negW), totalW, defaultExpBW);
+ const resultString = findZeroAxisLeft(
+ 'exponential',
+ -10,
+ 10,
+ minPos,
+ maxNeg,
+ zeroIdx,
+ Math.abs(negW),
+ totalW,
+ defaultExpBW
+ );
expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
});
it('calculates position using bucket width when zeroBucketIdx exists (exponential)', () => {
const expectedNumber = ((expNegWidth + 0.5 * defaultExpBW) / expTotalWidth) * 100;
- const resultString = findZeroAxisLeft('exponential', -100, 100, expMinPos, expMaxNeg, expZeroIdx, expNegWidth, expTotalWidth, defaultExpBW);
+ const resultString = findZeroAxisLeft(
+ 'exponential',
+ -100,
+ 100,
+ expMinPos,
+ expMaxNeg,
+ expZeroIdx,
+ expNegWidth,
+ expTotalWidth,
+ defaultExpBW
+ );
expect(parseFloat(resultString)).toBeCloseTo(expectedNumber, 1);
});
@@ -288,7 +316,6 @@ describe('HistogramHelpers', () => {
});
});
-
describe('showZeroAxis', () => {
it('returns true when axis is between 5% and 95%', () => {
expect(showZeroAxis('5.01%')).toBe(true);
@@ -308,4 +335,4 @@ describe('HistogramHelpers', () => {
expect(showZeroAxis('120%')).toBe(false);
});
});
-});
\ No newline at end of file
+});
diff --git a/web/ui/react-app/src/types/index.d.ts b/web/ui/react-app/src/types/index.d.ts
index addf1cc702..9cf8fbd7cc 100644
--- a/web/ui/react-app/src/types/index.d.ts
+++ b/web/ui/react-app/src/types/index.d.ts
@@ -68,3 +68,8 @@ interface JQueryStatic {
scale: () => Color;
};
}
+
+interface Window {
+ jQuery: JQueryStatic;
+ moment: typeof import('moment');
+}
diff --git a/web/ui/react-app/src/utils/utils.test.ts b/web/ui/react-app/src/utils/utils.test.ts
index 93174df87b..61fcd733ab 100644
--- a/web/ui/react-app/src/utils/utils.test.ts
+++ b/web/ui/react-app/src/utils/utils.test.ts
@@ -333,13 +333,13 @@ describe('Utils', () => {
expect(parsePrometheusFloat('-1.7e+01')).toEqual(-17);
});
});
- describe('createExpressionLink',()=>{
- it('<....>builds link',()=>{
+ describe('createExpressionLink', () => {
+ it('<....>builds link', () => {
expect(createExpressionLink('up')).toEqual(
`../graph?g0.expr=up&g0.tab=1&g0.display_mode=${GraphDisplayMode.Lines}&g0.show_exemplars=0&g0.range_input=1h`
);
});
- it('url-encodes PromQL',() =>{
+ it('url-encodes PromQL', () => {
expect(createExpressionLink('ALERTS{alertname="HighCPU"}')).toEqual(
`../graph?g0.expr=ALERTS%7Balertname%3D%22High%20CPU%22%7D&g0.tab=1&g0.display_mode=${GraphDisplayMode.Lines}&g0.show_exemplars=0&g0.range_input=1h`
);
diff --git a/web/web.go b/web/web.go
index afe78e4255..583492abc9 100644
--- a/web/web.go
+++ b/web/web.go
@@ -36,6 +36,7 @@ import (
"time"
"github.com/alecthomas/units"
+ "github.com/felixge/fgprof"
"github.com/grafana/regexp"
"github.com/mwitkow/go-conntrack"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
@@ -53,6 +54,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
@@ -112,6 +114,8 @@ const (
Stopping
)
+var fgprofHandler = fgprof.Handler()
+
// withStackTracer logs the stack trace in case the request panics. The function
// will re-raise the error which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't manage properly the
@@ -304,6 +308,9 @@ type Options struct {
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
FeatureRegistry features.Collector
+
+ // Parser is the PromQL parser used for parsing query expressions.
+ Parser parser.Parser
}
// New initializes a new web Handler.
@@ -311,6 +318,9 @@ func New(logger *slog.Logger, o *Options) *Handler {
if logger == nil {
logger = promslog.NewNopLogger()
}
+ if o.Parser == nil {
+ o.Parser = parser.NewParser(parser.Options{})
+ }
m := newMetrics(o.Registerer)
router := route.New().
@@ -356,12 +366,20 @@ func New(logger *slog.Logger, o *Options) *Handler {
factoryAr := func(context.Context) api_v1.AlertmanagerRetriever { return h.notifier }
FactoryRr := func(context.Context) api_v1.RulesRetriever { return h.ruleManager }
- var app storage.Appendable
+ var (
+ app storage.Appendable
+ appV2 storage.AppendableV2
+ )
if o.EnableRemoteWriteReceiver || o.EnableOTLPWriteReceiver {
- app = h.storage
+ app, appV2 = h.storage, h.storage
}
- h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, app, h.exemplarStorage, factorySPr, factoryTr, factoryAr,
+ version := ""
+ if o.Version != nil {
+ version = o.Version.Version
+ }
+
+ h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, app, appV2, h.exemplarStorage, factorySPr, factoryTr, factoryAr,
func() config.Config {
h.mtx.RLock()
defer h.mtx.RUnlock()
@@ -402,6 +420,11 @@ func New(logger *slog.Logger, o *Options) *Handler {
o.AppendMetadata,
nil,
o.FeatureRegistry,
+ api_v1.OpenAPIOptions{
+ ExternalURL: o.ExternalURL.String(),
+ Version: version,
+ },
+ o.Parser,
)
if r := o.FeatureRegistry; r != nil {
@@ -418,6 +441,8 @@ func New(logger *slog.Logger, o *Options) *Handler {
r.Enable(features.API, "time_range_series") // start/end parameters for /series endpoint.
r.Enable(features.API, "time_range_labels") // start/end parameters for /labels endpoints.
r.Enable(features.API, "exclude_alerts") // exclude_alerts parameter for /rules endpoint.
+ r.Enable(features.API, "openapi_3.1") // OpenAPI 3.1 specification support.
+ r.Enable(features.API, "openapi_3.2") // OpenAPI 3.2 specification support.
r.Set(features.UI, "ui_v3", !o.UseOldUI)
r.Set(features.UI, "ui_v2", o.UseOldUI)
}
@@ -604,6 +629,8 @@ func serveDebug(w http.ResponseWriter, req *http.Request) {
pprof.Symbol(w, req)
case "trace":
pprof.Trace(w, req)
+ case "fgprof":
+ fgprofHandler.ServeHTTP(w, req)
default:
req.URL.Path = "/debug/pprof/" + subpath
pprof.Index(w, req)
@@ -634,8 +661,8 @@ func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
case Ready:
f(w, r)
case NotReady:
- w.WriteHeader(http.StatusServiceUnavailable)
w.Header().Set("X-Prometheus-Stopping", "false")
+ w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
case Stopping:
w.Header().Set("X-Prometheus-Stopping", "true")
diff --git a/web/web_test.go b/web/web_test.go
index ae7d532f1f..cbcf15ffdc 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -140,11 +140,32 @@ func TestReadyAndHealthy(t *testing.T) {
resp, err = http.Get(u)
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
+ require.Equal(t, "false", resp.Header.Get("X-Prometheus-Stopping"))
cleanupTestResponse(t, resp)
resp, err = http.Head(u)
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
+ require.Equal(t, "false", resp.Header.Get("X-Prometheus-Stopping"))
+ cleanupTestResponse(t, resp)
+ }
+
+ // Set to stopping
+ webHandler.SetReady(Stopping)
+
+ for _, u := range []string{
+ baseURL + "/-/ready",
+ } {
+ resp, err = http.Get(u)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
+ require.Equal(t, "true", resp.Header.Get("X-Prometheus-Stopping"))
+ cleanupTestResponse(t, resp)
+
+ resp, err = http.Head(u)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
+ require.Equal(t, "true", resp.Header.Get("X-Prometheus-Stopping"))
cleanupTestResponse(t, resp)
}
@@ -307,6 +328,7 @@ func TestDebugHandler(t *testing.T) {
Host: "localhost.localdomain:9090",
Scheme: "http",
},
+ Version: &PrometheusVersion{},
}
handler := New(nil, opts)
handler.SetReady(Ready)
@@ -332,6 +354,7 @@ func TestHTTPMetrics(t *testing.T) {
Host: "localhost.localdomain:9090",
Scheme: "http",
},
+ Version: &PrometheusVersion{},
})
getReady := func() int {
t.Helper()