diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..57a311a
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,18 @@
+FROM golang:1.20 AS builder
+WORKDIR /sidecar
+COPY . .
+
+RUN apt-get update && apt-get install -y --no-install-recommends gettext-base && rm -rf /var/lib/apt/lists/*
+RUN go install github.com/client9/misspell/cmd/misspell@v0.3.4 \
+    && go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.1 \
+    && go install github.com/google/addlicense@v1.0.0
+RUN apt-get update && apt-get install -y --no-install-recommends make && rm -rf /var/lib/apt/lists/*
+RUN make build-collector
+
+FROM alpine:3.18
+RUN apk add --no-cache ca-certificates
+COPY --from=builder /sidecar/bin/rungmpcol /rungmpcol
+COPY collector-config.yaml /etc/rungmp/config.yaml
+
+ENTRYPOINT ["/rungmpcol"]
+CMD ["--config", "/etc/rungmp/config.yaml"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..5b3f3af
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,148 @@
+# read PKG_VERSION from VERSION file
+include VERSION
+
+# if GOOS is not supplied, set default value based on user's system, will be overridden for OS specific packaging commands
+GOOS ?= $(shell go env GOOS)
+
+ALL_SRC := $(shell find . -name '*.go' -type f | sort)
+ALL_DOC := $(shell find . 
\( -name "*.md" -o -name "*.yaml" \) -type f | sort) +GIT_SHA := $(shell git rev-parse --short HEAD) + +BUILD_INFO_IMPORT_PATH := collector/internal/version +BUILD_X1 := -X $(BUILD_INFO_IMPORT_PATH).GitHash=$(GIT_SHA) +BUILD_X2 := -X $(BUILD_INFO_IMPORT_PATH).Version=$(PKG_VERSION) +LD_FLAGS := -ldflags "${BUILD_X1} ${BUILD_X2}" + +TOOLS_DIR := internal/tools + +.EXPORT_ALL_VARIABLES: + +.DEFAULT_GOAL := presubmit + +# -------------------------- +# Helper Commands +# -------------------------- + +.PHONY: update-components-old +update-components-old: + grep -o github.com/open-telemetry/opentelemetry-collector-contrib/[[:lower:]]*/[[:lower:]]* go.mod | xargs -I '{}' go get {} + go mod tidy + cd $(TOOLS_DIR) && go get -u github.com/open-telemetry/opentelemetry-collector-contrib/cmd/mdatagen + cd $(TOOLS_DIR) && go mod tidy + +OTEL_VER ?= latest +.PHONY: update-components +update-components: + go list -m -f '{{if not (or .Indirect .Main)}}{{.Path}}{{end}}' all | \ + grep "^go.opentelemetry.io" | \ + grep -v "go.opentelemetry.io/collector/featuregate" | \ + grep -v "go.opentelemetry.io/collector/pdata" | \ + xargs -t -I '{}' go get {}@$(OTEL_VER) + go list -m -f '{{if not (or .Indirect .Main)}}{{.Path}}{{end}}' all | \ + grep "^github.com/open-telemetry/opentelemetry-collector-contrib" | \ + xargs -t -I '{}' go get {}@$(OTEL_VER) + go mod tidy + cd $(TOOLS_DIR) && go get -u github.com/open-telemetry/opentelemetry-collector-contrib/cmd/mdatagen@$(OTEL_VER) + cd $(TOOLS_DIR) && go mod tidy + +# We can bring this target back when https://github.com/open-telemetry/opentelemetry-collector/issues/8063 is resolved. 
+update-opentelemetry: + $(MAKE) update-components + $(MAKE) install-tools + $(MAKE) GO_BUILD_TAGS=gpu generate + +# -------------------------- +# Tools +# -------------------------- + +.PHONY: install-tools +install-tools: + cd $(TOOLS_DIR) && \ + go install \ + github.com/client9/misspell/cmd/misspell \ + github.com/golangci/golangci-lint/cmd/golangci-lint \ + github.com/google/addlicense \ + github.com/open-telemetry/opentelemetry-collector-contrib/cmd/mdatagen + +.PHONY: addlicense +addlicense: + addlicense -c "Google LLC" -l apache $(ALL_SRC) + +.PHONY: checklicense +checklicense: + @output=`addlicense -check $(ALL_SRC)` && echo checklicense finished successfully || (echo checklicense errors: $$output && exit 1) + +.PHONY: lint +lint: + golangci-lint run --allow-parallel-runners --build-tags=$(GO_BUILD_TAGS) --timeout=20m + +.PHONY: misspell +misspell: + @output=`misspell -error $(ALL_DOC)` && echo misspell finished successfully || (echo misspell errors:\\n$$output && exit 1) + +# -------------------------- +# CI +# -------------------------- + +# Adds license headers to files that are missing it, quiet tests +# so full output is visible at a glance. +.PHONY: precommit +precommit: addlicense lint misspell test + +# Checks for the presence of required license headers, runs verbose +# tests for complete information in CI job. +.PHONY: presubmit +presubmit: checklicense lint misspell test_verbose + +# -------------------------- +# Build and Test +# -------------------------- + +GO_BUILD_OUT ?= ./bin/rungmpcol +.PHONY: build-collector +build-collector: + CGO_ENABLED=0 go build -tags=$(GO_BUILD_TAGS) -o $(GO_BUILD_OUT) $(LD_FLAGS) -buildvcs=false ./collector/cmd/rungmpcol + +OTELCOL_BINARY = google-cloud-run-rmp-sidecar-$(GOOS) +.PHONY: build-collector-full-name +build-collector-full-name: + $(MAKE) GO_BUILD_OUT=./bin/$(OTELCOL_BINARY) build-collector + +.PHONY: test +test: + go test -tags=$(GO_BUILD_TAGS) $(GO_TEST_VERBOSE) -race ./... 
+ +.PHONY: test_verbose +test_verbose: + $(MAKE) GO_TEST_VERBOSE=-v test + +.PHONY: generate +generate: + go generate ./... + +# -------------------- +# Docker +# -------------------- + +# set default docker build image name +BUILD_IMAGE_NAME ?= rungmpcol-build +BUILD_IMAGE_REPO ?= gcr.io/stackdriver-test-143416/opentelemetry-operations-collector:test + +.PHONY: docker-build-image +docker-build-image: + docker build -t $(BUILD_IMAGE_NAME) . + +.PHONY: docker-push-image +docker-push-image: + docker tag $(BUILD_IMAGE_NAME) $(BUILD_IMAGE_REPO) + docker push $(BUILD_IMAGE_REPO) + +.PHONY: docker-build-and-push +docker-build-and-push: docker-build-image docker-push-image + +# Usage: make TARGET=<target> docker-run +# Example: make TARGET=build-collector docker-run +TARGET ?= build-collector +.PHONY: docker-run +docker-run: + docker run -e PKG_VERSION -v $(CURDIR):/mnt -w /mnt $(BUILD_IMAGE_NAME) /bin/bash -c "make $(TARGET)" diff --git a/README.md b/README.md index 7017bb1..675fb96 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ The bundled configuration file for Cloud Build (`cloudbuild.yaml`) requires a ne * `roles/artifactregistry.createOnPushWriter` * `roles/run.admin` -Running `create-service-account.sh` creates a new service account `run-otel-example-sa@.iam.gserviceaccount.com` for you. Then launch a Cloud Build task with `gcloud` command. +Running `create-service-account.sh` creates a new service account `run-gmp-sa@.iam.gserviceaccount.com` for you. Then launch a Cloud Build task with `gcloud` command. ```console ./create-service-account.sh @@ -67,7 +67,7 @@ gcloud builds submit . --config=cloudbuild.yaml After the build, run the following command to check the endpoint URL.
```console -gcloud run services describe opentelemetry-cloud-run-sample --region=us-east1 --format="value(status.url)" +gcloud run services describe run-gmp-sidecar-service --region=us-east1 --format="value(status.url)" ``` #### Build and Run Manually @@ -82,7 +82,7 @@ commands: ``` export GCP_PROJECT= -gcloud artifacts repositories create run-otel-example \ +gcloud artifacts repositories create run-gmp \ --repository-format=docker \ --location=us-east1 ``` @@ -98,8 +98,8 @@ Build and push the app with the following commands: ``` pushd app -docker build -t us-east1-docker.pkg.dev/$GCP_PROJECT/run-otel-example/sample-app . -docker push us-east1-docker.pkg.dev/$GCP_PROJECT/run-otel-example/sample-app +docker build -t us-east1-docker.pkg.dev/$GCP_PROJECT/run-gmp/sample-app . +docker push us-east1-docker.pkg.dev/$GCP_PROJECT/run-gmp/sample-app popd ``` @@ -112,10 +112,8 @@ config file with it. Build the Collector image with the following commands: ``` -pushd collector -docker build -t us-east1-docker.pkg.dev/$GCP_PROJECT/run-otel-example/collector . -docker push us-east1-docker.pkg.dev/$GCP_PROJECT/run-otel-example/collector -popd +docker build -t us-east1-docker.pkg.dev/$GCP_PROJECT/run-gmp/collector . 
+docker push us-east1-docker.pkg.dev/$GCP_PROJECT/run-gmp/collector ``` ##### Create the Cloud Run Service @@ -127,8 +125,8 @@ Replace the `%SAMPLE_APP_IMAGE%` and `%OTELCOL_IMAGE%` placeholders in `run-service.yaml` with the images you built above, ie: ``` -sed -i s@%OTELCOL_IMAGE%@us-east1-docker.pkg.dev/${GCP_PROJECT}/run-otel-example/collector@g run-service.yaml -sed -i s@%SAMPLE_APP_IMAGE%@us-east1-docker.pkg.dev/${GCP_PROJECT}/run-otel-example/sample-app@g run-service.yaml +sed -i s@%OTELCOL_IMAGE%@us-east1-docker.pkg.dev/${GCP_PROJECT}/run-gmp/collector@g run-service.yaml +sed -i s@%SAMPLE_APP_IMAGE%@us-east1-docker.pkg.dev/${GCP_PROJECT}/run-gmp/sample-app@g run-service.yaml ``` Create the Service with the following command: @@ -145,7 +143,7 @@ Finally before you make make the request to the URL, you need to change the Cloud Run service policy to accept unauthenticated HTTP access. ``` -gcloud run services set-iam-policy opentelemetry-cloud-run-sample policy.yaml +gcloud run services set-iam-policy run-gmp-sidecar-service policy.yaml ``` ### View telemetry in Google Cloud @@ -170,8 +168,8 @@ Updated sidecar-sample-counter metric! After running the demo, please make sure to clean up your project so that you don't consume unexpected resources and get charged. ```console -gcloud run services delete opentelemetry-cloud-run-sample --region us-east1 --quiet -gcloud artifacts repositories delete run-otel-example \ +gcloud run services delete run-gmp-sidecar-service --region us-east1 --quiet +gcloud artifacts repositories delete run-gmp \ --location=us-east1 \ --quiet ``` diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..ee0311e --- /dev/null +++ b/VERSION @@ -0,0 +1,3 @@ +# The release version and build number of the google-cloud-metrics-agent package. 
+PKG_VERSION=0.0.2 +PKG_BUILD=1 diff --git a/app/main.go b/app/main.go index ad77ed2..86325a1 100644 --- a/app/main.go +++ b/app/main.go @@ -1,3 +1,17 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package main import ( diff --git a/app/sample-app b/app/sample-app deleted file mode 100755 index 1cad48d..0000000 Binary files a/app/sample-app and /dev/null differ diff --git a/cloudbuild.yaml b/cloudbuild.yaml index c121907..45138a4 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -25,7 +25,7 @@ steps: - BUILD_SAMPLE_APP - name: "gcr.io/cloud-builders/docker" - args: ["build", "-t", "${_IMAGE_COLLECTOR}", "./collector"] + args: ["build", "-t", "${_IMAGE_COLLECTOR}", "."] id: BUILD_COLLECTOR waitFor: ["-"] @@ -81,10 +81,10 @@ steps: substitutions: _REGION: us-east1 - _REGISTRY: ${_REGION}-docker.pkg.dev/${PROJECT_ID}/run-otel-example + _REGISTRY: ${_REGION}-docker.pkg.dev/${PROJECT_ID}/run-gmp _IMAGE_APP: ${_REGISTRY}/sample-app _IMAGE_COLLECTOR: ${_REGISTRY}/collector - _SA_NAME: run-otel-example-sa + _SA_NAME: run-gmp-sa images: - ${_IMAGE_APP} diff --git a/collector/collector-config.yaml b/collector-config.yaml similarity index 100% rename from collector/collector-config.yaml rename to collector-config.yaml diff --git a/collector/.gitattributes b/collector/.gitattributes new file mode 100644 index 0000000..5c15f15 --- /dev/null +++ b/collector/.gitattributes @@ -0,0 +1,2 @@ +# See golang.org/issue/9281 +* -text diff 
--git a/collector/.gitignore b/collector/.gitignore new file mode 100644 index 0000000..3d29804 --- /dev/null +++ b/collector/.gitignore @@ -0,0 +1,27 @@ +bin/ +dist/ + +## Editors + +# GoLand IDEA +/.idea/ +*.iml + +# VS Code +.vscode + +# Emacs +*~ +\#*\# + +# Miscellaneous files +*.sw[op] +*.DS_Store + +# Coverage +coverage.txt +coverage.html + +# Wix +*.wixobj +*.wixpdb diff --git a/collector/.golangci.yaml b/collector/.golangci.yaml new file mode 100644 index 0000000..349cc25 --- /dev/null +++ b/collector/.golangci.yaml @@ -0,0 +1,62 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +run: + timeout: 5m + + skip-dirs: + - receiver/prometheusreceiver + +linters: + enable: + - gocritic + - gofmt + - goimports + - revive + - govet + - misspell + - exportloopref + - staticcheck + - unconvert + +issues: + exclude-rules: + - path: _test\.go + linters: + - exportloopref + +linters-settings: + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: github.com/GoogleCloudPlatform/opentelemetry-operations-collector + govet: + # report about shadowed variables + check-shadowing: true + # settings per analyzer + settings: + printf: # analyzer name, run `go tool vet help` to see all analyzers + funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + disable: + - fieldalignment + enable-all: true + misspell: + locale: US diff --git a/collector/Dockerfile b/collector/Dockerfile deleted file mode 100644 index af0723d..0000000 --- a/collector/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https:#www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -FROM otel/opentelemetry-collector-contrib:0.75.0 - -COPY collector-config.yaml /etc/otelcol-contrib/config.yaml diff --git a/collector/cmd/rungmpcol/main.go b/collector/cmd/rungmpcol/main.go new file mode 100644 index 0000000..533cd69 --- /dev/null +++ b/collector/cmd/rungmpcol/main.go @@ -0,0 +1,25 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/service" +) + +func main() { + service.MainContext(context.Background()) +} diff --git a/collector/exporter/googlemanagedprometheusexporter/Makefile b/collector/exporter/googlemanagedprometheusexporter/Makefile new file mode 100644 index 0000000..c149622 --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common \ No newline at end of file diff --git a/collector/exporter/googlemanagedprometheusexporter/README.md b/collector/exporter/googlemanagedprometheusexporter/README.md new file mode 100644 index 0000000..b16be5f --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/README.md @@ -0,0 +1,181 @@ +# Google Managed Service for Prometheus Exporter + + +| Status | | +| ------------- |-----------| +| Stability | [beta]: metrics | +| Distributions | [contrib], [observiq] | + +[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[contrib]: 
https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[observiq]: https://github.com/observIQ/observiq-otel-collector + + +This exporter can be used to send metrics and traces to [Google Cloud Managed Service for Prometheus](https://cloud.google.com/stackdriver/docs/managed-prometheus). The difference between this exporter and the `googlecloud` exporter is that metrics sent with this exporter are queried using [promql](https://prometheus.io/docs/prometheus/latest/querying/basics/#querying-prometheus), rather than standard the standard MQL. + +This exporter is not the standard method of ingesting metrics into Google Cloud Managed Service for Prometheus, which is built on a drop-in replacement for the Prometheus server: https://github.com/GoogleCloudPlatform/prometheus. This exporter does not support the full range of Prometheus functionality, including the UI, recording and alerting rules, and can't be used with the GMP Operator, but does support sending metrics. + +## Configuration Reference + +The following configuration options are supported: + +- `project` (optional): GCP project identifier. +- `user_agent` (optional): Override the user agent string sent on requests to Cloud Monitoring (currently only applies to metrics). Specify `{{version}}` to include the application version number. Defaults to `opentelemetry-collector-contrib {{version}}`. +- `metric`(optional): Configuration for sending metrics to Cloud Monitoring. + - `endpoint` (optional): Endpoint where metric data is going to be sent to. Replaces `endpoint`. +- `use_insecure` (optional): If true, use gRPC as their communication transport. Only has effect if Endpoint is not "". +- `retry_on_failure` (optional): Configuration for how to handle retries when sending data to Google Cloud fails. 
+ - `enabled` (default = false) + - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false` + - `max_interval` (default = 30s): Is the upper bound on backoff; ignored if `enabled` is `false` + - `max_elapsed_time` (default = 120s): Is the maximum amount of time spent trying to send a batch; ignored if `enabled` is `false` +- `sending_queue` (optional): Configuration for how to buffer traces before sending. + - `enabled` (default = true) + - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false` + - `queue_size` (default = 1000): Maximum number of batches kept in memory before data; ignored if `enabled` is `false`; + User should calculate this as `num_seconds * requests_per_second` where: + - `num_seconds` is the number of seconds to buffer in case of a backend outage + - `requests_per_second` is the average number of requests per seconds. + +Note: These `retry_on_failure` and `sending_queue` are provided (and documented) by the [Exporter Helper](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper#configuration) + +## Example Configuration + +```yaml +receivers: + prometheus: + config: + scrape_configs: + # Add your prometheus scrape configuration here. + # Using kubernetes_sd_configs with namespaced resources (e.g. pod) + # ensures the namespace is set on your metrics. 
+ - job_name: 'kubernetes-pods' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: (.+):(?:\d+);(\d+) + replacement: $$1:$$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) +processors: + batch: + # batch metrics before sending to reduce API usage + send_batch_max_size: 200 + send_batch_size: 200 + timeout: 5s + memory_limiter: + # drop metrics if memory usage gets too high + check_interval: 1s + limit_percentage: 65 + spike_limit_percentage: 20 + resourcedetection: + # detect cluster name and location + detectors: [gcp] + timeout: 10s + transform: + # "location", "cluster", "namespace", "job", "instance", and "project_id" are reserved, and + # metrics containing these labels will be rejected. Prefix them with exported_ to prevent this. 
+ metric_statements: + - context: datapoint + statements: + - set(attributes["exported_location"], attributes["location"]) + - delete_key(attributes, "location") + - set(attributes["exported_cluster"], attributes["cluster"]) + - delete_key(attributes, "cluster") + - set(attributes["exported_namespace"], attributes["namespace"]) + - delete_key(attributes, "namespace") + - set(attributes["exported_job"], attributes["job"]) + - delete_key(attributes, "job") + - set(attributes["exported_instance"], attributes["instance"]) + - delete_key(attributes, "instance") + - set(attributes["exported_project_id"], attributes["project_id"]) + - delete_key(attributes, "project_id") + +exporters: + googlemanagedprometheus: + +service: + pipelines: + metrics: + receivers: [prometheus] + processors: [batch, memory_limiter, transform, resourcedetection] + exporters: [googlemanagedprometheus] +``` + +## Resource Attribute Handling + +The Google Managed Prometheus exporter maps metrics to the +[prometheus_target](https://cloud.google.com/monitoring/api/resources#tag_prometheus_target) +monitored resource. The logic for mapping to monitored resources is designed to +be used with the prometheus receiver, but can be used with other receivers as +well. To avoid collisions (i.e. "duplicate timeseries enountered" errors), you +need to ensure the prometheus_target resource uniquely identifies the source of +metrics. The exporter uses the following resource attributes to determine +monitored resource: + +* location: [`location`, `cloud.availability_zone`, `cloud.region`] +* cluster: [`cluster`, `k8s.cluster.name`] +* namespace: [`namespace`, `k8s.namespace.name`] +* job: [`service.name` + `service.namespace`] +* instance: [`service.instance.id`] + +In the configuration above, `cloud.availability_zone`, `cloud.region`, and +`k8s.cluster.name` are detected using the `resourcedetection` processor with +the `gcp` detector. 
The prometheus receiver sets `service.name` to the +configured `job_name`, and `service.instance.id` is set to the scrape target's +`instance`. The prometheus receiver sets `k8s.namespace.name` when using +`role: pod`. + +### Manually Setting location, cluster, or namespace + +In GMP, the above attributes are used to identify the `prometheus_target` +monitored resource. As such, it is recommended to avoid writing metric or resource labels +that match these keys. Doing so can cause errors when exporting metrics to +GMP or when trying to query from GMP. So, the recommended way to set them +is with the [resourcedetection processor](../../processor/resourcedetectionprocessor). + +If you still need to set `location`, `cluster`, or `namespace` labels +(such as when running in non-GCP environments), you can do so with the +[resource processor](../../processor/resourceprocessor) like so: + +```yaml +processors: + resource: + attributes: + - key: "location" + value: "us-east-1" + action: upsert +``` + +### Setting cluster, location or namespace using metric labels + +This example copies the `location` metric attribute to a new `exported_location` +attribute, then deletes the original `location`. It is recommended to use the `exported_*` +prefix, which is consistent with GMP's behavior. + +You can also use the [groupbyattrs processor](../../processor/groupbyattrsprocessor) +to move metric labels to resource labels. This is useful in situations +where, for example, an exporter monitors multiple namespaces (with +each namespace exported as a metric label). One such example is kube-state-metrics. + +Using `groupbyattrs` will promote that label to a resource label and +associate those metrics with the new resource. 
For example: + +```yaml +processors: + groupbyattrs: + keys: + - namespace + - cluster + - location +``` diff --git a/collector/exporter/googlemanagedprometheusexporter/config.go b/collector/exporter/googlemanagedprometheusexporter/config.go new file mode 100644 index 0000000..1bfee3c --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/config.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlemanagedprometheusexporter // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/exporter/googlemanagedprometheusexporter" + +import ( + "fmt" + + "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector" + "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector/googlemanagedprometheus" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +// Config defines configuration for Google Cloud Managed Service for Prometheus exporter. +type Config struct { + GMPConfig `mapstructure:",squash"` + + // Timeout for all API calls. If not set, defaults to 12 seconds. + exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + exporterhelper.QueueSettings `mapstructure:"sending_queue"` + exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` +} + +// GMPConfig is a subset of the collector config applicable to the GMP exporter. +type GMPConfig struct { + ProjectID string `mapstructure:"project"` + UserAgent string `mapstructure:"user_agent"` + MetricConfig MetricConfig `mapstructure:"metric"` + + // Setting UntypedDoubleExport to true makes the collector double write prometheus + // untyped metrics to GMP similar to the GMP collector. 
That is, it writes it once as + // a gauge with the metric name suffix `unknown` and once as a counter with the + // metric name suffix `unknown:counter`. + // For the counter, if the point value is smaller than the previous point in the series + // it is considered a reset point. + UntypedDoubleExport bool `mapstructure:"untyped_double_export"` +} + +type MetricConfig struct { + // Prefix configures the prefix of metrics sent to GoogleManagedPrometheus. Defaults to prometheus.googleapis.com. + // Changing this prefix is not recommended, as it may cause metrics to not be queryable with promql in the Cloud Monitoring UI. + Prefix string `mapstructure:"prefix"` + ClientConfig collector.ClientConfig `mapstructure:",squash"` +} + +func (c *GMPConfig) toCollectorConfig() collector.Config { + // start with whatever the default collector config is. + cfg := collector.DefaultConfig() + cfg.MetricConfig.Prefix = c.MetricConfig.Prefix + if c.MetricConfig.Prefix == "" { + cfg.MetricConfig.Prefix = "prometheus.googleapis.com" + } + cfg.MetricConfig.SkipCreateMetricDescriptor = true + cfg.MetricConfig.InstrumentationLibraryLabels = false + cfg.MetricConfig.ServiceResourceLabels = false + // Update metric naming to match GMP conventions + cfg.MetricConfig.GetMetricName = googlemanagedprometheus.GetMetricName + // Map to the prometheus_target monitored resource + cfg.MetricConfig.MapMonitoredResource = googlemanagedprometheus.MapToPrometheusTarget + cfg.MetricConfig.EnableSumOfSquaredDeviation = true + // map the GMP config's fields to the collector config + cfg.ProjectID = c.ProjectID + cfg.UserAgent = c.UserAgent + cfg.MetricConfig.ClientConfig = c.MetricConfig.ClientConfig + if c.UntypedDoubleExport { + cfg.MetricConfig.ExtraMetrics = func(m pmetric.Metrics) pmetric.ResourceMetricsSlice { + //nolint:errcheck + featuregate.GlobalRegistry().Set("gcp.untyped_double_export", true) + googlemanagedprometheus.AddUntypedMetrics(m) + return m.ResourceMetrics() + } + } + + return cfg +} 
+ +func (cfg *Config) Validate() error { + if err := collector.ValidateConfig(cfg.toCollectorConfig()); err != nil { + return fmt.Errorf("exporter settings are invalid :%w", err) + } + return nil +} diff --git a/collector/exporter/googlemanagedprometheusexporter/config_test.go b/collector/exporter/googlemanagedprometheusexporter/config_test.go new file mode 100644 index 0000000..8709d59 --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/config_test.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlemanagedprometheusexporter // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/exporter/googlemanagedprometheusexporter" + +import ( + "path/filepath" + "testing" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/otelcol/otelcoltest" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/exporter/googlemanagedprometheusexporter/internal/metadata" +) + +func TestLoadConfig(t *testing.T) { + factories, err := otelcoltest.NopFactories() + assert.Nil(t, err) + + factory := NewFactory() + factories.Exporters[metadata.Type] = factory + cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, len(cfg.Exporters), 3) + + r0 := cfg.Exporters[component.NewID(metadata.Type)].(*Config) + assert.Equal(t, r0, factory.CreateDefaultConfig().(*Config)) + + r1 := cfg.Exporters[component.NewIDWithName(metadata.Type, "customname")].(*Config) + assert.Equal(t, r1, + &Config{ + TimeoutSettings: exporterhelper.TimeoutSettings{ + Timeout: 20 * time.Second, + }, + GMPConfig: GMPConfig{ + ProjectID: "my-project", + UserAgent: "opentelemetry-collector-contrib {{version}}", 
+ }, + RetrySettings: exporterhelper.RetrySettings{ + Enabled: true, + InitialInterval: 10 * time.Second, + MaxInterval: 1 * time.Minute, + MaxElapsedTime: 10 * time.Minute, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + }, + QueueSettings: exporterhelper.QueueSettings{ + Enabled: true, + NumConsumers: 2, + QueueSize: 10, + }, + }) + + r2 := cfg.Exporters[component.NewIDWithName(metadata.Type, "customprefix")].(*Config) + r2Expected := factory.CreateDefaultConfig().(*Config) + r2Expected.GMPConfig.MetricConfig.Prefix = "my-metric-domain.com" + assert.Equal(t, r2, r2Expected) +} diff --git a/collector/exporter/googlemanagedprometheusexporter/factory.go b/collector/exporter/googlemanagedprometheusexporter/factory.go new file mode 100644 index 0000000..fe681d9 --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/factory.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package googlemanagedprometheusexporter // import "exporter/googlemanagedprometheusexporter" + +import ( + "context" + "time" + + "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/exporter/googlemanagedprometheusexporter/internal/metadata" +) + +const ( + defaultTimeout = 12 * time.Second // Consistent with Cloud Monitoring's timeout +) + +// NewFactory creates a factory for the googlemanagedprometheus exporter +func NewFactory() exporter.Factory { + return exporter.NewFactory( + metadata.Type, + createDefaultConfig, + exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability), + ) +} + +// createDefaultConfig creates the default configuration for exporter. 
+func createDefaultConfig() component.Config { + retrySettings := exporterhelper.NewDefaultRetrySettings() + retrySettings.Enabled = false + return &Config{ + TimeoutSettings: exporterhelper.TimeoutSettings{Timeout: defaultTimeout}, + RetrySettings: retrySettings, + QueueSettings: exporterhelper.NewDefaultQueueSettings(), + } +} + +// createMetricsExporter creates a metrics exporter based on this config. +func createMetricsExporter( + ctx context.Context, + params exporter.CreateSettings, + cfg component.Config) (exporter.Metrics, error) { + eCfg := cfg.(*Config) + mExp, err := collector.NewGoogleCloudMetricsExporter(ctx, eCfg.GMPConfig.toCollectorConfig(), params.TelemetrySettings.Logger, params.BuildInfo.Version, eCfg.Timeout) + if err != nil { + return nil, err + } + return exporterhelper.NewMetricsExporter( + ctx, + params, + cfg, + mExp.PushMetrics, + exporterhelper.WithShutdown(mExp.Shutdown), + // Disable exporterhelper Timeout, since we are using a custom mechanism + // within exporter itself + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithQueue(eCfg.QueueSettings), + exporterhelper.WithRetry(eCfg.RetrySettings)) +} diff --git a/collector/exporter/googlemanagedprometheusexporter/factory_test.go b/collector/exporter/googlemanagedprometheusexporter/factory_test.go new file mode 100644 index 0000000..5fd7c1f --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/factory_test.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlemanagedprometheusexporter // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/exporter/googlemanagedprometheusexporter" + +import ( + "context" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/exporter/exportertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg 
:= factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateExporter(t *testing.T) { + if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" { + t.Skip("Default credentials not set, skip creating Google Cloud exporter") + } + ctx := context.Background() + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + eCfg := cfg.(*Config) + eCfg.ProjectID = "test" + + te, err := factory.CreateTracesExporter(ctx, exportertest.NewNopCreateSettings(), eCfg) + assert.NoError(t, err) + assert.NotNil(t, te, "failed to create trace exporter") + + me, err := factory.CreateMetricsExporter(ctx, exportertest.NewNopCreateSettings(), eCfg) + assert.NoError(t, err) + assert.NotNil(t, me, "failed to create metrics exporter") +} diff --git a/collector/exporter/googlemanagedprometheusexporter/internal/metadata/generated_status.go b/collector/exporter/googlemanagedprometheusexporter/internal/metadata/generated_status.go new file mode 100644 index 0000000..fe5ab47 --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/internal/metadata/generated_status.go @@ -0,0 +1,12 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +const ( + Type = "googlemanagedprometheus" + MetricsStability = component.StabilityLevelBeta +) diff --git a/collector/exporter/googlemanagedprometheusexporter/metadata.yaml b/collector/exporter/googlemanagedprometheusexporter/metadata.yaml new file mode 100644 index 0000000..6d81854 --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/metadata.yaml @@ -0,0 +1,7 @@ +type: googlemanagedprometheus + +status: + class: exporter + stability: + beta: [metrics] + distributions: [contrib, observiq] diff --git a/collector/exporter/googlemanagedprometheusexporter/testdata/config.yaml b/collector/exporter/googlemanagedprometheusexporter/testdata/config.yaml new file mode 100644 index 0000000..366d19a --- /dev/null +++ b/collector/exporter/googlemanagedprometheusexporter/testdata/config.yaml @@ -0,0 +1,33 @@ +receivers: + nop: + +processors: + nop: + +exporters: + googlemanagedprometheus: + googlemanagedprometheus/customname: + project: my-project + user_agent: opentelemetry-collector-contrib {{version}} + timeout: 20s + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + googlemanagedprometheus/customprefix: + metric: + prefix: my-metric-domain.com + + +service: + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [googlemanagedprometheus] + diff --git a/collector/internal/collectorerror/errors.go b/collector/internal/collectorerror/errors.go new file mode 100644 index 0000000..17fc05c --- /dev/null +++ b/collector/internal/collectorerror/errors.go @@ -0,0 +1,19 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectorerror + +import "errors" + +var ErrGPUSupportDisabled = errors.New("gpu support is disabled") diff --git a/collector/internal/env/env.go b/collector/internal/env/env.go new file mode 100644 index 0000000..bf4ea4c --- /dev/null +++ b/collector/internal/env/env.go @@ -0,0 +1,91 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package env + +import ( + "fmt" + "math" + "os" + "runtime" + + "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/mem" + "golang.org/x/text/cases" + "golang.org/x/text/language" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/internal/version" +) + +func Create() error { + userAgent, err := getUserAgent() + if err != nil { + return err + } + + os.Setenv("USERAGENT", userAgent) + return nil +} + +func getUserAgent() (string, error) { + hostInfo, err := host.Info() + if err != nil { + return "", err + } + + cores := runtime.NumCPU() + + memory, err := mem.VirtualMemory() + if err != nil { + return "", err + } + + partitions, err := disk.Partitions(false) + if err != nil { + return "", err + } + + var totalDiskCapacity uint64 + for _, partition := range partitions { + disk, err := disk.Usage(partition.Mountpoint) + if err != nil { + return "", err + } + totalDiskCapacity += disk.Total + } + + platform := hostInfo.Platform + if platform == "" { + platform = "Unknown" + } + + platformVersion := hostInfo.PlatformVersion + if platformVersion != "" { + platformVersion = fmt.Sprintf("v%v ", platformVersion) + } + + userAgent := fmt.Sprintf( + "Google Cloud Metrics Agent/%v (TargetPlatform=%v; Framework=OpenTelemetry Collector) %s %s(Cores=%v; Memory=%0.1fGB; Disk=%0.1fGB)", + version.Version, + cases.Title(language.English).String(runtime.GOOS), + platform, + platformVersion, + cores, + float64(memory.Total)/math.Pow(1024, 3), + float64(totalDiskCapacity)/math.Pow(1024, 3), + ) + + return userAgent, nil +} diff --git a/collector/internal/env/env_test.go b/collector/internal/env/env_test.go new file mode 100644 index 0000000..4079930 --- /dev/null +++ b/collector/internal/env/env_test.go @@ -0,0 +1,32 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package env + +import ( + "fmt" + "os" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Create(t *testing.T) { + require.NoError(t, Create()) + + expectedUserAgentRegex := fmt.Sprintf(`^Google Cloud Metrics Agent/latest \(TargetPlatform=(?i:%v); Framework=OpenTelemetry Collector\) .* \(Cores=\d+; Memory=(?:[0-9]*[.])?[0-9]+GB; Disk=(?:[0-9]*[.])?[0-9]+GB\)$`, runtime.GOOS) + assert.Regexp(t, expectedUserAgentRegex, os.Getenv("USERAGENT")) +} diff --git a/collector/internal/levelchanger/levelchanger.go b/collector/internal/levelchanger/levelchanger.go new file mode 100644 index 0000000..15f2f44 --- /dev/null +++ b/collector/internal/levelchanger/levelchanger.go @@ -0,0 +1,89 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package levelchanger + +import ( + "strings" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type LevelChangeCondition func(entry zapcore.Entry, fields []zapcore.Field) bool + +type levelChangerCore struct { + next zapcore.Core + fromLevel zapcore.Level + toLevel zapcore.Level + conditions []LevelChangeCondition +} + +// This core is enabled at the requested level if the core it wraps +// is enabled at the requested level. +func (l levelChangerCore) Enabled(level zapcore.Level) bool { + return l.next.Enabled(level) +} + +// This core does not allow adding additional context. +func (l levelChangerCore) With([]zapcore.Field) zapcore.Core { return l } + +// This core will always add itself to the checked entry, since the Write +// method will determine whether the entry continues to the next core. +func (l levelChangerCore) Check(entry zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + return ce.AddCore(entry, l) +} + +// Check if the log passes any core conditions, and if the log level matches the fromLevel, +// change the log's level to the toLevel. +func (l levelChangerCore) Write(entry zapcore.Entry, fields []zapcore.Field) error { + // Always pass if there are no conditions, otherwise check if any conditions are met. + changeLevels := len(l.conditions) == 0 + for _, condition := range l.conditions { + changeLevels = changeLevels || condition(entry, fields) + } + + if changeLevels && entry.Level == l.fromLevel { + entry.Level = l.toLevel + } + + // Check if the next core is enabled at the (potentially) new log level. + if !l.next.Enabled(entry.Level) { + return nil + } + return l.next.Write(entry, fields) +} + +// No special syncing is required for this core. +func (levelChangerCore) Sync() error { return nil } + +// Create a zap option that wraps a core with a new levelChangerCore. 
+func NewLevelChangerOption(from, to zapcore.Level, conditions ...LevelChangeCondition) zap.Option { + return zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return levelChangerCore{ + next: core, + fromLevel: from, + toLevel: to, + conditions: conditions, + } + }) +} + +// Make a level change condition that passes if the Entry's Caller File contains a +// substring. Can be used to change the level of all logs from some file or package. +func FilePathLevelChangeCondition(pathSubstr string) LevelChangeCondition { + return func(entry zapcore.Entry, fields []zapcore.Field) bool { + return strings.Contains(entry.Caller.File, pathSubstr) + } +} diff --git a/collector/internal/levelchanger/levelchanger_test.go b/collector/internal/levelchanger/levelchanger_test.go new file mode 100644 index 0000000..52c75b4 --- /dev/null +++ b/collector/internal/levelchanger/levelchanger_test.go @@ -0,0 +1,117 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package levelchanger_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/internal/levelchanger" +) + +type baseTestCase struct { + name string + loggerLevel zapcore.Level + from zapcore.Level + to zapcore.Level + logWriteFunc func(logger *zap.Logger) + expectedLogLevels []zapcore.Level +} + +func TestLevelChangerCoreNoConditions(t *testing.T) { + t.Parallel() + + testCases := []baseTestCase{ + { + name: "changes level", + loggerLevel: zapcore.DebugLevel, + from: zapcore.ErrorLevel, + to: zapcore.DebugLevel, + logWriteFunc: func(logger *zap.Logger) { + logger.Error("this should be debug") + }, + expectedLogLevels: []zapcore.Level{zapcore.DebugLevel}, + }, + { + name: "does not output the log when it changes to level the logger doesn't allow", + loggerLevel: zapcore.ErrorLevel, + from: zapcore.ErrorLevel, + to: zapcore.DebugLevel, + logWriteFunc: func(logger *zap.Logger) { + logger.Error("this should not get logged") + }, + expectedLogLevels: []zapcore.Level{}, + }, + { + name: "only changes level of logs it's supposed to", + loggerLevel: zapcore.DebugLevel, + from: zapcore.ErrorLevel, + to: zapcore.DebugLevel, + logWriteFunc: func(logger *zap.Logger) { + logger.Error("this should become debug") + logger.Info("this should stay info") + }, + expectedLogLevels: []zapcore.Level{zapcore.DebugLevel, zapcore.InfoLevel}, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + observedCore, observedLogs := observer.New(testCase.loggerLevel) + logger := zap.New( + observedCore, + levelchanger.NewLevelChangerOption(testCase.from, testCase.to)) + + testCase.logWriteFunc(logger) + + allLogs := observedLogs.All() + assert.Equal(t, len(testCase.expectedLogLevels), len(allLogs)) + for i, expectedLevel := range testCase.expectedLogLevels { + assert.Equal(t, expectedLevel, 
allLogs[i].Level) + } + }) + } +} + +func TestLevelChangerCoreFilePathCondition(t *testing.T) { + t.Parallel() + + filename := "levelchanger_test.go" + + observedCore, observedLogs := observer.New(zapcore.DebugLevel) + observedCoreOption := zap.WrapCore(func(zapcore.Core) zapcore.Core { + return observedCore + }) + + from := zapcore.ErrorLevel + to := zapcore.DebugLevel + levelChangeOption := levelchanger.NewLevelChangerOption( + from, + to, + levelchanger.FilePathLevelChangeCondition(filename)) + + // Using a development logger will log at debug level and will populate the entry + // with the calling file so we can test our condition with it. + logger, _ := zap.NewDevelopment(observedCoreOption, levelChangeOption) + + logger.Error("should be debug") + assert.Len(t, observedLogs.All(), 1) + log := observedLogs.All()[0] + assert.Equal(t, zapcore.DebugLevel, log.Level) +} diff --git a/collector/internal/tools/go.mod b/collector/internal/tools/go.mod new file mode 100644 index 0000000..a6c2f64 --- /dev/null +++ b/collector/internal/tools/go.mod @@ -0,0 +1,198 @@ +module github.com/GoogleCloudPlatform/opentelemetry-operations-collector + +go 1.20 + +require ( + github.com/client9/misspell v0.3.4 + github.com/golangci/golangci-lint v1.52.1 + github.com/google/addlicense v1.1.1 + github.com/open-telemetry/opentelemetry-collector-contrib/cmd/mdatagen v0.81.0 +) + +require ( + 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect + 4d63.com/gochecknoglobals v0.2.1 // indirect + github.com/Abirdcfly/dupword v0.0.11 // indirect + github.com/Antonboom/errname v0.1.9 // indirect + github.com/Antonboom/nilnil v0.1.3 // indirect + github.com/BurntSushi/toml v1.2.1 // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/OpenPeeDeeP/depguard v1.1.1 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + 
github.com/alingse/asasalint v0.0.11 // indirect + github.com/ashanbrown/forbidigo v1.5.1 // indirect + github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect + github.com/bombsimon/wsl/v3 v3.4.0 // indirect + github.com/breml/bidichk v0.2.4 // indirect + github.com/breml/errchkjson v0.3.1 // indirect + github.com/butuzov/ireturn v0.1.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/charithe/durationcheck v0.0.10 // indirect + github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect + github.com/curioswitch/go-reassign v0.2.0 // indirect + github.com/daixiang0/gci v0.10.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/esimonov/ifshort v1.0.4 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/fatih/color v1.15.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/firefart/nonamedreturns v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/go-critic/go-critic v0.7.0 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.1.0 // indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // 
indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect + github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.4.0 // indirect + github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/julz/importas v0.1.0 // indirect + github.com/junk1tm/musttag v0.5.0 // indirect + github.com/kisielk/errcheck v1.6.3 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.4 // indirect + github.com/knadh/koanf v1.5.0 // indirect + github.com/knadh/koanf/v2 v2.0.1 // indirect + github.com/kulti/thelper v0.6.3 // indirect + github.com/kunwardeep/paralleltest v1.0.6 // indirect + github.com/kyoh86/exportloopref v0.1.11 
// indirect + github.com/ldez/gomoddirectives v0.2.3 // indirect + github.com/ldez/tagliatelle v0.4.0 // indirect + github.com/leonklingele/grouper v1.1.1 // indirect + github.com/lufeee/execinquery v1.2.1 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect + github.com/maratori/testpackage v1.1.1 // indirect + github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/revive v1.3.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/moricho/tparallel v0.3.1 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect + github.com/nishanths/exhaustive v0.9.5 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect + github.com/nunnatsa/ginkgolinter v0.9.0 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v1.4.0 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/quasilyte/go-ruleguard v0.3.19 // indirect + 
github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/ryancurrah/gomodguard v1.3.0 // indirect + github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect + github.com/securego/gosec/v2 v2.15.0 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sivchari/containedctx v1.0.2 // indirect + github.com/sivchari/nosnakecase v1.7.0 // indirect + github.com/sivchari/tenv v1.7.1 // indirect + github.com/sonatard/noctx v0.0.2 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/cobra v1.6.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.15.0 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/testify v1.8.4 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect + github.com/tdakkota/asciicheck v0.2.0 // indirect + github.com/tetafro/godot v1.4.11 // indirect + github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e // indirect + github.com/timonwong/loggercheck v0.9.4 // indirect + github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ultraware/funlen v0.0.3 // indirect + 
github.com/ultraware/whitespace v0.0.5 // indirect + github.com/uudashr/gocognit v1.0.6 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.2.0 // indirect + gitlab.com/bosi/decorder v0.2.3 // indirect + go.opentelemetry.io/collector/confmap v0.81.0 // indirect + go.opentelemetry.io/collector/featuregate v1.0.0-rcv0013 // indirect + go.opentelemetry.io/collector/pdata v1.0.0-rcv0013 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/mod v0.9.0 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/tools v0.7.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.56.2 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.4.3 // indirect + mvdan.cc/gofumpt v0.4.0 // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 // indirect +) diff --git a/collector/internal/tools/go.sum b/collector/internal/tools/go.sum new file mode 100644 index 0000000..c2170ea --- /dev/null +++ b/collector/internal/tools/go.sum @@ -0,0 +1,1165 @@ +4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= 
+4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery 
v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU= +github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA= +github.com/Antonboom/errname v0.1.9 h1:BZDX4r3l4TBZxZ2o2LNrlGxSHran4d1u4veZdoORTT4= +github.com/Antonboom/errname v0.1.9/go.mod h1:nLTcJzevREuAsgTbG85UsuiWpMpAqbKD1HNZ29OzE58= +github.com/Antonboom/nilnil v0.1.3 h1:6RTbx3d2mcEu3Zwq9TowQpQMVpP75zugwOtqY1RTtcE= +github.com/Antonboom/nilnil v0.1.3/go.mod h1:iOov/7gRcXkeEU+EMGpBu2ORih3iyVEiWjeste1SJm8= 
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA= +github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= 
+github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/ashanbrown/forbidigo v1.5.1 h1:WXhzLjOlnuDYPYQo/eFlcFMi8X/kLfvWLYu6CSoebis= +github.com/ashanbrown/forbidigo v1.5.1/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod 
h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.6.0 h1:HTuxyug8GyFbRkrffIpzNCSK4luc0TY3wzXvzIZhEXc= +github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= +github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= +github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8= +github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s= +github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ= +github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U= +github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= +github.com/butuzov/ireturn v0.1.1/go.mod 
h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0= +github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= +github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0= +github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= +github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= 
+github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-critic/go-critic v0.7.0 h1:tqbKzB8pqi0NsRZ+1pyU4aweAF7A7QN0Pi4Q02+rYnQ= +github.com/go-critic/go-critic v0.7.0/go.mod h1:moYzd7GdVXE2C2hYTwd7h0CPcqlUeclsyBRwMa38v64= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= 
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod 
h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= +github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= +github.com/golangci/golangci-lint v1.52.1 h1:TwQtQi5dGE/uFOxYGKwddJo7T9sHsRfTUN00HZMl5Jo= +github.com/golangci/golangci-lint v1.52.1/go.mod h1:wlTh+d/oVlgZC2yCe6nlxrxNAnuhEQC0Zdygoh72Uak= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0= 
+github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/addlicense v1.1.1 h1:jpVf9qPbU8rz5MxKo7d+RMcNHkqxi4YJi/laauX4aAE= +github.com/google/addlicense v1.1.1/go.mod h1:Sm/DHu7Jk+T5miFHHehdIjbi4M5+dJDRS3Cq0rncIxA= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= 
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 
v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 h1:9alfqbrhuD+9fLZ4iaAVwhlp5PEhmnBt7yvK2Oy5C1U= +github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/sdk 
v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= 
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= 
+github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/junk1tm/musttag v0.5.0 h1:bV1DTdi38Hi4pG4OVWa7Kap0hi0o7EczuK6wQt9zPOM= +github.com/junk1tm/musttag v0.5.0/go.mod h1:PcR7BA+oREQYvHwgjIDmw3exJeds5JzRcvEJTfjrA0M= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8= +github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= +github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= +github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= +github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= +github.com/knadh/koanf/v2 v2.0.1 h1:1dYGITt1I23x8cfx8ZnldtezdyaZtfAuRtIFOiRzK7g= +github.com/knadh/koanf/v2 v2.0.1/go.mod h1:ZeiIlIDXTE7w1lMT6UVcNiRAS2/rCeLn/GdLNvY1Dus= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= +github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= +github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= +github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= +github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= +github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.4.0 h1:sylp7d9kh6AdXN2DpVGHBRb5guTVAgOxqNGhbqc4b1c= +github.com/ldez/tagliatelle v0.4.0/go.mod h1:mNtTfrHy2haaBAw+VT7IBV6VXBThS7TCreYWbBcJ87I= +github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= +github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= 
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.18 
h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/revive v1.3.1 h1:OlQkcH40IB2cGuprTPcjB0iIUddgVZgGmDX3IAMR8D4= +github.com/mgechev/revive v1.3.1/go.mod h1:YlD6TTWl2B8A103R9KWJSPVI9DrEf+oqr15q21Ld+5I= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface 
v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= 
+github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/nishanths/exhaustive v0.9.5 h1:TzssWan6orBiLYVqewCG8faud9qlFntJE30ACpzmGME= +github.com/nishanths/exhaustive v0.9.5/go.mod h1:IbwrGdVMizvDcIxPYGVdQn5BqWJaOwpCvg4RGb8r/TA= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/nunnatsa/ginkgolinter v0.9.0 h1:Sm0zX5QfjJzkeCjEp+t6d3Ha0jwvoDjleP9XCsrEzOA= +github.com/nunnatsa/ginkgolinter v0.9.0/go.mod h1:FHaMLURXP7qImeH6bvxWJUpyH+2tuqe5j4rW1gxJRmI= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo/v2 v2.8.0 h1:pAM+oBNPrpXRs+E/8spkeGx9QgekbRVyr74EUvRVOUI= +github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= +github.com/open-telemetry/opentelemetry-collector-contrib/cmd/mdatagen v0.81.0 h1:o6a5Z5XZGw/qddTJrvPW8yTa0IpXzXubK8r43nuRdLc= 
+github.com/open-telemetry/opentelemetry-collector-contrib/cmd/mdatagen v0.81.0/go.mod h1:8moVn5V/NwuBcJL6XS2xoZC/QehRQex1lRUUScBbO+I= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v1.4.0 h1:b+sQ5HibPIAjEZwtuwU8Wz/u0dMZ7YL+bk+9yWyHVJk= +github.com/polyfloyd/go-errorlint v1.4.0/go.mod 
h1:qJCkPeBn+0EXkdKTrUCcuFStM2xrDKfxI3MGLXPexUs= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common 
v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/quasilyte/go-ruleguard v0.3.19 h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc= +github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 
h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= +github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= +github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI= +github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= +github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.23.0 h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0= +github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/securego/gosec/v2 v2.15.0 h1:v4Ym7FF58/jlykYmmhZ7mTm7FQvN/setNm++0fgIAtw= 
+github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= +github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= +github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= +github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= +github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero 
v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= +github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= +github.com/tetafro/godot v1.4.11/go.mod 
h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e h1:MV6KaVu/hzByHP0UvJ4HcMGE/8a6A4Rggc/0wx2AvJo= +github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= +github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= +github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= +github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= +github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= +github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= +github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= +gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/collector/component v0.81.0 h1:AKsl6bss/SRrW248GFpmGiiI/4kdemW92Ai/X82CCqY= +go.opentelemetry.io/collector/config/configtelemetry v0.81.0 h1:j3dhWbAcrfL1n0RmShRJf99X/xIMoPfEShN/5Z8bY0k= +go.opentelemetry.io/collector/confmap v0.81.0 h1:AqweoBGdF3jGM2/KgP5GS6bmN+1aVrEiCy4nPf7IBE4= +go.opentelemetry.io/collector/confmap v0.81.0/go.mod h1:iCTnTqGgZZJumhJxpY7rrJz9UQ/0zjPmsJz2Z7Tp4RY= +go.opentelemetry.io/collector/consumer v0.81.0 h1:8R2iCrSzD7T0RtC2Wh4GXxDiqla2vNhDokGW6Bcrfas= +go.opentelemetry.io/collector/featuregate v1.0.0-rcv0013 h1:tiTUG9X/gEDN1oDYQOBVUFYQfhUG2CvgW9VhBc2uk1U= +go.opentelemetry.io/collector/featuregate v1.0.0-rcv0013/go.mod h1:0mE3mDLmUrOXVoNsuvj+7dV14h/9HFl/Fy9YTLoLObo= 
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0013 h1:4sONXE9hAX+4Di8m0bQ/KaoH3Mi+OPt04cXkZ7A8W3k= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0013/go.mod h1:x09G/4KjEcDKNuWCjC5ZtnuDE0XEqiRwI+yrHSVjIy8= +go.opentelemetry.io/collector/receiver v0.81.0 h1:0c+YtIV7fmd9ev+zmwS9qjx5ASi8cw+gSypu4I7Gugc= +go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod 
h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= 
+golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 h1:e7LhZmJ631l59keHP9ssC3sgSn3/oiEHKHKXDkimURY= +golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= +honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= +mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= +mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b 
h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 h1:VuJo4Mt0EVPychre4fNlDWDuE5AjXtPJpRUWqZDQhaI= +mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8/go.mod h1:Oh/d7dEtzsNHGOq1Cdv8aMm3KdKhVvPbRQcM8WFpBR8= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/collector/internal/tools/tools.go b/collector/internal/tools/tools.go new file mode 100644 index 0000000..4ddb9b0 --- /dev/null +++ b/collector/internal/tools/tools.go @@ -0,0 +1,22 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tools + +import ( + _ "github.com/client9/misspell/cmd/misspell" + _ "github.com/golangci/golangci-lint/cmd/golangci-lint" + _ "github.com/google/addlicense" + _ "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/mdatagen" +) diff --git a/collector/internal/version/version.go b/collector/internal/version/version.go new file mode 100644 index 0000000..444132c --- /dev/null +++ b/collector/internal/version/version.go @@ -0,0 +1,58 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "bytes" + "fmt" + "runtime" +) + +// Version variable will be replaced at link time after `make` has been run. +var Version = "latest" + +// GitHash variable will be replaced at link time after `make` has been run. +var GitHash = "" + +// InfoVar is a singleton instance of the Info struct. +var InfoVar = Info([][2]string{ + {"Version", Version}, + {"GitHash", GitHash}, + {"Goversion", runtime.Version()}, + {"OS", runtime.GOOS}, + {"Architecture", runtime.GOARCH}, + // Add other valuable build-time information here. +}) + +// Info has properties about the build and runtime. +type Info [][2]string + +// String returns a formatted string, with linebreaks, intended to be displayed +// on stdout. 
+func (i Info) String() string { + buf := new(bytes.Buffer) + maxRow1Alignment := 0 + for _, prop := range i { + if cl0 := len(prop[0]); cl0 > maxRow1Alignment { + maxRow1Alignment = cl0 + } + } + + for _, prop := range i { + // Then finally print them with left alignment + fmt.Fprintf(buf, "%*s %s\n", -maxRow1Alignment, prop[0], prop[1]) + } + return buf.String() +} diff --git a/collector/internal/version/version_test.go b/collector/internal/version/version_test.go new file mode 100644 index 0000000..9be0958 --- /dev/null +++ b/collector/internal/version/version_test.go @@ -0,0 +1,17 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +// TODO: Add tests diff --git a/collector/receiver/prometheusreceiver/DESIGN.md b/collector/receiver/prometheusreceiver/DESIGN.md new file mode 100644 index 0000000..6da2c71 --- /dev/null +++ b/collector/receiver/prometheusreceiver/DESIGN.md @@ -0,0 +1,690 @@ +## Design Goals + +### Provide a seamless onboarding experience for users who are already familiar with Prometheus scrape config + +Prometheus has a very powerful config system for user to config how Prometheus +can scrape the metrics data from any application which expose a Prometheus +format metrics endpoint. It provides very useful features like filtering +unwanted metrics, relabeling tags, etc. 
The original Prometheus receiver of +OpenTelemetry took the approach of using Prometheus' own scraper's source code +as a library to achieve this goal. Overall the idea was great, however, the +original implementation has a lot of glitches, it cannot be fixed by small +patches. This new Prometheus receiver is going to follow the same idea of +leveraging Prometheus sourcecode, with a proper implementation. + +### Map Prometheus metrics to the corresponding OpenTelemetry metrics properly + +Prometheus receiver shall be able to map Prometheus metrics to OpenTelemetry's +proto based metrics, it shall respect the original metric name, value, +timestamp, as well as tags. It doesn't need to provide one-to-one mapping, +since supported metric types are different from the two systems. However, it +shall not drop data. + +### Parity between Prometheus and OpenTelemetry Prometheus exporter + +Prometheus itself can also used as an exporter, that it can expose the metrics +it scrape from other system with its own metrics endpoint, so is OpenTelemetry +service. We shall be able to retain parity from the following two setups: + +1. app -> prometheus -> metric-endpoint +2. app -> otelcol-with-prometheus-receiver -> otelcol-prometheus-exporter-metrics-endpoint + + +## Prometheus Text Format Overview + +Prometheus text format is a line orient format. Each non-empty line, which +does not begin with #, is a metric data point with includes a metric name and its +value, which is of float64 type, as well as some optional data such as tags and +timestamp, which is in milliseconds. For lines that begin with #, they are either +comments, which need to be filtered, or metadata, including type hints +and units that are usually indicating the beginning of a new individual metric +or a group of new metrics. More details of Prometheus text format can be found +from its [official +document](https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format). 
* Stateset
is an object which manages scrapes for a set of targets
2. acquire a [storageAppender](#storageappender) instance
current data point. The context of what metric group this data point belongs +to is not provided; we have to keep track of it internally within the appender.
+together. Keep in mind that the Append/AppendExemplar method is operated in a streaming
However, +Prometheus does not provide such data. One of the solutions to tackle this +problem is to cache the first observed value of these metrics as well as the +timestamp, then for any subsequent data of the same metric, use the cached +timestamp as StartTimestamp. Unfortunately, metrics can come and go, or the +remote server can restart at any given time, so the receiver also needs to +take care of issues such as when a new value is smaller than the previously +seen value, by considering it as a metric with a new StartTime. + +## Prometheus Metric to OpenTelemetry Metric Proto Mapping + +### Target as Node +The Target of Prometheus is defined by the scrape_config, it has information +such as the `hostname` of the remote service, and a user defined `job +name` that can be used as the service name. These two pieces of information +make it a great fit to map to the `Node` field of the OpenTelemetry +MetricsData type, as shown below: + +```go +type MetricsData struct { + Node *commonpb.Node + Resource *resourcepb.Resource + Metrics []*metricspb.Metric +} +``` + +The scrape page as a whole also can be fit into the above `MetricsData` data +structure, and all the metrics data points can be stored with the `Metrics` +array. We will explain the mappings of individual metric types in the following +sections + +### Metric Value Mapping + In OpenTelemetry metrics value types can be either `int64` or `float64`, + while in Prometheus the value can be safely assumed to always be `float64` + based on the [Prometheus Text Format + Document](https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-details), + as quoted below: + +> value is a float represented as required by Go's ParseFloat() function. +> In addition to standard numerical values, Nan, +Inf, and -Inf are valid +> values representing not a number, positive infinity, and negative infinity, +> respectively. 
> In addition to standard numerical values, NaN, +Inf, and -Inf are valid
+ +The output of the first scrape is as shown below: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "method"}, {Key: "code"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "post", HasValue: true}, {Value: "200", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: startTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 1027.0}}, + }, + }, + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "post", HasValue: false}, {Value: "400", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: startTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 3.0}}, + }, + }, + }, + }, +} +``` + +The output of the second scrape is as shown below: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "method"}, {Key: "code"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "post", HasValue: true}, {Value: "200", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 1028.0}}, + }, + }, + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "post", HasValue: false}, {Value: "400", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 5.0}}, + }, + }, + }, + }, +} +``` + +*Note: `startTimestamp` is the timestamp cached from the first scrape, `currentTimestamp` is the timestamp of the current scrape* + + +### Gauge +Gauge, as described in the [Prometheus Metric 
Types Document](https://prometheus.io/docs/concepts/metric_types/#gauge),
+ +An example of histogram with first scrape response: +``` +# HELP hist_test This is my histogram vec +# TYPE hist_test histogram +hist_test_bucket{t1="1",,le="10.0"} 1.0 +hist_test_bucket{t1="1",le="20.0"} 3.0 +hist_test_bucket{t1="1",le="+inf"} 10.0 +hist_test_sum{t1="1"} 100.0 +hist_test_count{t1="1"} 10.0 +hist_test_bucket{t1="2",,le="10.0"} 10.0 +hist_test_bucket{t1="2",le="20.0"} 30.0 +hist_test_bucket{t1="2",le="+inf"} 100.0 +hist_test_sum{t1="2"} 10000.0 +hist_test_count{t1="2"} 100.0 + +``` + +Its corresponding OpenTelemetry metrics will be: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "hist_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*metricspb.LabelKey{{Key: "t1"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "1", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: startTimestamp, Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 10, + Sum: 100.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 2}, {Count: 7}}, + }}}, + }, + }, + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "2", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: startTimestamp, Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 100, + Sum: 10000.0, + Buckets: 
[]*metricspb.DistributionValue_Bucket{{Count: 10}, {Count: 20}, {Count: 70}}, + }}}, + }, + }, + }, + }, +} +``` + +And a subsequent 2nd scrape response: +``` +# HELP hist_test This is my histogram vec +# TYPE hist_test histogram +hist_test_bucket{t1="1",,le="10.0"} 2.0 +hist_test_bucket{t1="1",le="20.0"} 6.0 +hist_test_bucket{t1="1",le="+inf"} 13.0 +hist_test_sum{t1="1"} 150.0 +hist_test_count{t1="1"} 13.0 +hist_test_bucket{t1="2",,le="10.0"} 10.0 +hist_test_bucket{t1="2",le="20.0"} 30.0 +hist_test_bucket{t1="2",le="+inf"} 100.0 +hist_test_sum{t1="2"} 10000.0 +hist_test_count{t1="2"} 100.0 + +``` + +Its corresponding OpenTelemetry metrics will be: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "hist_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*metricspb.LabelKey{{Key: "t1"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "1", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: currentTimestamp, Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 13, + Sum: 150.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 2}, {Count: 4}, {Count: 7}}, + }}}, + }, + }, + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "2", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: currentTimestamp, Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + 
Bounds: []float64{10, 20}, + }, + }, + }, + Count: 100, + Sum: 10000.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 10}, {Count: 20}, {Count: 70}}, + }}}, + }, + }, + }, + }, +} + +``` + +There's an important difference between Prometheus buckets and OpenTelemetry +buckets: bucket counts from Prometheus are cumulative. To transform this +into OpenTelemetry format, one needs to apply the following formula: + +``` +CurrentOCBucketValue = CurrentPrometheusBucketValue - PrevPrometheusBucketValue +``` + +OpenTelemetry does not use `+inf` as an explicit bound; one needs to remove it to generate +the Bounds of the OpenTelemetry distribution. + +Other than that, the `SumOfSquaredDeviation`, which is required by +OpenTelemetry format for histogram, is not provided by Prometheus. We have to +set this value to `0` instead. + +### Gaugehistogram + +This is an undocumented data type that's not currently supported. + +### Summary + +Same as histogram, summary is also a complex metric type which is represented by +multiple data points. A detailed description can be found in [Prometheus +Summary](https://prometheus.io/docs/concepts/metric_types/#summary). + +The sum and count from Summary are cumulative, however, the quantiles are +not. The receiver will again maintain some state to attempt to detect value resets +and to set appropriate start timestamps. + +For the following two scrapes, with the first one: + +``` +# HELP go_gc_duration_seconds A summary of the GC invocation durations. 
+# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 0.0001271 +go_gc_duration_seconds{quantile="0.25"} 0.0002455 +go_gc_duration_seconds{quantile="0.5"} 0.0002904 +go_gc_duration_seconds{quantile="0.75"} 0.0003426 +go_gc_duration_seconds{quantile="1"} 0.0023638 +go_gc_duration_seconds_sum 17.391350544 +go_gc_duration_seconds_count 52489 +``` + +Its corresponding OpenTelemetry metrics will be: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_gc_duration_seconds", + Type: metricspb.MetricDescriptor_SUMMARY, + LabelKeys: []*metricspb.LabelKey{}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{}, + Points: []*metricspb.Point{ + {Timestamp: startTimestamp, Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 17.391350544}, + Count: &wrappers.Int64Value{Value: 52489}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ + {Percentile: 0.0, Value: 0.0001271}, + {Percentile: 25.0, Value: 0.0002455}, + {Percentile: 50.0, Value: 0.0002904}, + {Percentile: 75.0, Value: 0.0003426}, + {Percentile: 100.0, Value: 0.0023638}, + }, + }}}}, + }, + }, + }, + }, +} + +``` + +And the 2nd one: +``` +# HELP go_gc_duration_seconds A summary of the GC invocation durations. 
+# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 0.0001271 +go_gc_duration_seconds{quantile="0.25"} 0.0002455 +go_gc_duration_seconds{quantile="0.5"} 0.0002904 +go_gc_duration_seconds{quantile="0.75"} 0.0003426 +go_gc_duration_seconds{quantile="1"} 0.0023639 +go_gc_duration_seconds_sum 17.491350544 +go_gc_duration_seconds_count 52490 +``` + +Its corresponding OpenTelemetry metrics will be: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_gc_duration_seconds", + Type: metricspb.MetricDescriptor_SUMMARY, + LabelKeys: []*metricspb.LabelKey{}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{}, + Points: []*metricspb.Point{ + {Timestamp: currentTimestamp, Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 17.491350544}, + Count: &wrappers.Int64Value{Value: 52490}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ + {Percentile: 0.0, Value: 0.0001271}, + {Percentile: 25.0, Value: 0.0002455}, + {Percentile: 50.0, Value: 0.0002904}, + {Percentile: 75.0, Value: 0.0003426}, + {Percentile: 100.0, Value: 0.0023639}, + }, + }}}}, + }, + }, + }, + }, +} + +``` + +There are also some differences between the two systems. One difference is that +Prometheus uses `quantile`, while OpenTelemetry uses `percentile`. +Additionally, OpenTelemetry has optional values for `Sum` and `Count` of a +snapshot, however, they are not provided by Prometheus, and `nil` will be used +for these values. + +Other than that, in some Prometheus client implementations, such as the Python +version, Summary is allowed to have no quantiles, in which case the receiver +will produce an OpenTelemetry Summary with Snapshot set to `nil`. 
+ +### Others + +Any other Prometheus metric types will be transformed into the +OpenTelemetry [Gauge](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#gauge) type. diff --git a/collector/receiver/prometheusreceiver/Makefile b/collector/receiver/prometheusreceiver/Makefile new file mode 100644 index 0000000..ded7a36 --- /dev/null +++ b/collector/receiver/prometheusreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/collector/receiver/prometheusreceiver/README.md b/collector/receiver/prometheusreceiver/README.md new file mode 100644 index 0000000..bbd3111 --- /dev/null +++ b/collector/receiver/prometheusreceiver/README.md @@ -0,0 +1,118 @@ +# Prometheus Receiver + +| Status | | +| ------------------------ |-------------------| +| Stability | [beta] | +| Supported pipeline types | metrics | +| Distributions | [core], [contrib] | + +Receives metric data in [Prometheus](https://prometheus.io/) format. See the +[Design](DESIGN.md) for additional information on this receiver. + +## ⚠️ Warning + +Note: This component is currently a work in progress. It has several limitations, +so please don't use it if any of the following limitations are a concern: + +* Collector cannot auto-scale the scraping yet when multiple replicas of the + collector are run. +* When running multiple replicas of the collector with the same config, it will + scrape the targets multiple times. +* Users need to configure each replica with different scraping configuration + if they want to manually shard the scraping. +* The Prometheus receiver is a stateful component. + +## Unsupported features +The Prometheus receiver is meant to minimally be a drop-in replacement for Prometheus. 
However, +there are advanced features of Prometheus that we don't support and thus explicitly will return +an error for if the receiver's configuration YAML/code contains any of the following + +- [x] alert_config.alertmanagers +- [x] alert_config.relabel_configs +- [x] remote_read +- [x] remote_write +- [x] rule_files + + +## Getting Started + +This receiver is a drop-in replacement for getting Prometheus to scrape your +services. It supports [the full set of Prometheus configuration in `scrape_config`][sc], +including service discovery. Just like you would write in a YAML configuration +file before starting Prometheus, such as with: + +**Note**: Since the collector configuration supports env variable substitution +`$` characters in your prometheus configuration are interpreted as environment +variables. If you want to use $ characters in your prometheus configuration, +you must escape them using `$$`. + +```shell +prometheus --config.file=prom.yaml +``` + +**Feature gates**: + +- `receiver.prometheusreceiver.UseCreatedMetric`: Start time for Summary, Histogram + and Sum metrics can be retrieved from `_created` metrics. Currently, this behaviour + is disabled by default. 
To enable it, use the following feature gate option: + +```shell +"--feature-gates=receiver.prometheusreceiver.UseCreatedMetric" +``` + +You can copy and paste that same configuration under: + +```yaml +receivers: + prometheus: + config: +``` + +For example: + +```yaml +receivers: + prometheus: + config: + scrape_configs: + - job_name: 'otel-collector' + scrape_interval: 5s + static_configs: + - targets: ['0.0.0.0:8888'] + - job_name: k8s + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + regex: "true" + action: keep + metric_relabel_configs: + - source_labels: [__name__] + regex: "(request_duration_seconds.*|response_duration_seconds.*)" + action: keep +``` + +## OpenTelemetry Operator +In addition to these static job definitions, this receiver allows querying a list of jobs from the +OpenTelemetry Operator's TargetAllocator or a compatible endpoint. + +```yaml +receivers: + prometheus: + target_allocator: + endpoint: http://my-targetallocator-service + interval: 30s + collector_id: collector-1 +``` +## Exemplars +This receiver accepts exemplars coming in Prometheus format and converts them to OTLP format. +1. Value is expected to be received in `float64` format +2. Timestamp is expected to be received in `ms` +3. Labels with key `span_id` in prometheus exemplars are set as OTLP `span id` and labels with key `trace_id` are set as `trace id` +4. 
The rest of the labels are copied as-is to OTLP format + +[sc]: https://github.com/prometheus/prometheus/blob/v2.28.1/docs/configuration/configuration.md#scrape_config + +[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol \ No newline at end of file diff --git a/collector/receiver/prometheusreceiver/config.go b/collector/receiver/prometheusreceiver/config.go new file mode 100644 index 0000000..1867524 --- /dev/null +++ b/collector/receiver/prometheusreceiver/config.go @@ -0,0 +1,307 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusreceiver // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver" + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + "time" + + commonconfig "github.com/prometheus/common/config" + promconfig "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/file" + promHTTP "github.com/prometheus/prometheus/discovery/http" + "github.com/prometheus/prometheus/discovery/kubernetes" + "github.com/prometheus/prometheus/discovery/targetgroup" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "gopkg.in/yaml.v2" +) + +const ( + // The key for Prometheus scraping configs. + prometheusConfigKey = "config" + + // keys to access the http_sd_config from config root + targetAllocatorConfigKey = "target_allocator" + targetAllocatorHTTPSDConfigKey = "http_sd_config" +) + +// Config defines configuration for Prometheus receiver. +type Config struct { + PrometheusConfig *promconfig.Config `mapstructure:"-"` + BufferPeriod time.Duration `mapstructure:"buffer_period"` + BufferCount int `mapstructure:"buffer_count"` + // UseStartTimeMetric enables retrieving the start time of all counter metrics + // from the process_start_time_seconds metric. This is only correct if all counters on that endpoint + // started after the process start time, and the process is the only actor exporting the metric after + // the process started. It should not be used in "exporters" which export counters that may have + // started before the process itself. Use only if you know what you are doing, as this may result + // in incorrect rate calculations. + UseStartTimeMetric bool `mapstructure:"use_start_time_metric"` + StartTimeMetricRegex string `mapstructure:"start_time_metric_regex"` + + // PreserveUntyped is a setting that lets the collector preserve the untypedness of + // untyped metrics as a metric attribute. 
If set, all untyped prometheus metrics from + // this receiver will have an additional metric attribute called "prometheus_untyped_metric" + // that is a boolean value set to true. + PreserveUntyped bool `mapstructure:"preserve_untyped"` + + TargetAllocator *targetAllocator `mapstructure:"target_allocator"` + + // ConfigPlaceholder is just an entry to make the configuration pass a check + // that requires that all keys present in the config actually exist on the + // structure, ie.: it will error if an unknown key is present. + ConfigPlaceholder interface{} `mapstructure:"config"` +} + +type targetAllocator struct { + Endpoint string `mapstructure:"endpoint"` + Interval time.Duration `mapstructure:"interval"` + CollectorID string `mapstructure:"collector_id"` + // ConfigPlaceholder is just an entry to make the configuration pass a check + // that requires that all keys present in the config actually exist on the + // structure, ie.: it will error if an unknown key is present. + ConfigPlaceholder interface{} `mapstructure:"http_sd_config"` + HTTPSDConfig *promHTTP.SDConfig `mapstructure:"-"` +} + +var _ component.Config = (*Config)(nil) +var _ confmap.Unmarshaler = (*Config)(nil) + +func checkFile(fn string) error { + // Nothing set, nothing to error on. 
+ if fn == "" { + return nil + } + _, err := os.Stat(fn) + return err +} + +func checkTLSConfig(tlsConfig commonconfig.TLSConfig) error { + if err := checkFile(tlsConfig.CertFile); err != nil { + return fmt.Errorf("error checking client cert file %q: %w", tlsConfig.CertFile, err) + } + if err := checkFile(tlsConfig.KeyFile); err != nil { + return fmt.Errorf("error checking client key file %q: %w", tlsConfig.KeyFile, err) + } + return nil +} + +// Method to exercise the prometheus file discovery behavior to ensure there are no errors +// - reference https://github.com/prometheus/prometheus/blob/c0c22ed04200a8d24d1d5719f605c85710f0d008/discovery/file/file.go#L372 +func checkSDFile(filename string) error { + content, err := os.ReadFile(filepath.Clean(filename)) + if err != nil { + return err + } + + var targetGroups []*targetgroup.Group + + switch ext := filepath.Ext(filename); strings.ToLower(ext) { + case ".json": + if err := json.Unmarshal(content, &targetGroups); err != nil { + return fmt.Errorf("error in unmarshaling json file extension: %w", err) + } + case ".yml", ".yaml": + if err := yaml.UnmarshalStrict(content, &targetGroups); err != nil { + return fmt.Errorf("error in unmarshaling yaml file extension: %w", err) + } + default: + return fmt.Errorf("invalid file extension: %q", ext) + } + + for i, tg := range targetGroups { + if tg == nil { + return fmt.Errorf("nil target group item found (index %d)", i) + } + } + return nil +} + +// Validate checks the receiver configuration is valid. 
+func (cfg *Config) Validate() error { + promConfig := cfg.PrometheusConfig + if promConfig != nil { + err := cfg.validatePromConfig(promConfig) + if err != nil { + return err + } + } + + if cfg.TargetAllocator != nil { + err := cfg.validateTargetAllocatorConfig() + if err != nil { + return err + } + } + return nil +} + +func (cfg *Config) validatePromConfig(promConfig *promconfig.Config) error { + if len(promConfig.ScrapeConfigs) == 0 && cfg.TargetAllocator == nil { + return errors.New("no Prometheus scrape_configs or target_allocator set") + } + + // Reject features that Prometheus supports but that the receiver doesn't support: + // See: + // * https://github.com/open-telemetry/opentelemetry-collector/issues/3863 + // * https://github.com/open-telemetry/wg-prometheus/issues/3 + unsupportedFeatures := make([]string, 0, 4) + if len(promConfig.RemoteWriteConfigs) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "remote_write") + } + if len(promConfig.RemoteReadConfigs) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "remote_read") + } + if len(promConfig.RuleFiles) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "rule_files") + } + if len(promConfig.AlertingConfig.AlertRelabelConfigs) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "alert_config.relabel_configs") + } + if len(promConfig.AlertingConfig.AlertmanagerConfigs) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "alert_config.alertmanagers") + } + if len(unsupportedFeatures) != 0 { + // Sort the values for deterministic error messages. 
+ sort.Strings(unsupportedFeatures) + return fmt.Errorf("unsupported features:\n\t%s", strings.Join(unsupportedFeatures, "\n\t")) + } + + for _, sc := range cfg.PrometheusConfig.ScrapeConfigs { + for _, rc := range sc.MetricRelabelConfigs { + if rc.TargetLabel == "__name__" { + // TODO(#2297): Remove validation after renaming is fixed + return fmt.Errorf("error validating scrapeconfig for job %v: %w", sc.JobName, errRenamingDisallowed) + } + } + + if sc.HTTPClientConfig.Authorization != nil { + if err := checkFile(sc.HTTPClientConfig.Authorization.CredentialsFile); err != nil { + return fmt.Errorf("error checking authorization credentials file %q: %w", sc.HTTPClientConfig.Authorization.CredentialsFile, err) + } + } + + if err := checkTLSConfig(sc.HTTPClientConfig.TLSConfig); err != nil { + return err + } + + for _, c := range sc.ServiceDiscoveryConfigs { + switch c := c.(type) { + case *kubernetes.SDConfig: + if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig); err != nil { + return err + } + case *file.SDConfig: + for _, file := range c.Files { + files, err := filepath.Glob(file) + if err != nil { + return err + } + if len(files) != 0 { + for _, f := range files { + err = checkSDFile(f) + if err != nil { + return fmt.Errorf("checking SD file %q: %w", file, err) + } + } + continue + } + return fmt.Errorf("file %q for file_sd in scrape job %q does not exist", file, sc.JobName) + } + } + } + } + return nil +} + +func (cfg *Config) validateTargetAllocatorConfig() error { + // validate targetAllocator + targetAllocatorConfig := cfg.TargetAllocator + if targetAllocatorConfig == nil { + return nil + } + // ensure valid endpoint + if _, err := url.ParseRequestURI(targetAllocatorConfig.Endpoint); err != nil { + return fmt.Errorf("TargetAllocator endpoint is not valid: %s", targetAllocatorConfig.Endpoint) + } + // ensure valid collectorID without variables + if targetAllocatorConfig.CollectorID == "" || strings.Contains(targetAllocatorConfig.CollectorID, "${") { + return 
fmt.Errorf("CollectorID is not a valid ID") + } + + return nil +} + +// Unmarshal a config.Parser into the config struct. +func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { + if componentParser == nil { + return nil + } + // We need custom unmarshaling because prometheus "config" subkey defines its own + // YAML unmarshaling routines so we need to do it explicitly. + + err := componentParser.Unmarshal(cfg, confmap.WithErrorUnused()) + if err != nil { + return fmt.Errorf("prometheus receiver failed to parse config: %w", err) + } + + // Unmarshal prometheus's config values. Since prometheus uses `yaml` tags, so use `yaml`. + promCfg, err := componentParser.Sub(prometheusConfigKey) + if err != nil || len(promCfg.ToStringMap()) == 0 { + return err + } + out, err := yaml.Marshal(promCfg.ToStringMap()) + if err != nil { + return fmt.Errorf("prometheus receiver failed to marshal config to yaml: %w", err) + } + + err = yaml.UnmarshalStrict(out, &cfg.PrometheusConfig) + if err != nil { + return fmt.Errorf("prometheus receiver failed to unmarshal yaml to prometheus config: %w", err) + } + + // Unmarshal targetAllocator configs + targetAllocatorCfg, err := componentParser.Sub(targetAllocatorConfigKey) + if err != nil { + return err + } + targetAllocatorHTTPSDCfg, err := targetAllocatorCfg.Sub(targetAllocatorHTTPSDConfigKey) + if err != nil { + return err + } + + targetAllocatorHTTPSDMap := targetAllocatorHTTPSDCfg.ToStringMap() + if len(targetAllocatorHTTPSDMap) != 0 { + targetAllocatorHTTPSDMap["url"] = "http://placeholder" // we have to set it as else the marshal will fail + httpSDConf, err := yaml.Marshal(targetAllocatorHTTPSDMap) + if err != nil { + return fmt.Errorf("prometheus receiver failed to marshal config to yaml: %w", err) + } + err = yaml.UnmarshalStrict(httpSDConf, &cfg.TargetAllocator.HTTPSDConfig) + if err != nil { + return fmt.Errorf("prometheus receiver failed to unmarshal yaml to prometheus config: %w", err) + } + } + + return nil +} diff 
--git a/collector/receiver/prometheusreceiver/config_test.go b/collector/receiver/prometheusreceiver/config_test.go new file mode 100644 index 0000000..1e40a9e --- /dev/null +++ b/collector/receiver/prometheusreceiver/config_test.go @@ -0,0 +1,303 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver + +import ( + "path/filepath" + "strings" + "testing" + "time" + + promConfig "github.com/prometheus/common/config" + promModel "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" +) + +func TestLoadConfig(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + r0 := cfg.(*Config) + assert.Equal(t, r0, factory.CreateDefaultConfig()) + + sub, err = cm.Sub(component.NewIDWithName(typeStr, "customname").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + r1 := cfg.(*Config) + assert.Equal(t, r1.PrometheusConfig.ScrapeConfigs[0].JobName, "demo") + assert.Equal(t, 
time.Duration(r1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval), 5*time.Second) + assert.Equal(t, r1.UseStartTimeMetric, true) + assert.Equal(t, r1.StartTimeMetricRegex, "^(.+_)*process_start_time_seconds$") + + assert.Equal(t, "http://my-targetallocator-service", r1.TargetAllocator.Endpoint) + assert.Equal(t, 30*time.Second, r1.TargetAllocator.Interval) + assert.Equal(t, "collector-1", r1.TargetAllocator.CollectorID) + assert.Equal(t, promModel.Duration(60*time.Second), r1.TargetAllocator.HTTPSDConfig.RefreshInterval) + assert.Equal(t, "prometheus", r1.TargetAllocator.HTTPSDConfig.HTTPClientConfig.BasicAuth.Username) + assert.Equal(t, promConfig.Secret("changeme"), r1.TargetAllocator.HTTPSDConfig.HTTPClientConfig.BasicAuth.Password) +} + +func TestLoadTargetAllocatorConfig(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config_target_allocator.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + r0 := cfg.(*Config) + assert.NotNil(t, r0.PrometheusConfig) + assert.Equal(t, "http://localhost:8080", r0.TargetAllocator.Endpoint) + assert.Equal(t, 30*time.Second, r0.TargetAllocator.Interval) + assert.Equal(t, "collector-1", r0.TargetAllocator.CollectorID) + + sub, err = cm.Sub(component.NewIDWithName(typeStr, "withScrape").String()) + require.NoError(t, err) + cfg = factory.CreateDefaultConfig() + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + r1 := cfg.(*Config) + assert.NotNil(t, r0.PrometheusConfig) + assert.Equal(t, "http://localhost:8080", r0.TargetAllocator.Endpoint) + assert.Equal(t, 30*time.Second, r0.TargetAllocator.Interval) + assert.Equal(t, "collector-1", r0.TargetAllocator.CollectorID) + + assert.Equal(t, 1, len(r1.PrometheusConfig.ScrapeConfigs)) + assert.Equal(t, "demo", 
r1.PrometheusConfig.ScrapeConfigs[0].JobName) + assert.Equal(t, promModel.Duration(5*time.Second), r1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval) + + sub, err = cm.Sub(component.NewIDWithName(typeStr, "withOnlyScrape").String()) + require.NoError(t, err) + cfg = factory.CreateDefaultConfig() + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + r2 := cfg.(*Config) + assert.Equal(t, 1, len(r2.PrometheusConfig.ScrapeConfigs)) + assert.Equal(t, "demo", r2.PrometheusConfig.ScrapeConfigs[0].JobName) + assert.Equal(t, promModel.Duration(5*time.Second), r2.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval) +} + +func TestLoadConfigFailsOnUnknownSection(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-section.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.Error(t, component.UnmarshalConfig(sub, cfg)) +} + +// As one of the config parameters is consuming prometheus +// configuration as a subkey, ensure that invalid configuration +// within the subkey will also raise an error. 
+func TestLoadConfigFailsOnUnknownPrometheusSection(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-section.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.Error(t, component.UnmarshalConfig(sub, cfg)) +} + +// Renaming is not allowed +func TestLoadConfigFailsOnRenameDisallowed(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-relabel.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + assert.Error(t, component.ValidateConfig(cfg)) + +} + +func TestRejectUnsupportedPrometheusFeatures(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-unsupported-features.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + err = component.ValidateConfig(cfg) + require.NotNil(t, err, "Expected a non-nil error") + + wantErrMsg := `unsupported features: + alert_config.alertmanagers + alert_config.relabel_configs + remote_read + remote_write + rule_files` + + gotErrMsg := strings.ReplaceAll(err.Error(), "\t", strings.Repeat(" ", 8)) + require.Equal(t, wantErrMsg, gotErrMsg) + +} + +func TestNonExistentAuthCredentialsFile(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-non-existent-auth-credentials-file.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := 
cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + err = component.ValidateConfig(cfg) + require.NotNil(t, err, "Expected a non-nil error") + + wantErrMsg := `error checking authorization credentials file "/nonexistentauthcredentialsfile"` + + gotErrMsg := err.Error() + require.True(t, strings.HasPrefix(gotErrMsg, wantErrMsg)) +} + +func TestTLSConfigNonExistentCertFile(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-non-existent-cert-file.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + err = component.ValidateConfig(cfg) + require.NotNil(t, err, "Expected a non-nil error") + + wantErrMsg := `error checking client cert file "/nonexistentcertfile"` + + gotErrMsg := err.Error() + require.True(t, strings.HasPrefix(gotErrMsg, wantErrMsg)) +} + +func TestTLSConfigNonExistentKeyFile(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-non-existent-key-file.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + err = component.ValidateConfig(cfg) + require.NotNil(t, err, "Expected a non-nil error") + + wantErrMsg := `error checking client key file "/nonexistentkeyfile"` + + gotErrMsg := err.Error() + require.True(t, strings.HasPrefix(gotErrMsg, wantErrMsg)) +} + +func TestTLSConfigCertFileWithoutKeyFile(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-cert-file-without-key-file.yaml")) + require.NoError(t, err) + factory := 
NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + err = component.UnmarshalConfig(sub, cfg) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "exactly one of key or key_file must be configured when a client certificate is configured") + } +} + +func TestTLSConfigKeyFileWithoutCertFile(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-key-file-without-cert-file.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + err = component.UnmarshalConfig(sub, cfg) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "exactly one of cert or cert_file must be configured when a client key is configured") + } +} + +func TestKubernetesSDConfigWithoutKeyFile(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-kubernetes-sd-config.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + err = component.UnmarshalConfig(sub, cfg) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "exactly one of key or key_file must be configured when a client certificate is configured") + } +} + +func TestFileSDConfigJsonNilTargetGroup(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-file-sd-config-json.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + err = component.ValidateConfig(cfg) + require.NotNil(t, err, "Expected a non-nil 
error") + + wantErrMsg := `checking SD file "./testdata/sd-config-with-null-target-group.json": nil target group item found (index 1)` + + gotErrMsg := err.Error() + require.Equal(t, wantErrMsg, gotErrMsg) +} + +func TestFileSDConfigYamlNilTargetGroup(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-file-sd-config-yaml.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + err = component.ValidateConfig(cfg) + require.NotNil(t, err, "Expected a non-nil error") + + wantErrMsg := `checking SD file "./testdata/sd-config-with-null-target-group.yaml": nil target group item found (index 1)` + + gotErrMsg := err.Error() + require.Equal(t, wantErrMsg, gotErrMsg) +} diff --git a/collector/receiver/prometheusreceiver/doc.go b/collector/receiver/prometheusreceiver/doc.go new file mode 100644 index 0000000..83dee2c --- /dev/null +++ b/collector/receiver/prometheusreceiver/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheusreceiver autodiscovers and scrapes Prometheus metrics handlers, often served at /metrics. 
+package prometheusreceiver // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver" diff --git a/collector/receiver/prometheusreceiver/factory.go b/collector/receiver/prometheusreceiver/factory.go new file mode 100644 index 0000000..759e9ae --- /dev/null +++ b/collector/receiver/prometheusreceiver/factory.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver" + +import ( + "context" + "errors" + + promconfig "github.com/prometheus/prometheus/config" + _ "github.com/prometheus/prometheus/discovery/install" // init() of this package registers service discovery impl. + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/receiver" +) + +// This file implements config for Prometheus receiver. 
+ +const ( + typeStr = "prometheus" + stability = component.StabilityLevelBeta +) + +var useCreatedMetricGate = featuregate.GlobalRegistry().MustRegister( + "receiver.prometheusreceiver.UseCreatedMetric", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("When enabled, the Prometheus receiver will"+ + " retrieve the start time for Summary, Histogram and Sum metrics from _created metric"), +) + +var errRenamingDisallowed = errors.New("metric renaming using metric_relabel_configs is disallowed") + +// NewFactory creates a new Prometheus receiver factory. +func NewFactory() receiver.Factory { + return receiver.NewFactory( + typeStr, + createDefaultConfig, + receiver.WithMetrics(createMetricsReceiver, stability)) +} + +func createDefaultConfig() component.Config { + return &Config{ + PrometheusConfig: &promconfig.Config{ + GlobalConfig: promconfig.DefaultGlobalConfig, + }, + } +} + +func createMetricsReceiver( + _ context.Context, + set receiver.CreateSettings, + cfg component.Config, + nextConsumer consumer.Metrics, +) (receiver.Metrics, error) { + return newPrometheusReceiver(set, cfg.(*Config), nextConsumer, featuregate.GlobalRegistry()), nil +} diff --git a/collector/receiver/prometheusreceiver/factory_test.go b/collector/receiver/prometheusreceiver/factory_test.go new file mode 100644 index 0000000..f140af6 --- /dev/null +++ b/collector/receiver/prometheusreceiver/factory_test.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver + +import ( + "context" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + cfg := createDefaultConfig() + + // The default config does not provide scrape_config so we expect that metrics receiver + // creation must also fail. + creationSet := receivertest.NewNopCreateSettings() + mReceiver, _ := createMetricsReceiver(context.Background(), creationSet, cfg, nil) + assert.NotNil(t, mReceiver) + assert.NotNil(t, mReceiver.(*pReceiver).cfg.PrometheusConfig.GlobalConfig) +} + +func TestFactoryCanParseServiceDiscoveryConfigs(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config_sd.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String()) + require.NoError(t, err) + assert.NoError(t, component.UnmarshalConfig(sub, cfg)) +} diff --git a/collector/receiver/prometheusreceiver/internal/appendable.go b/collector/receiver/prometheusreceiver/internal/appendable.go new file mode 100644 index 0000000..612aaa6 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/appendable.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "context" + "regexp" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/receiver" +) + +// appendable translates Prometheus scraping diffs into OpenTelemetry format. +type appendable struct { + sink consumer.Metrics + metricAdjuster MetricsAdjuster + useStartTimeMetric bool + preserveUntyped bool + startTimeMetricRegex *regexp.Regexp + externalLabels labels.Labels + + settings receiver.CreateSettings + obsrecv *obsreport.Receiver + registry *featuregate.Registry +} + +// NewAppendable returns a storage.Appendable instance that emits metrics to the sink. 
+func NewAppendable( + sink consumer.Metrics, + set receiver.CreateSettings, + gcInterval time.Duration, + useStartTimeMetric bool, + preserveUntyped bool, + startTimeMetricRegex *regexp.Regexp, + useCreatedMetric bool, + externalLabels labels.Labels, + registry *featuregate.Registry) (storage.Appendable, error) { + var metricAdjuster MetricsAdjuster + if !useStartTimeMetric { + metricAdjuster = NewInitialPointAdjuster(set.Logger, gcInterval, useCreatedMetric) + } else { + metricAdjuster = NewStartTimeMetricAdjuster(set.Logger, startTimeMetricRegex) + } + + obsrecv, err := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: set.ID, Transport: transport, ReceiverCreateSettings: set}) + if err != nil { + return nil, err + } + + return &appendable{ + sink: sink, + settings: set, + metricAdjuster: metricAdjuster, + useStartTimeMetric: useStartTimeMetric, + startTimeMetricRegex: startTimeMetricRegex, + externalLabels: externalLabels, + obsrecv: obsrecv, + registry: registry, + preserveUntyped: preserveUntyped, + }, nil +} + +func (o *appendable) Appender(ctx context.Context) storage.Appender { + return newTransaction(ctx, o.metricAdjuster, o.sink, o.externalLabels, o.settings, o.obsrecv, o.registry, o.preserveUntyped) +} diff --git a/collector/receiver/prometheusreceiver/internal/logger.go b/collector/receiver/prometheusreceiver/internal/logger.go new file mode 100644 index 0000000..590f806 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/logger.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + gokitLog "github.com/go-kit/log" + "github.com/go-kit/log/level" + "go.uber.org/zap" +) + +const ( + levelKey = "level" + msgKey = "msg" + errKey = "err" +) + +// NewZapToGokitLogAdapter create an adapter for zap.Logger to gokitLog.Logger +func NewZapToGokitLogAdapter(logger *zap.Logger) gokitLog.Logger { + // need to skip two levels in order to get the correct caller + // one for this method, the other for gokitLog + logger = logger.WithOptions(zap.AddCallerSkip(2)) + return &zapToGokitLogAdapter{l: logger.Sugar()} +} + +type zapToGokitLogAdapter struct { + l *zap.SugaredLogger +} + +type logData struct { + level level.Value + msg string + otherFields []interface{} +} + +func (w *zapToGokitLogAdapter) Log(keyvals ...interface{}) error { + // expecting key value pairs, the number of items need to be even + if len(keyvals)%2 == 0 { + // Extract log level and message and log them using corresponding zap function + ld := extractLogData(keyvals) + logFunc := levelToFunc(w.l, ld.level) + logFunc(ld.msg, ld.otherFields...) + } else { + // in case something goes wrong + w.l.Info(keyvals...) 
+ } + return nil +} + +func extractLogData(keyvals []interface{}) logData { + ld := logData{ + level: level.InfoValue(), // default + } + + for i := 0; i < len(keyvals); i += 2 { + key := keyvals[i] + val := keyvals[i+1] + + if l, ok := matchLogLevel(key, val); ok { + ld.level = l + continue + } + + if m, ok := matchLogMessage(key, val); ok { + ld.msg = m + continue + } + + if err, ok := matchError(key, val); ok { + ld.otherFields = append(ld.otherFields, zap.Error(err)) + continue + } + + ld.otherFields = append(ld.otherFields, key, val) + } + + return ld +} + +// check if a given key-value pair represents go-kit log message and return it +func matchLogMessage(key interface{}, val interface{}) (string, bool) { + if strKey, ok := key.(string); !ok || strKey != msgKey { + return "", false + } + + msg, ok := val.(string) + if !ok { + return "", false + } + return msg, true +} + +// check if a given key-value pair represents go-kit log level and return it +func matchLogLevel(key interface{}, val interface{}) (level.Value, bool) { + strKey, ok := key.(string) + if !ok || strKey != levelKey { + return nil, false + } + + levelVal, ok := val.(level.Value) + if !ok { + return nil, false + } + return levelVal, true +} + +//revive:disable:error-return + +// check if a given key-value pair represents an error and return it +func matchError(key interface{}, val interface{}) (error, bool) { + strKey, ok := key.(string) + if !ok || strKey != errKey { + return nil, false + } + + err, ok := val.(error) + if !ok { + return nil, false + } + return err, true +} + +//revive:enable:error-return + +// find a matching zap logging function to be used for a given level +func levelToFunc(logger *zap.SugaredLogger, lvl level.Value) func(string, ...interface{}) { + switch lvl { + case level.DebugValue(): + return logger.Debugw + case level.InfoValue(): + return logger.Infow + case level.WarnValue(): + return logger.Warnw + case level.ErrorValue(): + return logger.Errorw + } + + // default + 
return logger.Infow +} + +var _ gokitLog.Logger = (*zapToGokitLogAdapter)(nil) diff --git a/collector/receiver/prometheusreceiver/internal/logger_test.go b/collector/receiver/prometheusreceiver/internal/logger_test.go new file mode 100644 index 0000000..9913080 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/logger_test.go @@ -0,0 +1,301 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + "net/http" + "testing" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" +) + +func TestLog(t *testing.T) { + tcs := []struct { + name string + input []interface{} + wantLevel zapcore.Level + wantMessage string + }{ + { + name: "Starting provider", + input: []interface{}{ + "level", + level.DebugValue(), + "msg", + "Starting provider", + "provider", + "string/0", + "subs", + "[target1]", + }, + wantLevel: zapcore.DebugLevel, + wantMessage: "Starting provider", + }, + { + name: "Scrape failed", + input: []interface{}{ + "level", + level.ErrorValue(), + "scrape_pool", + "target1", + "msg", + "Scrape failed", + "err", + "server returned HTTP status 500 Internal Server Error", + }, + wantLevel: zapcore.ErrorLevel, + wantMessage: "Scrape failed", + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t 
*testing.T) { + conf := zap.NewProductionConfig() + conf.Level.SetLevel(zapcore.DebugLevel) + + // capture zap log entry + var entry zapcore.Entry + h := func(e zapcore.Entry) error { + entry = e + return nil + } + + logger, err := conf.Build(zap.Hooks(h)) + require.NoError(t, err) + + adapter := NewZapToGokitLogAdapter(logger) + err = adapter.Log(tc.input...) + require.NoError(t, err) + + assert.Equal(t, tc.wantLevel, entry.Level) + assert.Equal(t, tc.wantMessage, entry.Message) + }) + } +} + +func TestExtractLogData(t *testing.T) { + tcs := []struct { + name string + input []interface{} + wantLevel level.Value + wantMessage string + wantOutput []interface{} + }{ + { + name: "nil fields", + input: nil, + wantLevel: level.InfoValue(), // Default + wantMessage: "", + wantOutput: nil, + }, + { + name: "empty fields", + input: []interface{}{}, + wantLevel: level.InfoValue(), // Default + wantMessage: "", + wantOutput: nil, + }, + { + name: "info level", + input: []interface{}{ + "level", + level.InfoValue(), + }, + wantLevel: level.InfoValue(), + wantMessage: "", + wantOutput: nil, + }, + { + name: "warn level", + input: []interface{}{ + "level", + level.WarnValue(), + }, + wantLevel: level.WarnValue(), + wantMessage: "", + wantOutput: nil, + }, + { + name: "error level", + input: []interface{}{ + "level", + level.ErrorValue(), + }, + wantLevel: level.ErrorValue(), + wantMessage: "", + wantOutput: nil, + }, + { + name: "debug level + extra fields", + input: []interface{}{ + "timestamp", + 1596604719, + "level", + level.DebugValue(), + "msg", + "http client error", + }, + wantLevel: level.DebugValue(), + wantMessage: "http client error", + wantOutput: []interface{}{ + "timestamp", 1596604719, + }, + }, + { + name: "missing level field", + input: []interface{}{ + "timestamp", + 1596604719, + "msg", + "http client error", + }, + wantLevel: level.InfoValue(), // Default + wantMessage: "http client error", + wantOutput: []interface{}{ + "timestamp", 1596604719, + }, + }, + 
{ + name: "invalid level type", + input: []interface{}{ + "level", + "warn", // String is not recognized + }, + wantLevel: level.InfoValue(), // Default + wantOutput: []interface{}{ + "level", "warn", // Field is preserved + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + ld := extractLogData(tc.input) + assert.Equal(t, tc.wantLevel, ld.level) + assert.Equal(t, tc.wantMessage, ld.msg) + assert.Equal(t, tc.wantOutput, ld.otherFields) + }) + } +} + +func TestE2E(t *testing.T) { + logger, observed := observer.New(zap.DebugLevel) + gLogger := NewZapToGokitLogAdapter(zap.New(logger)) + + const targetStr = "https://host.docker.internal:5000/prometheus" + + tcs := []struct { + name string + log func() error + wantLevel zapcore.Level + wantMessage string + wantOutput []zapcore.Field + }{ + { + name: "debug level", + log: func() error { + return level.Debug(gLogger).Log() + }, + wantLevel: zapcore.DebugLevel, + wantMessage: "", + wantOutput: []zapcore.Field{}, + }, + { + name: "info level", + log: func() error { + return level.Info(gLogger).Log() + }, + wantLevel: zapcore.InfoLevel, + wantMessage: "", + wantOutput: []zapcore.Field{}, + }, + { + name: "warn level", + log: func() error { + return level.Warn(gLogger).Log() + }, + wantLevel: zapcore.WarnLevel, + wantMessage: "", + wantOutput: []zapcore.Field{}, + }, + { + name: "error level", + log: func() error { + return level.Error(gLogger).Log() + }, + wantLevel: zapcore.ErrorLevel, + wantMessage: "", + wantOutput: []zapcore.Field{}, + }, + { + name: "logger with and msg", + log: func() error { + ngLogger := log.With(gLogger, "scrape_pool", "scrape_pool") + ngLogger = log.With(ngLogger, "target", targetStr) + return level.Debug(ngLogger).Log("msg", "http client error", "err", fmt.Errorf("%s %q: dial tcp 192.168.65.2:5000: connect: connection refused", http.MethodGet, targetStr)) + }, + wantLevel: zapcore.DebugLevel, + wantMessage: "http client error", + wantOutput: []zapcore.Field{ + 
zap.String("scrape_pool", "scrape_pool"), + zap.String("target", "https://host.docker.internal:5000/prometheus"), + zap.Error(fmt.Errorf("%s %q: dial tcp 192.168.65.2:5000: connect: connection refused", http.MethodGet, targetStr)), + }, + }, + { + name: "missing level", + log: func() error { + ngLogger := log.With(gLogger, "target", "foo") + return ngLogger.Log("msg", "http client error") + }, + wantLevel: zapcore.InfoLevel, // Default + wantMessage: "http client error", + wantOutput: []zapcore.Field{ + zap.String("target", "foo"), + }, + }, + { + name: "invalid level type", + log: func() error { + ngLogger := log.With(gLogger, "target", "foo") + return ngLogger.Log("msg", "http client error", "level", "warn") + }, + wantLevel: zapcore.InfoLevel, // Default + wantMessage: "http client error", + wantOutput: []zapcore.Field{ + zap.String("target", "foo"), + zap.String("level", "warn"), // Field is preserved + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + assert.NoError(t, tc.log()) + entries := observed.TakeAll() + require.Len(t, entries, 1) + assert.Equal(t, tc.wantLevel, entries[0].Level) + assert.Equal(t, tc.wantMessage, entries[0].Message) + assert.Equal(t, tc.wantOutput, entries[0].Context) + }) + } +} diff --git a/collector/receiver/prometheusreceiver/internal/metadata.go b/collector/receiver/prometheusreceiver/internal/metadata.go new file mode 100644 index 0000000..f1cb222 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/metadata.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/scrape" +) + +type dataPoint struct { + value float64 + boundary float64 +} + +// internalMetricMetadata allows looking up metadata for internal scrape metrics +var internalMetricMetadata = map[string]*scrape.MetricMetadata{ + scrapeUpMetricName: { + Metric: scrapeUpMetricName, + Type: textparse.MetricTypeGauge, + Help: "The scraping was successful", + }, + "scrape_duration_seconds": { + Metric: "scrape_duration_seconds", + Unit: "seconds", + Type: textparse.MetricTypeGauge, + Help: "Duration of the scrape", + }, + "scrape_samples_scraped": { + Metric: "scrape_samples_scraped", + Type: textparse.MetricTypeGauge, + Help: "The number of samples the target exposed", + }, + "scrape_series_added": { + Metric: "scrape_series_added", + Type: textparse.MetricTypeGauge, + Help: "The approximate number of new series in this scrape", + }, + "scrape_samples_post_metric_relabeling": { + Metric: "scrape_samples_post_metric_relabeling", + Type: textparse.MetricTypeGauge, + Help: "The number of samples remaining after metric relabeling was applied", + }, +} + +func metadataForMetric(metricName string, mc scrape.MetricMetadataStore) (*scrape.MetricMetadata, string) { + if metadata, ok := internalMetricMetadata[metricName]; ok { + return metadata, metricName + } + if metadata, ok := mc.GetMetadata(metricName); ok { + return &metadata, 
metricName + } + // If we didn't find metadata with the original name, + // try with suffixes trimmed, in-case it is a "merged" metric type. + normalizedName := normalizeMetricName(metricName) + if metadata, ok := mc.GetMetadata(normalizedName); ok { + if metadata.Type == textparse.MetricTypeCounter { + return &metadata, metricName + } + return &metadata, normalizedName + } + // Otherwise, the metric is unknown + return &scrape.MetricMetadata{ + Metric: metricName, + Type: textparse.MetricTypeUnknown, + }, metricName +} diff --git a/collector/receiver/prometheusreceiver/internal/metricfamily.go b/collector/receiver/prometheusreceiver/internal/metricfamily.go new file mode 100644 index 0000000..73815f7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/metricfamily.go @@ -0,0 +1,435 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "encoding/hex" + "fmt" + "math" + "sort" + "strings" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/scrape" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" +) + +const ( + traceIDKey = "trace_id" + spanIDKey = "span_id" + + GCPOpsAgentUntypedMetricKey = "prometheus.googleapis.com/internal/untyped_metric" +) + +type metricFamily struct { + mtype pmetric.MetricType + preserveUntyped bool + pType textparse.MetricType + // isMonotonic only applies to sums + isMonotonic bool + groups map[uint64]*metricGroup + name string + metadata *scrape.MetricMetadata + groupOrders []*metricGroup +} + +// metricGroup, represents a single metric of a metric family. for example a histogram metric is usually represent by +// a couple data complexValue (buckets and count/sum), a group of a metric family always share a same set of tags. 
for +// simple types like counter and gauge, each data point is a group of itself +type metricGroup struct { + mtype pmetric.MetricType + preserveUntyped bool + pType textparse.MetricType + ts int64 + ls labels.Labels + count float64 + hasCount bool + sum float64 + hasSum bool + created float64 + value float64 + complexValue []*dataPoint + exemplars pmetric.ExemplarSlice +} + +func newMetricFamily(metricName string, mc scrape.MetricMetadataStore, logger *zap.Logger, preserveUntyped bool) *metricFamily { + metadata, familyName := metadataForMetric(metricName, mc) + mtype, isMonotonic := convToMetricType(metadata.Type) + if mtype == pmetric.MetricTypeEmpty { + logger.Debug(fmt.Sprintf("Unknown-typed metric : %s %+v", metricName, metadata)) + } + + return &metricFamily{ + mtype: mtype, + pType: metadata.Type, + isMonotonic: isMonotonic, + groups: make(map[uint64]*metricGroup), + name: familyName, + metadata: metadata, + preserveUntyped: preserveUntyped, + } +} + +// includesMetric returns true if the metric is part of the family +func (mf *metricFamily) includesMetric(metricName string) bool { + if mf.mtype != pmetric.MetricTypeGauge { + // If it is a merged family type, then it should match the + // family name when suffixes are trimmed. 
+ return normalizeMetricName(metricName) == mf.name + } + // If it isn't a merged type, the metricName and family name should match + return metricName == mf.name +} + +func (mg *metricGroup) sortPoints() { + sort.Slice(mg.complexValue, func(i, j int) bool { + return mg.complexValue[i].boundary < mg.complexValue[j].boundary + }) +} + +func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) { + if !mg.hasCount || len(mg.complexValue) == 0 { + return + } + + mg.sortPoints() + + // for OTLP the bounds won't include +inf + bounds := make([]float64, len(mg.complexValue)-1) + bucketCounts := make([]uint64, len(mg.complexValue)) + + pointIsStale := value.IsStaleNaN(mg.sum) || value.IsStaleNaN(mg.count) + + for i := 0; i < len(mg.complexValue); i++ { + if i != len(mg.complexValue)-1 { + // not need to add +inf as OTLP assumes it + bounds[i] = mg.complexValue[i].boundary + } else if mg.complexValue[i].boundary != math.Inf(1) { + // This histogram is missing the +Inf bucket, and isn't a complete prometheus histogram. + return + } + adjustedCount := mg.complexValue[i].value + // Buckets still need to be sent to know to set them as stale, + // but a staleness NaN converted to uint64 would be an extremely large number. + // Setting to 0 instead. + if pointIsStale { + adjustedCount = 0 + } else if i != 0 { + adjustedCount -= mg.complexValue[i-1].value + } + bucketCounts[i] = uint64(adjustedCount) + } + + point := dest.AppendEmpty() + + if pointIsStale { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + } else { + point.SetCount(uint64(mg.count)) + if mg.hasSum { + point.SetSum(mg.sum) + } + } + + point.ExplicitBounds().FromRaw(bounds) + point.BucketCounts().FromRaw(bucketCounts) + + // The timestamp MUST be in retrieved from milliseconds and converted to nanoseconds. 
+ tsNanos := timestampFromMs(mg.ts) + if mg.created != 0 { + point.SetStartTimestamp(timestampFromFloat64(mg.created)) + } else { + // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + point.SetStartTimestamp(tsNanos) + } + point.SetTimestamp(tsNanos) + populateAttributes(pmetric.MetricTypeHistogram, mg.pType, mg.ls, point.Attributes(), mg.preserveUntyped) + mg.setExemplars(point.Exemplars()) +} + +func (mg *metricGroup) setExemplars(exemplars pmetric.ExemplarSlice) { + if mg == nil { + return + } + if mg.exemplars.Len() > 0 { + mg.exemplars.MoveAndAppendTo(exemplars) + } +} + +func (mg *metricGroup) toSummaryPoint(dest pmetric.SummaryDataPointSlice) { + // expecting count to be provided, however, in the following two cases, they can be missed. + // 1. data is corrupted + // 2. ignored by startValue evaluation + if !mg.hasCount { + return + } + + mg.sortPoints() + + point := dest.AppendEmpty() + pointIsStale := value.IsStaleNaN(mg.sum) || value.IsStaleNaN(mg.count) + if pointIsStale { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + } else { + if mg.hasSum { + point.SetSum(mg.sum) + } + point.SetCount(uint64(mg.count)) + } + + quantileValues := point.QuantileValues() + for _, p := range mg.complexValue { + quantile := quantileValues.AppendEmpty() + // Quantiles still need to be sent to know to set them as stale, + // but a staleness NaN converted to uint64 would be an extremely large number. + // By not setting the quantile value, it will default to 0. 
+ if !pointIsStale { + quantile.SetValue(p.value) + } + quantile.SetQuantile(p.boundary) + } + + // Based on the summary description from https://prometheus.io/docs/concepts/metric_types/#summary + // the quantiles are calculated over a sliding time window, however, the count is the total count of + // observations and the corresponding sum is a sum of all observed values, thus the sum and count used + // at the global level of the metricspb.SummaryValue + // The timestamp MUST be in retrieved from milliseconds and converted to nanoseconds. + tsNanos := timestampFromMs(mg.ts) + point.SetTimestamp(tsNanos) + if mg.created != 0 { + point.SetStartTimestamp(timestampFromFloat64(mg.created)) + } else { + // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + point.SetStartTimestamp(tsNanos) + } + populateAttributes(pmetric.MetricTypeSummary, mg.pType, mg.ls, point.Attributes(), mg.preserveUntyped) +} + +func (mg *metricGroup) toNumberDataPoint(dest pmetric.NumberDataPointSlice) { + tsNanos := timestampFromMs(mg.ts) + point := dest.AppendEmpty() + // gauge/undefined types have no start time. 
+ if mg.mtype == pmetric.MetricTypeSum { + if mg.created != 0 { + point.SetStartTimestamp(timestampFromFloat64(mg.created)) + } else { + // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + point.SetStartTimestamp(tsNanos) + } + } + point.SetTimestamp(tsNanos) + if value.IsStaleNaN(mg.value) { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + } else { + point.SetDoubleValue(mg.value) + } + populateAttributes(pmetric.MetricTypeGauge, mg.pType, mg.ls, point.Attributes(), mg.preserveUntyped) + mg.setExemplars(point.Exemplars()) +} + +func populateAttributes(mType pmetric.MetricType, pType textparse.MetricType, ls labels.Labels, dest pcommon.Map, preserveUntyped bool) { + dest.EnsureCapacity(ls.Len()) + names := getSortedNotUsefulLabels(mType) + j := 0 + for i := range ls { + for j < len(names) && names[j] < ls[i].Name { + j++ + } + if j < len(names) && ls[i].Name == names[j] { + continue + } + if ls[i].Value == "" { + // empty label values should be omitted + continue + } + dest.PutStr(ls[i].Name, ls[i].Value) + } + + // Preserve the untypedness of the metric as a metric attribute. 
+ if preserveUntyped && (pType == textparse.MetricTypeUnknown) { + dest.PutBool(GCPOpsAgentUntypedMetricKey, true) + } +} + +func (mf *metricFamily) loadMetricGroupOrCreate(groupKey uint64, ls labels.Labels, ts int64) *metricGroup { + mg, ok := mf.groups[groupKey] + if !ok { + mg = &metricGroup{ + mtype: mf.mtype, + pType: mf.pType, + ts: ts, + ls: ls, + exemplars: pmetric.NewExemplarSlice(), + preserveUntyped: mf.preserveUntyped, + } + mf.groups[groupKey] = mg + // maintaining data insertion order is helpful to generate stable/reproducible metric output + mf.groupOrders = append(mf.groupOrders, mg) + } + return mg +} + +func (mf *metricFamily) addSeries(seriesRef uint64, metricName string, ls labels.Labels, t int64, v float64) error { + mg := mf.loadMetricGroupOrCreate(seriesRef, ls, t) + if mg.ts != t { + return fmt.Errorf("inconsistent timestamps on metric points for metric %v", metricName) + } + switch mf.mtype { + case pmetric.MetricTypeHistogram, pmetric.MetricTypeSummary: + switch { + case strings.HasSuffix(metricName, metricsSuffixSum): + mg.sum = v + mg.hasSum = true + case strings.HasSuffix(metricName, metricsSuffixCount): + // always use the timestamp from count, because is the only required field for histograms and summaries. 
+ mg.ts = t + mg.count = v + mg.hasCount = true + case strings.HasSuffix(metricName, metricSuffixCreated): + mg.created = v + default: + boundary, err := getBoundary(mf.mtype, ls) + if err != nil { + return err + } + mg.complexValue = append(mg.complexValue, &dataPoint{value: v, boundary: boundary}) + } + case pmetric.MetricTypeSum: + if strings.HasSuffix(metricName, metricSuffixCreated) { + mg.created = v + } else { + mg.value = v + } + default: + mg.value = v + } + + return nil +} + +func (mf *metricFamily) appendMetric(metrics pmetric.MetricSlice, normalizer *prometheus.Normalizer) { + metric := pmetric.NewMetric() + // Trims type's and unit's suffixes from metric name + metric.SetName(normalizer.TrimPromSuffixes(mf.name, mf.mtype, mf.metadata.Unit)) + metric.SetDescription(mf.metadata.Help) + metric.SetUnit(mf.metadata.Unit) + + pointCount := 0 + + switch mf.mtype { + case pmetric.MetricTypeHistogram: + histogram := metric.SetEmptyHistogram() + histogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + hdpL := histogram.DataPoints() + for _, mg := range mf.groupOrders { + mg.toDistributionPoint(hdpL) + } + pointCount = hdpL.Len() + + case pmetric.MetricTypeSummary: + summary := metric.SetEmptySummary() + sdpL := summary.DataPoints() + for _, mg := range mf.groupOrders { + mg.toSummaryPoint(sdpL) + } + pointCount = sdpL.Len() + + case pmetric.MetricTypeSum: + sum := metric.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(mf.isMonotonic) + sdpL := sum.DataPoints() + for _, mg := range mf.groupOrders { + mg.toNumberDataPoint(sdpL) + } + pointCount = sdpL.Len() + + default: // Everything else should be set to a Gauge. 
+ gauge := metric.SetEmptyGauge() + gdpL := gauge.DataPoints() + for _, mg := range mf.groupOrders { + mg.toNumberDataPoint(gdpL) + } + pointCount = gdpL.Len() + } + + if pointCount == 0 { + return + } + + metric.MoveTo(metrics.AppendEmpty()) +} + +func (mf *metricFamily) addExemplar(seriesRef uint64, e exemplar.Exemplar) { + mg := mf.groups[seriesRef] + if mg == nil { + return + } + es := mg.exemplars + convertExemplar(e, es.AppendEmpty()) +} + +func convertExemplar(pe exemplar.Exemplar, e pmetric.Exemplar) { + e.SetTimestamp(timestampFromMs(pe.Ts)) + e.SetDoubleValue(pe.Value) + e.FilteredAttributes().EnsureCapacity(len(pe.Labels)) + for _, lb := range pe.Labels { + switch strings.ToLower(lb.Name) { + case traceIDKey: + var tid [16]byte + err := decodeAndCopyToLowerBytes(tid[:], []byte(lb.Value)) + if err == nil { + e.SetTraceID(tid) + } else { + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + case spanIDKey: + var sid [8]byte + err := decodeAndCopyToLowerBytes(sid[:], []byte(lb.Value)) + if err == nil { + e.SetSpanID(sid) + } else { + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + default: + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + } +} + +/* + decodeAndCopyToLowerBytes copies src to dst on lower bytes instead of higher + +1. If len(src) > len(dst) -> copy first len(dst) bytes as it is. Example -> src = []byte{0xab,0xcd,0xef,0xgh,0xij}, dst = [2]byte, result dst = [2]byte{0xab, 0xcd} +2. If len(src) = len(dst) -> copy src to dst as it is +3. If len(src) < len(dst) -> prepend required 0s and then add src to dst. 
Example -> src = []byte{0xab, 0xcd}, dst = [8]byte, result dst = [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd} +*/ +func decodeAndCopyToLowerBytes(dst []byte, src []byte) error { + var err error + decodedLen := hex.DecodedLen(len(src)) + if decodedLen >= len(dst) { + _, err = hex.Decode(dst, src[:hex.EncodedLen(len(dst))]) + } else { + _, err = hex.Decode(dst[len(dst)-decodedLen:], src) + } + return err +} diff --git a/collector/receiver/prometheusreceiver/internal/metricfamily_test.go b/collector/receiver/prometheusreceiver/internal/metricfamily_test.go new file mode 100644 index 0000000..311f46e --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/metricfamily_test.go @@ -0,0 +1,674 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "math" + "testing" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/scrape" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" +) + +type testMetadataStore map[string]scrape.MetricMetadata + +func (tmc testMetadataStore) GetMetadata(familyName string) (scrape.MetricMetadata, bool) { + lookup, ok := tmc[familyName] + return lookup, ok +} + +func (tmc testMetadataStore) ListMetadata() []scrape.MetricMetadata { return nil } + +func (tmc testMetadataStore) SizeMetadata() int { return 0 } + +func (tmc testMetadataStore) LengthMetadata() int { + return len(tmc) +} + +var mc = testMetadataStore{ + "counter": scrape.MetricMetadata{ + Metric: "cr", + Type: textparse.MetricTypeCounter, + Help: "This is some help for a counter", + Unit: "By", + }, + "gauge": scrape.MetricMetadata{ + Metric: "ge", + Type: textparse.MetricTypeGauge, + Help: "This is some help for a gauge", + Unit: "1", + }, + "gaugehistogram": scrape.MetricMetadata{ + Metric: "gh", + Type: textparse.MetricTypeGaugeHistogram, + Help: "This is some help for a gauge histogram", + Unit: "?", + }, + "histogram": scrape.MetricMetadata{ + Metric: "hg", + Type: textparse.MetricTypeHistogram, + Help: "This is some help for a histogram", + Unit: "ms", + }, + "histogram_with_created": scrape.MetricMetadata{ + Metric: "hg", + Type: textparse.MetricTypeHistogram, + Help: "This is some help for a histogram", + Unit: "ms", + }, + "histogram_stale": scrape.MetricMetadata{ + Metric: "hg_stale", + Type: textparse.MetricTypeHistogram, + Help: "This is some help for a histogram", + Unit: "ms", + }, + "summary": 
scrape.MetricMetadata{ + Metric: "s", + Type: textparse.MetricTypeSummary, + Help: "This is some help for a summary", + Unit: "ms", + }, + "summary_with_created": scrape.MetricMetadata{ + Metric: "s", + Type: textparse.MetricTypeSummary, + Help: "This is some help for a summary", + Unit: "ms", + }, + "summary_stale": scrape.MetricMetadata{ + Metric: "s_stale", + Type: textparse.MetricTypeSummary, + Help: "This is some help for a summary", + Unit: "ms", + }, + "unknown": scrape.MetricMetadata{ + Metric: "u", + Type: textparse.MetricTypeUnknown, + Help: "This is some help for an unknown metric", + Unit: "?", + }, +} + +func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { + type scrape struct { + at int64 + value float64 + metric string + extraLabel labels.Label + } + tests := []struct { + name string + metricName string + labels labels.Labels + scrapes []*scrape + want func() pmetric.HistogramDataPoint + wantErr bool + intervalStartTimeMs int64 + }{ + { + name: "histogram with startTimestamp", + metricName: "histogram", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: 66, metric: "histogram_count"}, + {at: 11, value: 1004.78, metric: "histogram_sum"}, + {at: 11, value: 33, metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "0.75"}}, + {at: 11, value: 55, metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "2.75"}}, + {at: 11, value: 66, metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "+Inf"}}, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
+ point.ExplicitBounds().FromRaw([]float64{0.75, 2.75}) + point.BucketCounts().FromRaw([]uint64{33, 22, 11}) + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "histogram with startTimestamp from _created", + metricName: "histogram_with_created", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A"}), + scrapes: []*scrape{ + {at: 11, value: 66, metric: "histogram_with_created_count"}, + {at: 11, value: 1004.78, metric: "histogram_with_created_sum"}, + {at: 11, value: 600.78, metric: "histogram_with_created_created"}, + { + at: 11, + value: 33, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "0.75"}, + }, + { + at: 11, + value: 55, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "2.75"}, + }, + { + at: 11, + value: 66, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "+Inf"}}, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + + // the time in milliseconds -> nanoseconds. 
+ point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(600.78)) + + point.ExplicitBounds().FromRaw([]float64{0.75, 2.75}) + point.BucketCounts().FromRaw([]uint64{33, 22, 11}) + attributes := point.Attributes() + attributes.PutStr("a", "A") + return point + }, + }, + { + name: "histogram that is stale", + metricName: "histogram_stale", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_count"}, + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_sum"}, + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "0.75"}}, + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "2.75"}}, + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "+Inf"}}, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + point.ExplicitBounds().FromRaw([]float64{0.75, 2.75}) + point.BucketCounts().FromRaw([]uint64{0, 0, 0}) + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
+ attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "histogram with inconsistent timestamps", + metricName: "histogram_inconsistent_ts", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "le": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_count"}, + {at: 12, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_sum"}, + {at: 13, value: math.Float64frombits(value.StaleNaN), metric: "value"}, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mp := newMetricFamily(tt.metricName, mc, zap.NewNop(), true) + for i, tv := range tt.scrapes { + var lbls labels.Labels + if tv.extraLabel.Name != "" { + lbls = labels.NewBuilder(tt.labels).Set(tv.extraLabel.Name, tv.extraLabel.Value).Labels(nil) + } else { + lbls = tt.labels.Copy() + } + sRef, _ := getSeriesRef(nil, lbls, mp.mtype) + err := mp.addSeries(sRef, tv.metric, lbls, tv.at, tv.value) + if tt.wantErr { + if i != 0 { + require.Error(t, err) + } + } else { + require.NoError(t, err) + } + } + if tt.wantErr { + // Don't check the result if we got an error + return + } + + require.Len(t, mp.groups, 1) + + sl := pmetric.NewMetricSlice() + mp.appendMetric(sl, prometheus.NewNormalizer(featuregate.GlobalRegistry())) + + require.Equal(t, 1, sl.Len(), "Exactly one metric expected") + metric := sl.At(0) + require.Equal(t, mc[tt.metricName].Help, metric.Description(), "Expected help metadata in metric description") + require.Equal(t, mc[tt.metricName].Unit, metric.Unit(), "Expected unit metadata in metric") + + hdpL := metric.Histogram().DataPoints() + require.Equal(t, 1, hdpL.Len(), "Exactly one point expected") + got := hdpL.At(0) + want := tt.want() + require.Equal(t, want, got, "Expected the points to be equal") + }) + } +} + +func 
TestMetricGroupData_toSummaryUnitTest(t *testing.T) { + type scrape struct { + at int64 + value float64 + metric string + } + + type labelsScrapes struct { + labels labels.Labels + scrapes []*scrape + } + tests := []struct { + name string + labelsScrapes []*labelsScrapes + want func() pmetric.SummaryDataPoint + wantErr bool + }{ + { + name: "summary", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_count"}, + {at: 14, value: 15, metric: "summary_sum"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.0", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 8, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 33.7, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.50", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 27, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.90", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 56, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.99", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 82, metric: "value"}, + }, + }, + }, + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() + point.SetCount(10) + point.SetSum(15) + qtL := point.QuantileValues() + qn0 := qtL.AppendEmpty() + qn0.SetQuantile(0) + qn0.SetValue(8) + qn50 := qtL.AppendEmpty() + qn50.SetQuantile(.5) + qn50.SetValue(27) + qn75 := qtL.AppendEmpty() + qn75.SetQuantile(.75) + qn75.SetValue(33.7) + qn90 := qtL.AppendEmpty() + qn90.SetQuantile(.9) + qn90.SetValue(56) + qn99 := qtL.AppendEmpty() + qn99.SetQuantile(.99) + qn99.SetValue(82) + point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> 
nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "summary_with_created", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_with_created_count"}, + {at: 14, value: 15, metric: "summary_with_created_sum"}, + {at: 14, value: 150, metric: "summary_with_created_created"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.0", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 8, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 33.7, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.50", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 27, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.90", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 56, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.99", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 82, metric: "value"}, + }, + }, + }, + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() + point.SetCount(10) + point.SetSum(15) + qtL := point.QuantileValues() + qn0 := qtL.AppendEmpty() + qn0.SetQuantile(0) + qn0.SetValue(8) + qn50 := qtL.AppendEmpty() + qn50.SetQuantile(.5) + qn50.SetValue(27) + qn75 := qtL.AppendEmpty() + qn75.SetQuantile(.75) + qn75.SetValue(33.7) + qn90 := qtL.AppendEmpty() + qn90.SetQuantile(.9) + qn90.SetValue(56) + qn99 := qtL.AppendEmpty() + qn99.SetQuantile(.99) + qn99.SetValue(82) + + // the time in milliseconds -> nanoseconds. 
+ point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(150)) + + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "summary_stale", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.0", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_stale_count"}, + {at: 14, value: 12, metric: "summary_stale_sum"}, + {at: 14, value: 8, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_stale_count"}, + {at: 14, value: 1004.78, metric: "summary_stale_sum"}, + {at: 14, value: 33.7, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.50", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_stale_count"}, + {at: 14, value: 13, metric: "summary_stale_sum"}, + {at: 14, value: 27, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.90", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_stale_count"}, + {at: 14, value: 14, metric: "summary_stale_sum"}, + {at: 14, value: 56, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.99", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: math.Float64frombits(value.StaleNaN), metric: "summary_stale_count"}, + {at: 14, value: math.Float64frombits(value.StaleNaN), metric: "summary_stale_sum"}, + {at: 14, value: math.Float64frombits(value.StaleNaN), metric: "value"}, + }, + }, + }, + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() + qtL := point.QuantileValues() + qn0 := qtL.AppendEmpty() + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + qn0.SetQuantile(0) + 
qn0.SetValue(0) + qn50 := qtL.AppendEmpty() + qn50.SetQuantile(.5) + qn50.SetValue(0) + qn75 := qtL.AppendEmpty() + qn75.SetQuantile(.75) + qn75.SetValue(0) + qn90 := qtL.AppendEmpty() + qn90.SetQuantile(.9) + qn90.SetValue(0) + qn99 := qtL.AppendEmpty() + qn99.SetQuantile(.99) + qn99.SetValue(0) + point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "summary with inconsistent timestamps", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: 10, metric: "summary_count"}, + {at: 14, value: 15, metric: "summary_sum"}, + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mp := newMetricFamily(tt.name, mc, zap.NewNop(), true) + for _, lbs := range tt.labelsScrapes { + for i, scrape := range lbs.scrapes { + lb := lbs.labels.Copy() + sRef, _ := getSeriesRef(nil, lb, mp.mtype) + err := mp.addSeries(sRef, scrape.metric, lb, scrape.at, scrape.value) + if tt.wantErr { + // The first scrape won't have an error + if i != 0 { + require.Error(t, err) + } + } else { + require.NoError(t, err) + } + } + } + if tt.wantErr { + // Don't check the result if we got an error + return + } + + require.Len(t, mp.groups, 1) + + sl := pmetric.NewMetricSlice() + mp.appendMetric(sl, prometheus.NewNormalizer(featuregate.GlobalRegistry())) + + require.Equal(t, 1, sl.Len(), "Exactly one metric expected") + metric := sl.At(0) + require.Equal(t, mc[tt.name].Help, metric.Description(), "Expected help metadata in metric description") + require.Equal(t, mc[tt.name].Unit, metric.Unit(), "Expected unit metadata in metric") + + sdpL := 
metric.Summary().DataPoints() + require.Equal(t, 1, sdpL.Len(), "Exactly one point expected") + got := sdpL.At(0) + want := tt.want() + require.Equal(t, want, got, "Expected the points to be equal") + }) + } +} + +func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) { + type scrape struct { + at int64 + value float64 + metric string + } + tests := []struct { + name string + metricKind string + labels labels.Labels + scrapes []*scrape + intervalStartTimestampMs int64 + want func() pmetric.NumberDataPoint + }{ + { + metricKind: "counter", + name: "counter:: startTimestampMs from _created", + intervalStartTimestampMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 13, value: 33.7, metric: "value"}, + {at: 13, value: 150, metric: "value_created"}, + }, + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() + point.SetDoubleValue(33.7) + + // the time in milliseconds -> nanoseconds. + point.SetTimestamp(pcommon.Timestamp(13 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(150)) + + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + metricKind: "counter", + name: "counter:: startTimestampMs of 11", + intervalStartTimestampMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 13, value: 33.7, metric: "value"}, + }, + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() + point.SetDoubleValue(33.7) + point.SetTimestamp(pcommon.Timestamp(13 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(13 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
+ attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "counter:: startTimestampMs of 0", + metricKind: "counter", + intervalStartTimestampMs: 0, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 28, value: 99.9, metric: "value"}, + }, + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() + point.SetDoubleValue(99.9) + point.SetTimestamp(pcommon.Timestamp(28 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(28 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mp := newMetricFamily(tt.metricKind, mc, zap.NewNop(), true) + for _, tv := range tt.scrapes { + lb := tt.labels.Copy() + sRef, _ := getSeriesRef(nil, lb, mp.mtype) + require.NoError(t, mp.addSeries(sRef, tv.metric, lb, tv.at, tv.value)) + } + + require.Len(t, mp.groups, 1) + + sl := pmetric.NewMetricSlice() + mp.appendMetric(sl, prometheus.NewNormalizer(featuregate.GlobalRegistry())) + + require.Equal(t, 1, sl.Len(), "Exactly one metric expected") + metric := sl.At(0) + require.Equal(t, mc[tt.metricKind].Help, metric.Description(), "Expected help metadata in metric description") + require.Equal(t, mc[tt.metricKind].Unit, metric.Unit(), "Expected unit metadata in metric") + + ndpL := metric.Sum().DataPoints() + require.Equal(t, 1, ndpL.Len(), "Exactly one point expected") + got := ndpL.At(0) + want := tt.want() + require.Equal(t, want, got, "Expected the points to be equal") + }) + } +} diff --git a/collector/receiver/prometheusreceiver/internal/metrics_adjuster.go b/collector/receiver/prometheusreceiver/internal/metrics_adjuster.go new file mode 100644 index 0000000..b016fac --- /dev/null 
+++ b/collector/receiver/prometheusreceiver/internal/metrics_adjuster.go @@ -0,0 +1,442 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "errors" + "sync" + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + semconv "go.opentelemetry.io/collector/semconv/v1.6.1" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) + +// Notes on garbage collection (gc): +// +// Job-level gc: +// The Prometheus receiver will likely execute in a long running service whose lifetime may exceed +// the lifetimes of many of the jobs that it is collecting from. In order to keep the JobsMap from +// leaking memory for entries of no-longer existing jobs, the JobsMap needs to remove entries that +// haven't been accessed for a long period of time. +// +// Timeseries-level gc: +// Some jobs that the Prometheus receiver is collecting from may export timeseries based on metrics +// from other jobs (e.g. cAdvisor). In order to keep the timeseriesMap from leaking memory for entries +// of no-longer existing jobs, the timeseriesMap for each job needs to remove entries that haven't +// been accessed for a long period of time. 
+// +// The gc strategy uses a standard mark-and-sweep approach - each time a timeseriesMap is accessed, +// it is marked. Similarly, each time a timeseriesInfo is accessed, it is also marked. +// +// At the end of each JobsMap.get(), if the last time the JobsMap was gc'd exceeds the 'gcInterval', +// the JobsMap is locked and any timeseriesMaps that are unmarked are removed from the JobsMap +// otherwise the timeseriesMap is gc'd +// +// The gc for the timeseriesMap is straightforward - the map is locked and, for each timeseriesInfo +// in the map, if it has not been marked, it is removed otherwise it is unmarked. +// +// Alternative Strategies +// 1. If the job-level gc doesn't run often enough, or runs too often, a separate go routine can +// be spawned at JobMap creation time that gc's at periodic intervals. This approach potentially +// adds more contention and latency to each scrape so the current approach is used. Note that +// the go routine will need to be cancelled upon Shutdown(). +// 2. If the gc of each timeseriesMap during the gc of the JobsMap causes too much contention, +// the gc of timeseriesMaps can be moved to the end of MetricsAdjuster().AdjustMetricSlice(). This +// approach requires adding 'lastGC' Time and (potentially) a gcInterval duration to +// timeseriesMap so the current approach is used instead. + +// timeseriesInfo contains the information necessary to adjust from the initial point and to detect resets. 
+type timeseriesInfo struct {
+	mark bool
+
+	number    numberInfo
+	histogram histogramInfo
+	summary   summaryInfo
+}
+
+type numberInfo struct {
+	startTime     pcommon.Timestamp
+	previousValue float64
+}
+
+type histogramInfo struct {
+	startTime     pcommon.Timestamp
+	previousCount uint64
+	previousSum   float64
+}
+
+type summaryInfo struct {
+	startTime     pcommon.Timestamp
+	previousCount uint64
+	previousSum   float64
+}
+
+type timeseriesKey struct {
+	name           string
+	attributes     [16]byte
+	aggTemporality pmetric.AggregationTemporality
+}
+
+// timeseriesMap maps from a timeseries instance (metric * label values) to the timeseries info for
+// the instance.
+type timeseriesMap struct {
+	sync.RWMutex
+	// The mutex is used to protect access to the member fields. It is acquired for the entirety of
+	// AdjustMetricSlice() and also acquired by gc().
+
+	mark   bool
+	tsiMap map[timeseriesKey]*timeseriesInfo
+}
+
+// Get the timeseriesInfo for the timeseries associated with the metric and label values.
+func (tsm *timeseriesMap) get(metric pmetric.Metric, kv pcommon.Map) (*timeseriesInfo, bool) {
+	// This should only be invoked by functions called (directly or indirectly) by AdjustMetricSlice().
+	// The lock protecting tsm.tsiMap is acquired there.
+	name := metric.Name()
+	key := timeseriesKey{
+		name:       name,
+		attributes: getAttributesSignature(kv),
+	}
+	if metric.Type() == pmetric.MetricTypeHistogram {
+		// There are 2 types of Histograms whose aggregation temporality needs distinguishing:
+		// * CumulativeHistogram
+		// * GaugeHistogram
+		key.aggTemporality = metric.Histogram().AggregationTemporality()
+	}
+
+	tsm.mark = true
+	tsi, ok := tsm.tsiMap[key]
+	if !ok {
+		tsi = &timeseriesInfo{}
+		tsm.tsiMap[key] = tsi
+	}
+	tsi.mark = true
+	return tsi, ok
+}
+
+// Create a unique string signature for attributes values sorted by attribute keys.
+func getAttributesSignature(m pcommon.Map) [16]byte { + clearedMap := pcommon.NewMap() + m.Range(func(k string, attrValue pcommon.Value) bool { + value := attrValue.Str() + if value != "" { + clearedMap.PutStr(k, value) + } + return true + }) + return pdatautil.MapHash(clearedMap) +} + +// Remove timeseries that have aged out. +func (tsm *timeseriesMap) gc() { + tsm.Lock() + defer tsm.Unlock() + // this shouldn't happen under the current gc() strategy + if !tsm.mark { + return + } + for ts, tsi := range tsm.tsiMap { + if !tsi.mark { + delete(tsm.tsiMap, ts) + } else { + tsi.mark = false + } + } + tsm.mark = false +} + +func newTimeseriesMap() *timeseriesMap { + return ×eriesMap{mark: true, tsiMap: map[timeseriesKey]*timeseriesInfo{}} +} + +// JobsMap maps from a job instance to a map of timeseries instances for the job. +type JobsMap struct { + sync.RWMutex + // The mutex is used to protect access to the member fields. It is acquired for most of + // get() and also acquired by gc(). + + gcInterval time.Duration + lastGC time.Time + jobsMap map[string]*timeseriesMap +} + +// NewJobsMap creates a new (empty) JobsMap. +func NewJobsMap(gcInterval time.Duration) *JobsMap { + return &JobsMap{gcInterval: gcInterval, lastGC: time.Now(), jobsMap: make(map[string]*timeseriesMap)} +} + +// Remove jobs and timeseries that have aged out. +func (jm *JobsMap) gc() { + jm.Lock() + defer jm.Unlock() + // once the structure is locked, confirm that gc() is still necessary + if time.Since(jm.lastGC) > jm.gcInterval { + for sig, tsm := range jm.jobsMap { + tsm.RLock() + tsmNotMarked := !tsm.mark + // take a read lock here, no need to get a full lock as we have a lock on the JobsMap + tsm.RUnlock() + if tsmNotMarked { + delete(jm.jobsMap, sig) + } else { + // a full lock will be obtained in here, if required. 
	tsm.gc()
			}
		}
		jm.lastGC = time.Now()
	}
}

// maybeGC kicks off a background gc() when the interval has elapsed; gc() itself
// rechecks the interval under the write lock.
func (jm *JobsMap) maybeGC() {
	// speculatively check if gc() is necessary, recheck once the structure is locked
	jm.RLock()
	defer jm.RUnlock()
	if time.Since(jm.lastGC) > jm.gcInterval {
		go jm.gc()
	}
}

// get returns (creating if necessary) the timeseriesMap for the given job/instance pair.
func (jm *JobsMap) get(job, instance string) *timeseriesMap {
	sig := job + ":" + instance
	// a read lock is taken here as we will not need to modify jobsMap if the target timeseriesMap is available.
	jm.RLock()
	tsm, ok := jm.jobsMap[sig]
	jm.RUnlock()
	defer jm.maybeGC()
	if ok {
		return tsm
	}
	jm.Lock()
	defer jm.Unlock()
	// Now that we've got an exclusive lock, check once more to ensure an entry wasn't created in the interim
	// and then create a new timeseriesMap if required.
	tsm2, ok2 := jm.jobsMap[sig]
	if ok2 {
		return tsm2
	}
	tsm2 = newTimeseriesMap()
	jm.jobsMap[sig] = tsm2
	return tsm2
}

// MetricsAdjuster adjusts the start timestamps of received metrics.
type MetricsAdjuster interface {
	AdjustMetrics(metrics pmetric.Metrics) error
}

// initialPointAdjuster takes a map from a metric instance to the initial point in the metrics instance
// and provides AdjustMetricSlice, which takes a sequence of metrics and adjusts their start times based on
// the initial points.
type initialPointAdjuster struct {
	jobsMap          *JobsMap
	logger           *zap.Logger
	useCreatedMetric bool
}

// NewInitialPointAdjuster returns a new MetricsAdjuster that adjusts metrics' start times based on the initial received points.
func NewInitialPointAdjuster(logger *zap.Logger, gcInterval time.Duration, useCreatedMetric bool) MetricsAdjuster {
	return &initialPointAdjuster{
		jobsMap:          NewJobsMap(gcInterval),
		logger:           logger,
		useCreatedMetric: useCreatedMetric,
	}
}

// AdjustMetrics takes a sequence of metrics and adjusts their start times based on the initial and
// previous points in the timeseriesMap.
func (a *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error {
	// By contract metrics will have at least 1 data point, so for sure will have at least one ResourceMetrics.

	// Job and instance are read from the first resource's service.name / service.instance.id
	// attributes; an error is returned when either is absent.
	job, found := metrics.ResourceMetrics().At(0).Resource().Attributes().Get(semconv.AttributeServiceName)
	if !found {
		return errors.New("adjusting metrics without job")
	}

	instance, found := metrics.ResourceMetrics().At(0).Resource().Attributes().Get(semconv.AttributeServiceInstanceID)
	if !found {
		return errors.New("adjusting metrics without instance")
	}
	tsm := a.jobsMap.get(job.Str(), instance.Str())

	// The lock on the relevant timeseriesMap is held throughout the adjustment process to ensure that
	// nothing else can modify the data used for adjustment.
	tsm.Lock()
	defer tsm.Unlock()
	// Walk resource -> scope -> metric and dispatch on metric type.
	for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
		rm := metrics.ResourceMetrics().At(i)
		for j := 0; j < rm.ScopeMetrics().Len(); j++ {
			ilm := rm.ScopeMetrics().At(j)
			for k := 0; k < ilm.Metrics().Len(); k++ {
				metric := ilm.Metrics().At(k)
				switch dataType := metric.Type(); dataType {
				case pmetric.MetricTypeGauge:
					// gauges don't need to be adjusted so no additional processing is necessary

				case pmetric.MetricTypeHistogram:
					a.adjustMetricHistogram(tsm, metric)

				case pmetric.MetricTypeSummary:
					a.adjustMetricSummary(tsm, metric)

				case pmetric.MetricTypeSum:
					a.adjustMetricSum(tsm, metric)

				default:
					// this shouldn't happen
					a.logger.Info("Adjust - skipping unexpected point", zap.String("type", dataType.String()))
				}
			}
		}
	}
	return nil
}

// adjustMetricHistogram rewrites each cumulative histogram point's start timestamp from the
// stored initial point, re-initializing on detected resets (count or sum decreasing).
func (a *initialPointAdjuster) adjustMetricHistogram(tsm *timeseriesMap, current pmetric.Metric) {
	histogram := current.Histogram()
	if histogram.AggregationTemporality() != pmetric.AggregationTemporalityCumulative {
		// Only dealing with CumulativeDistributions.
		return
	}

	currentPoints := histogram.DataPoints()
	for i := 0; i < currentPoints.Len(); i++ {
		currentDist := currentPoints.At(i)

		// start timestamp was set from _created
		if a.useCreatedMetric &&
			!currentDist.Flags().NoRecordedValue() &&
			currentDist.StartTimestamp() < currentDist.Timestamp() {
			continue
		}

		tsi, found := tsm.get(current, currentDist.Attributes())
		if !found {
			// initialize everything.
			tsi.histogram.startTime = currentDist.StartTimestamp()
			tsi.histogram.previousCount = currentDist.Count()
			tsi.histogram.previousSum = currentDist.Sum()
			continue
		}

		if currentDist.Flags().NoRecordedValue() {
			// TODO: Investigate why this does not reset.
			currentDist.SetStartTimestamp(tsi.histogram.startTime)
			continue
		}

		// A decreasing count or sum signals a counter reset on the target.
		if currentDist.Count() < tsi.histogram.previousCount || currentDist.Sum() < tsi.histogram.previousSum {
			// reset re-initialize everything.
			tsi.histogram.startTime = currentDist.StartTimestamp()
			tsi.histogram.previousCount = currentDist.Count()
			tsi.histogram.previousSum = currentDist.Sum()
			continue
		}

		// Update only previous values.
		tsi.histogram.previousCount = currentDist.Count()
		tsi.histogram.previousSum = currentDist.Sum()
		currentDist.SetStartTimestamp(tsi.histogram.startTime)
	}
}

// adjustMetricSum rewrites each sum point's start timestamp from the stored initial
// point, re-initializing on detected resets (value decreasing).
func (a *initialPointAdjuster) adjustMetricSum(tsm *timeseriesMap, current pmetric.Metric) {
	currentPoints := current.Sum().DataPoints()
	for i := 0; i < currentPoints.Len(); i++ {
		currentSum := currentPoints.At(i)

		// start timestamp was set from _created
		if a.useCreatedMetric &&
			!currentSum.Flags().NoRecordedValue() &&
			currentSum.StartTimestamp() < currentSum.Timestamp() {
			continue
		}

		tsi, found := tsm.get(current, currentSum.Attributes())
		if !found {
			// initialize everything.
			tsi.number.startTime = currentSum.StartTimestamp()
			tsi.number.previousValue = currentSum.DoubleValue()
			continue
		}

		if currentSum.Flags().NoRecordedValue() {
			// TODO: Investigate why this does not reset.
			currentSum.SetStartTimestamp(tsi.number.startTime)
			continue
		}

		// A decreasing value signals a counter reset on the target.
		if currentSum.DoubleValue() < tsi.number.previousValue {
			// reset re-initialize everything.
			tsi.number.startTime = currentSum.StartTimestamp()
			tsi.number.previousValue = currentSum.DoubleValue()
			continue
		}

		// Update only previous values.
		tsi.number.previousValue = currentSum.DoubleValue()
		currentSum.SetStartTimestamp(tsi.number.startTime)
	}
}

// adjustMetricSummary rewrites each summary point's start timestamp from the stored
// initial point, re-initializing on detected resets. Zero counts/sums are ignored in
// reset detection because summaries may legitimately report 0 for either.
func (a *initialPointAdjuster) adjustMetricSummary(tsm *timeseriesMap, current pmetric.Metric) {
	currentPoints := current.Summary().DataPoints()

	for i := 0; i < currentPoints.Len(); i++ {
		currentSummary := currentPoints.At(i)

		// start timestamp was set from _created
		if a.useCreatedMetric &&
			!currentSummary.Flags().NoRecordedValue() &&
			currentSummary.StartTimestamp() < currentSummary.Timestamp() {
			continue
		}

		tsi, found := tsm.get(current, currentSummary.Attributes())
		if !found {
			// initialize everything.
			tsi.summary.startTime = currentSummary.StartTimestamp()
			tsi.summary.previousCount = currentSummary.Count()
			tsi.summary.previousSum = currentSummary.Sum()
			continue
		}

		if currentSummary.Flags().NoRecordedValue() {
			// TODO: Investigate why this does not reset.
			currentSummary.SetStartTimestamp(tsi.summary.startTime)
			continue
		}

		if (currentSummary.Count() != 0 &&
			tsi.summary.previousCount != 0 &&
			currentSummary.Count() < tsi.summary.previousCount) ||
			(currentSummary.Sum() != 0 &&
				tsi.summary.previousSum != 0 &&
				currentSummary.Sum() < tsi.summary.previousSum) {
			// reset re-initialize everything.
			tsi.summary.startTime = currentSummary.StartTimestamp()
			tsi.summary.previousCount = currentSummary.Count()
			tsi.summary.previousSum = currentSummary.Sum()
			continue
		}

		// Update only previous values.
		tsi.summary.previousCount = currentSummary.Count()
		tsi.summary.previousSum = currentSummary.Sum()
		currentSummary.SetStartTimestamp(tsi.summary.startTime)
	}
}
diff --git a/collector/receiver/prometheusreceiver/internal/metrics_adjuster_test.go b/collector/receiver/prometheusreceiver/internal/metrics_adjuster_test.go
new file mode 100644
index 0000000..4da08a2
--- /dev/null
+++ b/collector/receiver/prometheusreceiver/internal/metrics_adjuster_test.go
@@ -0,0 +1,692 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package internal + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + semconv "go.opentelemetry.io/collector/semconv/v1.8.0" + "go.uber.org/zap" +) + +var ( + tUnknown = timestampFromMs(0) + t1 = timestampFromMs(1) + t2 = timestampFromMs(2) + t3 = timestampFromMs(3) + t4 = timestampFromMs(4) + t5 = timestampFromMs(5) + + bounds0 = []float64{1, 2, 4} + percent0 = []float64{10, 50, 90} + + sum1 = "sum1" + gauge1 = "gauge1" + histogram1 = "histogram1" + summary1 = "summary1" + + k1v1k2v2 = []*kv{ + {"k1", "v1"}, + {"k2", "v2"}, + } + + k1v10k2v20 = []*kv{ + {"k1", "v10"}, + {"k2", "v20"}, + } + + k1v100k2v200 = []*kv{ + {"k1", "v100"}, + {"k2", "v200"}, + } + + emptyLabels []*kv + k1vEmpty = []*kv{{"k1", ""}} + k1vEmptyk2vEmptyk3vEmpty = []*kv{{"k1", ""}, {"k2", ""}, {"k3", ""}} +) + +func TestGauge(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Gauge: round 1 - gauge not adjusted", + metrics: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t1, t1, 44))), + adjusted: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t1, t1, 44))), + }, + { + description: "Gauge: round 2 - gauge not adjusted", + metrics: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t2, t2, 66))), + adjusted: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t2, t2, 66))), + }, + { + description: "Gauge: round 3 - value less than previous value - gauge is not adjusted", + metrics: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55))), + adjusted: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSum(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Sum: round 1 - initial instance, start time is established", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44))), + adjusted: 
metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44))), + }, + { + description: "Sum: round 2 - instance adjusted based on round 1", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t2, t2, 66))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t2, 66))), + }, + { + description: "Sum: round 3 - instance reset (value less than previous value), start time is reset", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 55))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 55))), + }, + { + description: "Sum: round 4 - instance adjusted based on round 3", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 72))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t4, 72))), + }, + { + description: "Sum: round 5 - instance adjusted based on round 4", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t5, t5, 72))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t5, 72))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSummaryNoCount(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Summary No Count: round 1 - initial instance, start time is established", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 0, 40, percent0, []float64{1, 5, 8}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 0, 40, percent0, []float64{1, 5, 8}))), + }, + { + description: "Summary No Count: round 2 - instance adjusted based on round 1", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t2, t2, 0, 70, percent0, []float64{7, 44, 9}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t2, 0, 70, percent0, []float64{7, 44, 9}))), + }, + { + description: "Summary No Count: round 3 - instance reset (count less than previous), start time is reset", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, 
t3, 0, 66, percent0, []float64{3, 22, 5}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 0, 66, percent0, []float64{3, 22, 5}))), + }, + { + description: "Summary No Count: round 4 - instance adjusted based on round 3", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t4, t4, 0, 96, percent0, []float64{9, 47, 8}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t4, 0, 96, percent0, []float64{9, 47, 8}))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSummaryFlagNoRecordedValue(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Summary No Count: round 1 - initial instance, start time is established", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 0, 40, percent0, []float64{1, 5, 8}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 0, 40, percent0, []float64{1, 5, 8}))), + }, + { + description: "Summary Flag NoRecordedValue: round 2 - instance adjusted based on round 1", + metrics: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, t2, t2))), + adjusted: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, t1, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSummary(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Summary: round 1 - initial instance, start time is established", + metrics: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + adjusted: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + }, + { + description: "Summary: round 2 - instance adjusted based on round 1", + metrics: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t2, t2, 15, 70, percent0, []float64{7, 44, 9})), + ), + adjusted: 
metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t2, 15, 70, percent0, []float64{7, 44, 9})), + ), + }, + { + description: "Summary: round 3 - instance reset (count less than previous), start time is reset", + metrics: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 12, 66, percent0, []float64{3, 22, 5})), + ), + adjusted: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 12, 66, percent0, []float64{3, 22, 5})), + ), + }, + { + description: "Summary: round 4 - instance adjusted based on round 3", + metrics: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t4, t4, 14, 96, percent0, []float64{9, 47, 8})), + ), + adjusted: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t4, 14, 96, percent0, []float64{9, 47, 8})), + ), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestHistogram(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Histogram: round 1 - initial instance, start time is established", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7}))), + }, { + description: "Histogram: round 2 - instance adjusted based on round 1", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t2, t2, bounds0, []uint64{6, 3, 4, 8}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t2, bounds0, []uint64{6, 3, 4, 8}))), + }, { + description: "Histogram: round 3 - instance reset (value less than previous value), start time is reset", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{5, 3, 2, 7}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{5, 3, 2, 7}))), + }, { + description: "Histogram: round 4 - instance 
adjusted based on round 3", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t4, t4, bounds0, []uint64{7, 4, 2, 12}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t4, bounds0, []uint64{7, 4, 2, 12}))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestHistogramFlagNoRecordedValue(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Histogram: round 1 - initial instance, start time is established", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{7, 4, 2, 12}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{7, 4, 2, 12}))), + }, + { + description: "Histogram: round 2 - instance adjusted based on round 1", + metrics: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, t1, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestHistogramFlagNoRecordedValueFirstObservation(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Histogram: round 1 - initial instance, start time is unknown", + metrics: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Histogram: round 2 - instance unchanged", + metrics: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSummaryFlagNoRecordedValueFirstObservation(t *testing.T) { + script := 
[]*metricsAdjusterTest{ + { + description: "Summary: round 1 - initial instance, start time is unknown", + metrics: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Summary: round 2 - instance unchanged", + metrics: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestGaugeFlagNoRecordedValueFirstObservation(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Gauge: round 1 - initial instance, start time is unknown", + metrics: metrics(gaugeMetric(gauge1, doublePointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(gaugeMetric(gauge1, doublePointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Gauge: round 2 - instance unchanged", + metrics: metrics(gaugeMetric(gauge1, doublePointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(gaugeMetric(gauge1, doublePointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSumFlagNoRecordedValueFirstObservation(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Sum: round 1 - initial instance, start time is unknown", + metrics: metrics(sumMetric("sum1", doublePointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(sumMetric("sum1", doublePointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Sum: round 2 - instance unchanged", + metrics: metrics(sumMetric("sum1", doublePointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(sumMetric("sum1", doublePointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), 
"job", "0", script) +} + +func TestMultiMetrics(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "MultiMetrics: round 1 - combined round 1 of individual metrics", + metrics: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + adjusted: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + }, + { + description: "MultiMetrics: round 2 - combined round 2 of individual metrics", + metrics: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t2, t2, 66)), + sumMetric(sum1, doublePoint(k1v1k2v2, t2, t2, 66)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t2, t2, bounds0, []uint64{6, 3, 4, 8})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t2, t2, 15, 70, percent0, []float64{7, 44, 9})), + ), + adjusted: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t2, t2, 66)), + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t2, 66)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t2, bounds0, []uint64{6, 3, 4, 8})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t2, 15, 70, percent0, []float64{7, 44, 9})), + ), + }, + { + description: "MultiMetrics: round 3 - combined round 3 of individual metrics", + metrics: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55)), + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 55)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{5, 3, 2, 7})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 12, 66, percent0, 
[]float64{3, 22, 5})), + ), + adjusted: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55)), + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 55)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{5, 3, 2, 7})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 12, 66, percent0, []float64{3, 22, 5})), + ), + }, + { + description: "MultiMetrics: round 4 - combined round 4 of individual metrics", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 72)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t4, t4, bounds0, []uint64{7, 4, 2, 12})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t4, t4, 14, 96, percent0, []float64{9, 47, 8})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t4, 72)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t4, bounds0, []uint64{7, 4, 2, 12})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t4, 14, 96, percent0, []float64{9, 47, 8})), + ), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestNewDataPointsAdded(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "New Datapoints: round 1 - two datapoints each", + metrics: metrics( + sumMetric(sum1, + doublePoint(k1v1k2v2, t1, t1, 44), + doublePoint(k1v100k2v200, t1, t1, 44)), + histogramMetric(histogram1, + histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v100k2v200, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, + summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v100k2v200, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + adjusted: metrics( + sumMetric(sum1, + doublePoint(k1v1k2v2, t1, t1, 44), + doublePoint(k1v100k2v200, t1, t1, 44)), + histogramMetric(histogram1, + histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v100k2v200, t1, t1, bounds0, []uint64{4, 2, 
3, 7})), + summaryMetric(summary1, + summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v100k2v200, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + }, + { + description: "New Datapoints: round 2 - new datapoints unchanged, old datapoints adjusted", + metrics: metrics( + sumMetric(sum1, + doublePoint(k1v1k2v2, t2, t2, 44), + doublePoint(k1v10k2v20, t2, t2, 44), + doublePoint(k1v100k2v200, t2, t2, 44)), + histogramMetric(histogram1, + histogramPoint(k1v1k2v2, t2, t2, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v10k2v20, t2, t2, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v100k2v200, t2, t2, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, + summaryPoint(k1v1k2v2, t2, t2, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v10k2v20, t2, t2, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v100k2v200, t2, t2, 10, 40, percent0, []float64{1, 5, 8})), + ), + adjusted: metrics( + sumMetric(sum1, + doublePoint(k1v1k2v2, t1, t2, 44), + doublePoint(k1v10k2v20, t2, t2, 44), + doublePoint(k1v100k2v200, t1, t2, 44)), + histogramMetric(histogram1, + histogramPoint(k1v1k2v2, t1, t2, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v10k2v20, t2, t2, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v100k2v200, t1, t2, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, + summaryPoint(k1v1k2v2, t1, t2, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v10k2v20, t2, t2, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v100k2v200, t1, t2, 10, 40, percent0, []float64{1, 5, 8})), + ), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestMultiTimeseries(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "MultiTimeseries: round 1 - initial first instance, start time is established", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44))), + adjusted: metrics(sumMetric(sum1, 
doublePoint(k1v1k2v2, t1, t1, 44))), + }, + { + description: "MultiTimeseries: round 2 - first instance adjusted based on round 1, initial second instance", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t2, t2, 66)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t2, 20.0)), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t2, 66)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t2, 20.0)), + ), + }, + { + description: "MultiTimeseries: round 3 - first instance adjusted based on round 1, second based on round 2", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 88.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t3, t3, 49.0)), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t3, 88.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t3, 49.0)), + ), + }, + { + description: "MultiTimeseries: round 4 - first instance reset, second instance adjusted based on round 2, initial third instance", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 87.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t4, t4, 57.0)), + sumMetric(sum1, doublePoint(k1v100k2v200, t4, t4, 10.0)), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 87.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t4, 57.0)), + sumMetric(sum1, doublePoint(k1v100k2v200, t4, t4, 10.0)), + ), + }, + { + description: "MultiTimeseries: round 5 - first instance adjusted based on round 4, second on round 2, third on round 4", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t5, t5, 90.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t5, t5, 65.0)), + sumMetric(sum1, doublePoint(k1v100k2v200, t5, t5, 22.0)), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t5, 90.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t5, 65.0)), + sumMetric(sum1, doublePoint(k1v100k2v200, t4, t5, 22.0)), + ), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", 
script) +} + +func TestEmptyLabels(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "EmptyLabels: round 1 - initial instance, implicitly empty labels, start time is established", + metrics: metrics(sumMetric(sum1, doublePoint(emptyLabels, t1, t1, 44))), + adjusted: metrics(sumMetric(sum1, doublePoint(emptyLabels, t1, t1, 44))), + }, + { + description: "EmptyLabels: round 2 - instance adjusted based on round 1", + metrics: metrics(sumMetric(sum1, doublePoint(emptyLabels, t2, t2, 66))), + adjusted: metrics(sumMetric(sum1, doublePoint(emptyLabels, t1, t2, 66))), + }, + { + description: "EmptyLabels: round 3 - one explicitly empty label, instance adjusted based on round 1", + metrics: metrics(sumMetric(sum1, doublePoint(k1vEmpty, t3, t3, 77))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1vEmpty, t1, t3, 77))), + }, + { + description: "EmptyLabels: round 4 - three explicitly empty labels, instance adjusted based on round 1", + metrics: metrics(sumMetric(sum1, doublePoint(k1vEmptyk2vEmptyk3vEmpty, t3, t3, 88))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1vEmptyk2vEmptyk3vEmpty, t1, t3, 88))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestTsGC(t *testing.T) { + script1 := []*metricsAdjusterTest{ + { + description: "TsGC: round 1 - initial instances, start time is established", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v10k2v20, t1, t1, 20)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t1, t1, bounds0, []uint64{40, 20, 30, 70})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v10k2v20, t1, t1, 20)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + histogramMetric(histogram1, 
histogramPoint(k1v10k2v20, t1, t1, bounds0, []uint64{40, 20, 30, 70})), + ), + }, + } + + script2 := []*metricsAdjusterTest{ + { + description: "TsGC: round 2 - metrics first timeseries adjusted based on round 2, second timeseries not updated", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t2, t2, 88)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t2, t2, bounds0, []uint64{8, 7, 9, 14})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t2, 88)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t2, bounds0, []uint64{8, 7, 9, 14})), + ), + }, + } + + script3 := []*metricsAdjusterTest{ + { + description: "TsGC: round 3 - metrics first timeseries adjusted based on round 2, second timeseries empty due to timeseries gc()", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 99)), + sumMetric(sum1, doublePoint(k1v10k2v20, t3, t3, 80)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{9, 8, 10, 15})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t3, t3, bounds0, []uint64{55, 66, 33, 77})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t3, 99)), + sumMetric(sum1, doublePoint(k1v10k2v20, t3, t3, 80)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t3, bounds0, []uint64{9, 8, 10, 15})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t3, t3, bounds0, []uint64{55, 66, 33, 77})), + ), + }, + } + + ma := NewInitialPointAdjuster(zap.NewNop(), time.Minute, true) + + // run round 1 + runScript(t, ma, "job", "0", script1) + // gc the tsmap, unmarking all entries + ma.(*initialPointAdjuster).jobsMap.get("job", "0").gc() + // run round 2 - update metrics first timeseries only + runScript(t, ma, "job", "0", script2) + // gc the tsmap, collecting unmarked entries + ma.(*initialPointAdjuster).jobsMap.get("job", "0").gc() + // run round 3 - verify that metrics second timeseries have been gc'd + runScript(t, ma, "job", "0", script3) 
+} + +func TestJobGC(t *testing.T) { + job1Script1 := []*metricsAdjusterTest{ + { + description: "JobGC: job 1, round 1 - initial instances, adjusted should be empty", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v10k2v20, t1, t1, 20)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t1, t1, bounds0, []uint64{40, 20, 30, 70})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v10k2v20, t1, t1, 20)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t1, t1, bounds0, []uint64{40, 20, 30, 70})), + ), + }, + } + + job2Script1 := []*metricsAdjusterTest{ + { + description: "JobGC: job2, round 1 - no metrics adjusted, just trigger gc", + metrics: metrics(), + adjusted: metrics(), + }, + } + + job1Script2 := []*metricsAdjusterTest{ + { + description: "JobGC: job 1, round 2 - metrics timeseries empty due to job-level gc", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 99)), + sumMetric(sum1, doublePoint(k1v10k2v20, t4, t4, 80)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t4, t4, bounds0, []uint64{9, 8, 10, 15})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t4, t4, bounds0, []uint64{55, 66, 33, 77})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 99)), + sumMetric(sum1, doublePoint(k1v10k2v20, t4, t4, 80)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t4, t4, bounds0, []uint64{9, 8, 10, 15})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t4, t4, bounds0, []uint64{55, 66, 33, 77})), + ), + }, + } + + gcInterval := 10 * time.Millisecond + ma := NewInitialPointAdjuster(zap.NewNop(), gcInterval, true) + + // run job 1, round 1 - all entries marked + 
runScript(t, ma, "job1", "0", job1Script1) + // sleep longer than gcInterval to enable job gc in the next run + time.Sleep(2 * gcInterval) + // run job 2, round1 - trigger job gc, unmarking all entries + runScript(t, ma, "job1", "1", job2Script1) + // sleep longer than gcInterval to enable job gc in the next run + time.Sleep(2 * gcInterval) + // re-run job 2, round1 - trigger job gc, removing unmarked entries + runScript(t, ma, "job1", "1", job2Script1) + // ensure that at least one jobsMap.gc() completed + ma.(*initialPointAdjuster).jobsMap.gc() + // run job 1, round 2 - verify that all job 1 timeseries have been gc'd + runScript(t, ma, "job1", "0", job1Script2) +} + +type metricsAdjusterTest struct { + description string + metrics pmetric.Metrics + adjusted pmetric.Metrics +} + +func runScript(t *testing.T, ma MetricsAdjuster, job, instance string, tests []*metricsAdjusterTest) { + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + adjusted := pmetric.NewMetrics() + test.metrics.CopyTo(adjusted) + // Add the instance/job to the input metrics. + adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceInstanceID, instance) + adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceName, job) + assert.NoError(t, ma.AdjustMetrics(adjusted)) + + // Add the instance/job to the expected metrics as well. 
+ test.adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceInstanceID, instance) + test.adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceName, job) + assert.EqualValues(t, test.adjusted, adjusted) + }) + } +} + +func BenchmarkGetAttributesSignature(b *testing.B) { + attrs := pcommon.NewMap() + attrs.PutStr("key1", "some-random-test-value-1") + attrs.PutStr("key2", "some-random-test-value-2") + attrs.PutStr("key6", "some-random-test-value-6") + attrs.PutStr("key3", "some-random-test-value-3") + attrs.PutStr("key4", "some-random-test-value-4") + attrs.PutStr("key5", "some-random-test-value-5") + attrs.PutStr("key7", "some-random-test-value-7") + attrs.PutStr("key8", "some-random-test-value-8") + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + getAttributesSignature(attrs) + } +} diff --git a/collector/receiver/prometheusreceiver/internal/metricsutil_test.go b/collector/receiver/prometheusreceiver/internal/metricsutil_test.go new file mode 100644 index 0000000..ea29c0e --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/metricsutil_test.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +type kv struct { + Key, Value string +} + +func metrics(metrics ...pmetric.Metric) pmetric.Metrics { + md := pmetric.NewMetrics() + ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + for _, metric := range metrics { + destMetric := ms.AppendEmpty() + metric.CopyTo(destMetric) + } + + return md +} + +func histogramPointRaw(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.HistogramDataPoint { + hdp := pmetric.NewHistogramDataPoint() + hdp.SetStartTimestamp(startTimestamp) + hdp.SetTimestamp(timestamp) + + attrs := hdp.Attributes() + for _, kv := range attributes { + attrs.PutStr(kv.Key, kv.Value) + } + + return hdp +} + +func histogramPoint(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, bounds []float64, counts []uint64) pmetric.HistogramDataPoint { + hdp := histogramPointRaw(attributes, startTimestamp, timestamp) + hdp.ExplicitBounds().FromRaw(bounds) + hdp.BucketCounts().FromRaw(counts) + + var sum float64 + var count uint64 + for i, bcount := range counts { + count += bcount + if i > 0 { + sum += float64(bcount) * bounds[i-1] + } + } + hdp.SetCount(count) + hdp.SetSum(sum) + + return hdp +} + +func histogramPointNoValue(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.HistogramDataPoint { + hdp := histogramPointRaw(attributes, startTimestamp, timestamp) + hdp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + + return hdp +} + +func histogramMetric(name string, points ...pmetric.HistogramDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + histogram := metric.SetEmptyHistogram() + histogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + destPointL := histogram.DataPoints() + // By default the AggregationTemporality is Cumulative until it'll be changed by the caller. 
+ for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} + +func doublePointRaw(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.NumberDataPoint { + ndp := pmetric.NewNumberDataPoint() + ndp.SetStartTimestamp(startTimestamp) + ndp.SetTimestamp(timestamp) + + for _, kv := range attributes { + ndp.Attributes().PutStr(kv.Key, kv.Value) + } + + return ndp +} + +func doublePoint(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, value float64) pmetric.NumberDataPoint { + ndp := doublePointRaw(attributes, startTimestamp, timestamp) + ndp.SetDoubleValue(value) + return ndp +} + +func doublePointNoValue(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.NumberDataPoint { + ndp := doublePointRaw(attributes, startTimestamp, timestamp) + ndp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + return ndp +} + +func gaugeMetric(name string, points ...pmetric.NumberDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + destPointL := metric.SetEmptyGauge().DataPoints() + for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} + +func sumMetric(name string, points ...pmetric.NumberDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + sum := metric.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + + destPointL := sum.DataPoints() + for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} + +func summaryPointRaw(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.SummaryDataPoint { + sdp := pmetric.NewSummaryDataPoint() + sdp.SetStartTimestamp(startTimestamp) + sdp.SetTimestamp(timestamp) + + for _, kv := range attributes { + sdp.Attributes().PutStr(kv.Key, kv.Value) + } + + 
return sdp +} + +func summaryPoint(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, count uint64, sum float64, quantiles, values []float64) pmetric.SummaryDataPoint { + sdp := summaryPointRaw(attributes, startTimestamp, timestamp) + sdp.SetCount(count) + sdp.SetSum(sum) + + qvL := sdp.QuantileValues() + for i := 0; i < len(quantiles); i++ { + qvi := qvL.AppendEmpty() + qvi.SetQuantile(quantiles[i]) + qvi.SetValue(values[i]) + } + + return sdp +} + +func summaryPointNoValue(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.SummaryDataPoint { + sdp := summaryPointRaw(attributes, startTimestamp, timestamp) + sdp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + + return sdp +} + +func summaryMetric(name string, points ...pmetric.SummaryDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + destPointL := metric.SetEmptySummary().DataPoints() + for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} diff --git a/collector/receiver/prometheusreceiver/internal/prom_to_otlp.go b/collector/receiver/prometheusreceiver/internal/prom_to_otlp.go new file mode 100644 index 0000000..d3251f3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/prom_to_otlp.go @@ -0,0 +1,104 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "net" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "go.opentelemetry.io/collector/pdata/pcommon" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +// isDiscernibleHost checks if a host can be used as a value for the 'host.name' key. +// localhost-like hosts and unspecified (0.0.0.0) hosts are not discernible. +func isDiscernibleHost(host string) bool { + ip := net.ParseIP(host) + if ip != nil { + // An IP is discernible if + // - it's not local (e.g. belongs to 127.0.0.0/8 or ::1/128) and + // - it's not unspecified (e.g. the 0.0.0.0 address). + return !ip.IsLoopback() && !ip.IsUnspecified() + } + + if host == "localhost" { + return false + } + + // not an IP, not 'localhost', assume it is discernible. + return true +} + +// CreateResource creates the resource data added to OTLP payloads. +func CreateResource(job, instance string, serviceDiscoveryLabels labels.Labels) pcommon.Resource { + host, port, err := net.SplitHostPort(instance) + if err != nil { + host = instance + } + resource := pcommon.NewResource() + attrs := resource.Attributes() + attrs.PutStr(conventions.AttributeServiceName, job) + if isDiscernibleHost(host) { + attrs.PutStr(conventions.AttributeNetHostName, host) + } + attrs.PutStr(conventions.AttributeServiceInstanceID, instance) + attrs.PutStr(conventions.AttributeNetHostPort, port) + attrs.PutStr(conventions.AttributeHTTPScheme, serviceDiscoveryLabels.Get(model.SchemeLabel)) + + addKubernetesResource(attrs, serviceDiscoveryLabels) + + return resource +} + +// kubernetesDiscoveryToResourceAttributes maps from metadata labels discovered +// through the kubernetes implementation of service discovery to opentelemetry +// resource attribute keys. 
+var kubernetesDiscoveryToResourceAttributes = map[string]string{ + "__meta_kubernetes_pod_name": conventions.AttributeK8SPodName, + "__meta_kubernetes_pod_uid": conventions.AttributeK8SPodUID, + "__meta_kubernetes_pod_container_name": conventions.AttributeK8SContainerName, + "__meta_kubernetes_namespace": conventions.AttributeK8SNamespaceName, + // Only one of the node name service discovery labels will be present + "__meta_kubernetes_pod_node_name": conventions.AttributeK8SNodeName, + "__meta_kubernetes_node_name": conventions.AttributeK8SNodeName, + "__meta_kubernetes_endpoint_node_name": conventions.AttributeK8SNodeName, +} + +// addKubernetesResource adds resource information detected by prometheus' +// kubernetes service discovery. +func addKubernetesResource(attrs pcommon.Map, serviceDiscoveryLabels labels.Labels) { + for sdKey, attributeKey := range kubernetesDiscoveryToResourceAttributes { + if attr := serviceDiscoveryLabels.Get(sdKey); attr != "" { + attrs.PutStr(attributeKey, attr) + } + } + controllerName := serviceDiscoveryLabels.Get("__meta_kubernetes_pod_controller_name") + controllerKind := serviceDiscoveryLabels.Get("__meta_kubernetes_pod_controller_kind") + if controllerKind != "" && controllerName != "" { + switch controllerKind { + case "ReplicaSet": + attrs.PutStr(conventions.AttributeK8SReplicaSetName, controllerName) + case "DaemonSet": + attrs.PutStr(conventions.AttributeK8SDaemonSetName, controllerName) + case "StatefulSet": + attrs.PutStr(conventions.AttributeK8SStatefulSetName, controllerName) + case "Job": + attrs.PutStr(conventions.AttributeK8SJobName, controllerName) + case "CronJob": + attrs.PutStr(conventions.AttributeK8SCronJobName, controllerName) + } + } +} diff --git a/collector/receiver/prometheusreceiver/internal/prom_to_otlp_test.go b/collector/receiver/prometheusreceiver/internal/prom_to_otlp_test.go new file mode 100644 index 0000000..b944088 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/internal/prom_to_otlp_test.go @@ -0,0 +1,289 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +type jobInstanceDefinition struct { + job, instance, host, scheme, port string +} + +type k8sResourceDefinition struct { + podName, podUID, container, node, rs, ds, ss, job, cronjob, ns string +} + +func makeK8sResource(jobInstance *jobInstanceDefinition, def *k8sResourceDefinition) pcommon.Resource { + resource := makeResourceWithJobInstanceScheme(jobInstance, true) + attrs := resource.Attributes() + if def.podName != "" { + attrs.PutStr(conventions.AttributeK8SPodName, def.podName) + } + if def.podUID != "" { + attrs.PutStr(conventions.AttributeK8SPodUID, def.podUID) + } + if def.container != "" { + attrs.PutStr(conventions.AttributeK8SContainerName, def.container) + } + if def.node != "" { + attrs.PutStr(conventions.AttributeK8SNodeName, def.node) + } + if def.rs != "" { + attrs.PutStr(conventions.AttributeK8SReplicaSetName, def.rs) + } + if def.ds != "" { + attrs.PutStr(conventions.AttributeK8SDaemonSetName, def.ds) + } + if def.ss != "" { + attrs.PutStr(conventions.AttributeK8SStatefulSetName, def.ss) + } + if def.job != "" { + 
attrs.PutStr(conventions.AttributeK8SJobName, def.job) + } + if def.cronjob != "" { + attrs.PutStr(conventions.AttributeK8SCronJobName, def.cronjob) + } + if def.ns != "" { + attrs.PutStr(conventions.AttributeK8SNamespaceName, def.ns) + } + return resource +} + +func makeResourceWithJobInstanceScheme(def *jobInstanceDefinition, hasHost bool) pcommon.Resource { + resource := pcommon.NewResource() + attrs := resource.Attributes() + // Using hardcoded values to assert on outward expectations so that + // when variables change, these tests will fail and we'll have reports. + attrs.PutStr("service.name", def.job) + if hasHost { + attrs.PutStr("net.host.name", def.host) + } + attrs.PutStr("service.instance.id", def.instance) + attrs.PutStr("net.host.port", def.port) + attrs.PutStr("http.scheme", def.scheme) + return resource +} + +func TestCreateNodeAndResourcePromToOTLP(t *testing.T) { + tests := []struct { + name, job string + instance string + sdLabels labels.Labels + want pcommon.Resource + }{ + { + name: "all attributes proper", + job: "job", instance: "hostname:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, true), + }, + { + name: "missing port", + job: "job", instance: "myinstance", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "https"}), + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "myinstance", "myinstance", "https", "", + }, true), + }, + { + name: "blank scheme", + job: "job", instance: "myinstance:443", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: ""}), + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "myinstance:443", "myinstance", "", "443", + }, true), + }, + { + name: "blank instance, blank scheme", + job: "job", instance: "", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: ""}), + want: 
makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "", "", "", "", + }, true), + }, + { + name: "blank instance, non-blank scheme", + job: "job", instance: "", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "", "", "http", "", + }, true), + }, + { + name: "0.0.0.0 address", + job: "job", instance: "0.0.0.0:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "0.0.0.0:8888", "", "http", "8888", + }, false), + }, + { + name: "localhost", + job: "job", instance: "localhost:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "localhost:8888", "", "http", "8888", + }, false), + }, + { + name: "kubernetes daemonset pod", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "DaemonSet"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + ds: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes replicaset pod", + job: "job", instance: "hostname:8888", 
sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "ReplicaSet"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + rs: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes statefulset pod", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "StatefulSet"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + ss: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes job pod", + job: "job", 
instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "Job"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + job: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes cronjob pod", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "CronJob"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + cronjob: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes node 
(e.g. kubelet)", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_node_name", Value: "k8s-node-123"}, + ), + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + node: "k8s-node-123", + }), + }, + { + name: "kubernetes service endpoint", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_endpoint_node_name", Value: "k8s-node-123"}, + ), + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + node: "k8s-node-123", + }), + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + got := CreateResource(tt.job, tt.instance, tt.sdLabels) + require.Equal(t, tt.want.Attributes().AsRaw(), got.Attributes().AsRaw()) + }) + } +} diff --git a/collector/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go b/collector/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go new file mode 100644 index 0000000..24a17d1 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go @@ -0,0 +1,251 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal_test + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/confmap/provider/fileprovider" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/otelcol" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/batchprocessor" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" +) + +// Test that staleness markers are emitted for timeseries that intermittently disappear. +// This test runs the entire collector and end-to-end scrapes then checks with the +// Prometheus remotewrite exporter that staleness markers are emitted per timeseries. +// See https://github.com/open-telemetry/opentelemetry-collector/issues/3413 +func TestStalenessMarkersEndToEnd(t *testing.T) { + if testing.Short() { + t.Skip("This test can take a long time") + } + + ctx, cancel := context.WithCancel(context.Background()) + + // 1. Setup the server that sends series that intermittently appear and disappear. + n := &atomic.Uint64{} + scrapeServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Increment the scrape count atomically per scrape. + i := n.Add(1) + + select { + case <-ctx.Done(): + return + default: + } + + // Alternate metrics per scrape so that every one of + // them will be reported as stale. 
+ if i%2 == 0 { + fmt.Fprintf(rw, ` +# HELP jvm_memory_bytes_used Used bytes of a given JVM memory area. +# TYPE jvm_memory_bytes_used gauge +jvm_memory_bytes_used{area="heap"} %.1f`, float64(i)) + } else { + fmt.Fprintf(rw, ` +# HELP jvm_memory_pool_bytes_used Used bytes of a given JVM memory pool. +# TYPE jvm_memory_pool_bytes_used gauge +jvm_memory_pool_bytes_used{pool="CodeHeap 'non-nmethods'"} %.1f`, float64(i)) + } + })) + defer scrapeServer.Close() + + serverURL, err := url.Parse(scrapeServer.URL) + require.NoError(t, err) + + // 2. Set up the Prometheus RemoteWrite endpoint. + prweUploads := make(chan *prompb.WriteRequest) + prweServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Snappy decode the uploads. + payload, rerr := io.ReadAll(req.Body) + require.NoError(t, rerr) + + recv := make([]byte, len(payload)) + decoded, derr := snappy.Decode(recv, payload) + require.NoError(t, derr) + + writeReq := new(prompb.WriteRequest) + require.NoError(t, proto.Unmarshal(decoded, writeReq)) + + select { + case <-ctx.Done(): + return + case prweUploads <- writeReq: + } + })) + defer prweServer.Close() + + // 3. Set the OpenTelemetry Prometheus receiver. + cfg := fmt.Sprintf(` +receivers: + prometheus: + config: + scrape_configs: + - job_name: 'test' + scrape_interval: 2ms + static_configs: + - targets: [%q] + +processors: + batch: +exporters: + prometheusremotewrite: + endpoint: %q + tls: + insecure: true + +service: + pipelines: + metrics: + receivers: [prometheus] + processors: [batch] + exporters: [prometheusremotewrite]`, serverURL.Host, prweServer.URL) + + confFile, err := os.CreateTemp(os.TempDir(), "conf-") + require.Nil(t, err) + defer os.Remove(confFile.Name()) + _, err = confFile.Write([]byte(cfg)) + require.Nil(t, err) + // 4. Run the OpenTelemetry Collector. 
+ receivers, err := receiver.MakeFactoryMap(prometheusreceiver.NewFactory()) + require.Nil(t, err) + exporters, err := exporter.MakeFactoryMap(prometheusremotewriteexporter.NewFactory()) + require.Nil(t, err) + processors, err := processor.MakeFactoryMap(batchprocessor.NewFactory()) + require.Nil(t, err) + + factories := otelcol.Factories{ + Receivers: receivers, + Exporters: exporters, + Processors: processors, + } + + fmp := fileprovider.New() + configProvider, err := otelcol.NewConfigProvider( + otelcol.ConfigProviderSettings{ + ResolverSettings: confmap.ResolverSettings{ + URIs: []string{confFile.Name()}, + Providers: map[string]confmap.Provider{fmp.Scheme(): fmp}, + }, + }) + require.NoError(t, err) + + appSettings := otelcol.CollectorSettings{ + Factories: factories, + ConfigProvider: configProvider, + BuildInfo: component.BuildInfo{ + Command: "otelcol", + Description: "OpenTelemetry Collector", + Version: "tests", + }, + LoggingOptions: []zap.Option{ + // Turn off the verbose logging from the collector. + zap.WrapCore(func(zapcore.Core) zapcore.Core { + return zapcore.NewNopCore() + }), + }, + } + + app, err := otelcol.NewCollector(appSettings) + require.Nil(t, err) + + go func() { + assert.NoError(t, app.Run(context.Background())) + }() + defer app.Shutdown() + + // Wait until the collector has actually started. + for notYetStarted := true; notYetStarted; { + state := app.GetState() + switch state { + case otelcol.StateRunning, otelcol.StateClosed, otelcol.StateClosing: + notYetStarted = false + } + time.Sleep(10 * time.Millisecond) + } + + // 5. Let's wait on 10 fetches. + var wReqL []*prompb.WriteRequest + for i := 0; i < 10; i++ { + wReqL = append(wReqL, <-prweUploads) + } + defer cancel() + + // 6. Assert that we encounter the stale markers aka special NaNs for the various time series. 
+	staleMarkerCount := 0
+	totalSamples := 0
+	require.True(t, len(wReqL) > 0, "Expecting at least one WriteRequest")
+	for i, wReq := range wReqL {
+		name := fmt.Sprintf("WriteRequest#%d", i)
+		require.True(t, len(wReq.Timeseries) > 0, "Expecting at least 1 timeSeries for:: "+name)
+		for j, ts := range wReq.Timeseries {
+			fullName := fmt.Sprintf("%s/TimeSeries#%d", name, j)
+			assert.True(t, len(ts.Samples) > 0, "Expected at least 1 Sample in:: "+fullName)
+
+			// We are strictly counting series directly included in the scrapes, and no
+			// internal timeseries like "up" nor "scrape_seconds" etc.
+			metricName := ""
+			for _, label := range ts.Labels {
+				if label.Name == "__name__" {
+					metricName = label.Value
+				}
+			}
+			if !strings.HasPrefix(metricName, "jvm") {
+				continue
+			}
+
+			for _, sample := range ts.Samples {
+				totalSamples++
+				if value.IsStaleNaN(sample.Value) {
+					staleMarkerCount++
+				}
+			}
+		}
+	}
+
+	require.True(t, totalSamples > 0, "Expected at least 1 sample")
+	// On every alternative scrape the prior scrape will be reported as stale.
+	// Expect at least:
+	//	* The first scrape will NOT return stale markers
+	//	* (N-1 / alternatives) = ((10-1) / 2) = ~40% chance of stale markers being emitted.
+	chance := float64(staleMarkerCount) / float64(totalSamples)
+	require.True(t, chance >= 0.4, fmt.Sprintf("Expected at least one stale marker: %.3f", chance))
+}
diff --git a/collector/receiver/prometheusreceiver/internal/starttimemetricadjuster.go b/collector/receiver/prometheusreceiver/internal/starttimemetricadjuster.go
new file mode 100644
index 0000000..ddb5660
--- /dev/null
+++ b/collector/receiver/prometheusreceiver/internal/starttimemetricadjuster.go
@@ -0,0 +1,128 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "errors" + "regexp" + + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" +) + +var ( + errNoStartTimeMetrics = errors.New("start_time metric is missing") + errNoDataPointsStartTimeMetric = errors.New("start time metric with no data points") + errUnsupportedTypeStartTimeMetric = errors.New("unsupported data type for start time metric") +) + +type startTimeMetricAdjuster struct { + startTimeMetricRegex *regexp.Regexp + logger *zap.Logger +} + +// NewStartTimeMetricAdjuster returns a new MetricsAdjuster that adjust metrics' start times based on a start time metric. 
+func NewStartTimeMetricAdjuster(logger *zap.Logger, startTimeMetricRegex *regexp.Regexp) MetricsAdjuster { + return &startTimeMetricAdjuster{ + startTimeMetricRegex: startTimeMetricRegex, + logger: logger, + } +} + +func (stma *startTimeMetricAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { + startTime, err := stma.getStartTime(metrics) + if err != nil { + return err + } + + startTimeTs := timestampFromFloat64(startTime) + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + switch metric.Type() { + case pmetric.MetricTypeGauge: + continue + + case pmetric.MetricTypeSum: + dataPoints := metric.Sum().DataPoints() + for l := 0; l < dataPoints.Len(); l++ { + dp := dataPoints.At(l) + dp.SetStartTimestamp(startTimeTs) + } + + case pmetric.MetricTypeSummary: + dataPoints := metric.Summary().DataPoints() + for l := 0; l < dataPoints.Len(); l++ { + dp := dataPoints.At(l) + dp.SetStartTimestamp(startTimeTs) + } + + case pmetric.MetricTypeHistogram: + dataPoints := metric.Histogram().DataPoints() + for l := 0; l < dataPoints.Len(); l++ { + dp := dataPoints.At(l) + dp.SetStartTimestamp(startTimeTs) + } + + default: + stma.logger.Warn("Unknown metric type", zap.String("type", metric.Type().String())) + } + } + } + } + + return nil +} + +func (stma *startTimeMetricAdjuster) getStartTime(metrics pmetric.Metrics) (float64, error) { + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + if stma.matchStartTimeMetric(metric.Name()) { + switch metric.Type() { + case pmetric.MetricTypeGauge: + if metric.Gauge().DataPoints().Len() == 0 { + return 0.0, 
errNoDataPointsStartTimeMetric + } + return metric.Gauge().DataPoints().At(0).DoubleValue(), nil + + case pmetric.MetricTypeSum: + if metric.Sum().DataPoints().Len() == 0 { + return 0.0, errNoDataPointsStartTimeMetric + } + return metric.Sum().DataPoints().At(0).DoubleValue(), nil + + default: + return 0, errUnsupportedTypeStartTimeMetric + } + } + } + } + } + return 0.0, errNoStartTimeMetrics +} +func (stma *startTimeMetricAdjuster) matchStartTimeMetric(metricName string) bool { + if stma.startTimeMetricRegex != nil { + return stma.startTimeMetricRegex.MatchString(metricName) + } + + return metricName == startTimeMetricName +} diff --git a/collector/receiver/prometheusreceiver/internal/starttimemetricadjuster_test.go b/collector/receiver/prometheusreceiver/internal/starttimemetricadjuster_test.go new file mode 100644 index 0000000..0d4e1c6 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/starttimemetricadjuster_test.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" +) + +func TestStartTimeMetricMatch(t *testing.T) { + const startTime = pcommon.Timestamp(123 * 1e9) + const currentTime = pcommon.Timestamp(126 * 1e9) + const matchBuilderStartTime = 124 + + tests := []struct { + name string + inputs pmetric.Metrics + startTimeMetricRegex *regexp.Regexp + expectedStartTime pcommon.Timestamp + expectedErr error + }{ + { + name: "regexp_match_sum_metric", + inputs: metrics( + sumMetric("test_sum_metric", doublePoint(nil, startTime, currentTime, 16)), + histogramMetric("test_histogram_metric", histogramPoint(nil, startTime, currentTime, []float64{1, 2}, []uint64{2, 3, 4})), + summaryMetric("test_summary_metric", summaryPoint(nil, startTime, currentTime, 10, 100, []float64{10, 50, 90}, []float64{9, 15, 48})), + sumMetric("example_process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + sumMetric("process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime+1)), + ), + startTimeMetricRegex: regexp.MustCompile("^.*_process_start_time_seconds$"), + expectedStartTime: timestampFromFloat64(matchBuilderStartTime), + }, + { + name: "match_default_sum_start_time_metric", + inputs: metrics( + sumMetric("test_sum_metric", doublePoint(nil, startTime, currentTime, 16)), + histogramMetric("test_histogram_metric", histogramPoint(nil, startTime, currentTime, []float64{1, 2}, []uint64{2, 3, 4})), + summaryMetric("test_summary_metric", summaryPoint(nil, startTime, currentTime, 10, 100, []float64{10, 50, 90}, []float64{9, 15, 48})), + sumMetric("example_process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + sumMetric("process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime+1)), + ), + 
expectedStartTime: timestampFromFloat64(matchBuilderStartTime + 1), + }, + { + name: "regexp_match_gauge_metric", + inputs: metrics( + sumMetric("test_sum_metric", doublePoint(nil, startTime, currentTime, 16)), + histogramMetric("test_histogram_metric", histogramPoint(nil, startTime, currentTime, []float64{1, 2}, []uint64{2, 3, 4})), + summaryMetric("test_summary_metric", summaryPoint(nil, startTime, currentTime, 10, 100, []float64{10, 50, 90}, []float64{9, 15, 48})), + gaugeMetric("example_process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + gaugeMetric("process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime+1)), + ), + startTimeMetricRegex: regexp.MustCompile("^.*_process_start_time_seconds$"), + expectedStartTime: timestampFromFloat64(matchBuilderStartTime), + }, + { + name: "match_default_gauge_start_time_metric", + inputs: metrics( + sumMetric("test_sum_metric", doublePoint(nil, startTime, currentTime, 16)), + histogramMetric("test_histogram_metric", histogramPoint(nil, startTime, currentTime, []float64{1, 2}, []uint64{2, 3, 4})), + summaryMetric("test_summary_metric", summaryPoint(nil, startTime, currentTime, 10, 100, []float64{10, 50, 90}, []float64{9, 15, 48})), + gaugeMetric("example_process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + gaugeMetric("process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime+1)), + ), + expectedStartTime: timestampFromFloat64(matchBuilderStartTime + 1), + }, + { + name: "empty gauge start time metrics", + inputs: metrics( + gaugeMetric("process_start_time_seconds"), + ), + expectedErr: errNoDataPointsStartTimeMetric, + }, + { + name: "empty sum start time metrics", + inputs: metrics( + sumMetric("process_start_time_seconds"), + ), + expectedErr: errNoDataPointsStartTimeMetric, + }, + { + name: "unsupported type start time metric", + inputs: metrics( + 
histogramMetric("process_start_time_seconds"), + ), + expectedErr: errUnsupportedTypeStartTimeMetric, + }, + { + name: "regexp_nomatch", + inputs: metrics( + sumMetric("subprocess_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + ), + startTimeMetricRegex: regexp.MustCompile("^.+_process_start_time_seconds$"), + expectedErr: errNoStartTimeMetrics, + }, + { + name: "nomatch_default_start_time_metric", + inputs: metrics( + gaugeMetric("subprocess_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + ), + expectedErr: errNoStartTimeMetrics, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stma := NewStartTimeMetricAdjuster(zap.NewNop(), tt.startTimeMetricRegex) + if tt.expectedErr != nil { + assert.ErrorIs(t, stma.AdjustMetrics(tt.inputs), tt.expectedErr) + return + } + assert.NoError(t, stma.AdjustMetrics(tt.inputs)) + for i := 0; i < tt.inputs.ResourceMetrics().Len(); i++ { + rm := tt.inputs.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + switch metric.Type() { + case pmetric.MetricTypeSum: + dps := metric.Sum().DataPoints() + for l := 0; l < dps.Len(); l++ { + assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) + } + case pmetric.MetricTypeSummary: + dps := metric.Summary().DataPoints() + for l := 0; l < dps.Len(); l++ { + assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) + } + case pmetric.MetricTypeHistogram: + dps := metric.Histogram().DataPoints() + for l := 0; l < dps.Len(); l++ { + assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) + } + } + } + } + } + }) + } +} diff --git a/collector/receiver/prometheusreceiver/internal/transaction.go b/collector/receiver/prometheusreceiver/internal/transaction.go new file mode 100644 index 0000000..df6b95d --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/internal/transaction.go @@ -0,0 +1,293 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "context" + "errors" + "fmt" + "sort" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/storage" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" + + prometheustranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" +) + +const ( + targetMetricName = "target_info" +) + +type transaction struct { + isNew bool + ctx context.Context + families map[string]*metricFamily + mc scrape.MetricMetadataStore + sink consumer.Metrics + externalLabels labels.Labels + nodeResource pcommon.Resource + logger *zap.Logger + metricAdjuster MetricsAdjuster + obsrecv 
*obsreport.Receiver + // Used as buffer to calculate series ref hash. + bufBytes []byte + normalizer *prometheustranslator.Normalizer + preserveUntyped bool +} + +func newTransaction( + ctx context.Context, + metricAdjuster MetricsAdjuster, + sink consumer.Metrics, + externalLabels labels.Labels, + settings receiver.CreateSettings, + obsrecv *obsreport.Receiver, + registry *featuregate.Registry, + preserveUntyped bool) *transaction { + return &transaction{ + ctx: ctx, + families: make(map[string]*metricFamily), + isNew: true, + sink: sink, + metricAdjuster: metricAdjuster, + externalLabels: externalLabels, + logger: settings.Logger, + obsrecv: obsrecv, + bufBytes: make([]byte, 0, 1024), + normalizer: prometheustranslator.NewNormalizer(registry), + preserveUntyped: preserveUntyped, + } +} + +// Append always returns 0 to disable label caching. +func (t *transaction) Append(ref storage.SeriesRef, ls labels.Labels, atMs int64, val float64) (storage.SeriesRef, error) { + select { + case <-t.ctx.Done(): + return 0, errTransactionAborted + default: + } + + if len(t.externalLabels) != 0 { + ls = append(ls, t.externalLabels...) + sort.Sort(ls) + } + + if t.isNew { + if err := t.initTransaction(ls); err != nil { + return 0, err + } + } + + // Any datapoint with duplicate labels MUST be rejected per: + // * https://github.com/open-telemetry/wg-prometheus/issues/44 + // * https://github.com/open-telemetry/opentelemetry-collector/issues/3407 + // as Prometheus rejects such too as of version 2.16.0, released on 2020-02-13. + if dupLabel, hasDup := ls.HasDuplicateLabelNames(); hasDup { + return 0, fmt.Errorf("invalid sample: non-unique label names: %q", dupLabel) + } + + metricName := ls.Get(model.MetricNameLabel) + if metricName == "" { + return 0, errMetricNameNotFound + } + + // See https://www.prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series + // up: 1 if the instance is healthy, i.e. reachable, or 0 if the scrape failed. 
+ // But it can also be a staleNaN, which is inserted when the target goes away. + if metricName == scrapeUpMetricName && val != 1.0 && !value.IsStaleNaN(val) { + if val == 0.0 { + t.logger.Warn("Failed to scrape Prometheus endpoint", + zap.Int64("scrape_timestamp", atMs), + zap.Stringer("target_labels", ls)) + } else { + t.logger.Warn("The 'up' metric contains invalid value", + zap.Float64("value", val), + zap.Int64("scrape_timestamp", atMs), + zap.Stringer("target_labels", ls)) + } + } + + // For the `target_info` metric we need to convert it to resource attributes. + if metricName == targetMetricName { + return 0, t.AddTargetInfo(ls) + } + + curMF := t.getOrCreateMetricFamily(metricName) + + return 0, curMF.addSeries(t.getSeriesRef(ls, curMF.mtype), metricName, ls, atMs, val) +} + +func (t *transaction) getOrCreateMetricFamily(mn string) *metricFamily { + curMf, ok := t.families[mn] + if !ok { + fn := mn + if _, ok := t.mc.GetMetadata(mn); !ok { + fn = normalizeMetricName(mn) + } + if mf, ok := t.families[fn]; ok && mf.includesMetric(mn) { + curMf = mf + } else { + curMf = newMetricFamily(mn, t.mc, t.logger, t.preserveUntyped) + t.families[curMf.name] = curMf + } + } + return curMf +} + +func (t *transaction) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + select { + case <-t.ctx.Done(): + return 0, errTransactionAborted + default: + } + + if t.isNew { + if err := t.initTransaction(l); err != nil { + return 0, err + } + } + + l = l.WithoutEmpty() + + if dupLabel, hasDup := l.HasDuplicateLabelNames(); hasDup { + return 0, fmt.Errorf("invalid sample: non-unique label names: %q", dupLabel) + } + + mn := l.Get(model.MetricNameLabel) + if mn == "" { + return 0, errMetricNameNotFound + } + + mf := t.getOrCreateMetricFamily(mn) + mf.addExemplar(t.getSeriesRef(l, mf.mtype), e) + + return 0, nil +} + +func (t *transaction) AppendHistogram(ref storage.SeriesRef, l labels.Labels, atMs int64, h *histogram.Histogram, 
fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + //TODO: implement this func + return 0, nil +} + +func (t *transaction) getSeriesRef(ls labels.Labels, mtype pmetric.MetricType) uint64 { + var hash uint64 + hash, t.bufBytes = getSeriesRef(t.bufBytes, ls, mtype) + return hash +} + +// getMetrics returns all metrics to the given slice. +// The only error returned by this function is errNoDataToBuild. +func (t *transaction) getMetrics(resource pcommon.Resource) (pmetric.Metrics, error) { + if len(t.families) == 0 { + return pmetric.Metrics{}, errNoDataToBuild + } + + md := pmetric.NewMetrics() + rms := md.ResourceMetrics().AppendEmpty() + resource.CopyTo(rms.Resource()) + metrics := rms.ScopeMetrics().AppendEmpty().Metrics() + + for _, mf := range t.families { + mf.appendMetric(metrics, t.normalizer) + } + + return md, nil +} + +func (t *transaction) initTransaction(labels labels.Labels) error { + target, ok := scrape.TargetFromContext(t.ctx) + if !ok { + return errors.New("unable to find target in context") + } + t.mc, ok = scrape.MetricMetadataStoreFromContext(t.ctx) + if !ok { + return errors.New("unable to find MetricMetadataStore in context") + } + + job, instance := labels.Get(model.JobLabel), labels.Get(model.InstanceLabel) + if job == "" || instance == "" { + return errNoJobInstance + } + t.nodeResource = CreateResource(job, instance, target.DiscoveredLabels()) + t.isNew = false + return nil +} + +func (t *transaction) Commit() error { + if t.isNew { + return nil + } + + ctx := t.obsrecv.StartMetricsOp(t.ctx) + md, err := t.getMetrics(t.nodeResource) + if err != nil { + t.obsrecv.EndMetricsOp(ctx, dataformat, 0, err) + return err + } + + numPoints := md.DataPointCount() + if numPoints == 0 { + return nil + } + + if err = t.metricAdjuster.AdjustMetrics(md); err != nil { + t.obsrecv.EndMetricsOp(ctx, dataformat, numPoints, err) + return err + } + + err = t.sink.ConsumeMetrics(ctx, md) + t.obsrecv.EndMetricsOp(ctx, dataformat, numPoints, err) + return 
err +} + +func (t *transaction) Rollback() error { + return nil +} + +func (t *transaction) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { + //TODO: implement this func + return 0, nil +} + +func (t *transaction) AddTargetInfo(labels labels.Labels) error { + attrs := t.nodeResource.Attributes() + + for _, lbl := range labels { + if lbl.Name == model.JobLabel || lbl.Name == model.InstanceLabel || lbl.Name == model.MetricNameLabel { + continue + } + + attrs.PutStr(lbl.Name, lbl.Value) + } + + return nil +} + +func getSeriesRef(bytes []byte, ls labels.Labels, mtype pmetric.MetricType) (uint64, []byte) { + return ls.HashWithoutLabels(bytes, getSortedNotUsefulLabels(mtype)...) +} diff --git a/collector/receiver/prometheusreceiver/internal/transaction_test.go b/collector/receiver/prometheusreceiver/internal/transaction_test.go new file mode 100644 index 0000000..f8719f2 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/transaction_test.go @@ -0,0 +1,1489 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/scrape" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +const ( + startTimestamp = pcommon.Timestamp(1555366608340000000) + ts = int64(1555366610000) + interval = int64(15 * 1000) + tsNanos = pcommon.Timestamp(ts * 1e6) + tsPlusIntervalNanos = pcommon.Timestamp((ts + interval) * 1e6) +) + +var ( + target = scrape.NewTarget( + // processedLabels contain label values after processing (e.g. 
relabeling) + labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + }), + // discoveredLabels contain labels prior to any processing + labels.FromMap(map[string]string{ + model.AddressLabel: "address:8080", + model.SchemeLabel: "http", + }), + nil) + + scrapeCtx = scrape.ContextWithMetricMetadataStore( + scrape.ContextWithTarget(context.Background(), target), + testMetadataStore(testMetadata)) +) + +func TestTransactionCommitWithoutAdding(t *testing.T) { + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + assert.NoError(t, tr.Commit()) +} + +func TestTransactionRollbackDoesNothing(t *testing.T) { + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + assert.NoError(t, tr.Rollback()) +} + +func TestTransactionUpdateMetadataDoesNothing(t *testing.T) { + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + _, err := tr.UpdateMetadata(0, labels.New(), metadata.Metadata{}) + assert.NoError(t, err) +} + +func TestTransactionAppendNoTarget(t *testing.T) { + badLabels := labels.FromStrings(model.MetricNameLabel, "counter_test") + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + _, err := tr.Append(0, badLabels, time.Now().Unix()*1000, 1.0) + assert.Error(t, err) +} + +func TestTransactionAppendNoMetricName(t *testing.T) { + jobNotFoundLb := labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test2", + }) + tr := newTransaction(scrapeCtx, 
&startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + _, err := tr.Append(0, jobNotFoundLb, time.Now().Unix()*1000, 1.0) + assert.ErrorIs(t, err, errMetricNameNotFound) + + assert.ErrorIs(t, tr.Commit(), errNoDataToBuild) +} + +func TestTransactionAppendEmptyMetricName(t *testing.T) { + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test2", + model.MetricNameLabel: "", + }), time.Now().Unix()*1000, 1.0) + assert.ErrorIs(t, err, errMetricNameNotFound) +} + +func TestTransactionAppendResource(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: "counter_test", + }), time.Now().Unix()*1000, 1.0) + assert.NoError(t, err) + _, err = tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: startTimeMetricName, + }), time.Now().UnixMilli(), 1.0) + assert.NoError(t, err) + assert.NoError(t, tr.Commit()) + expectedResource := CreateResource("test", "localhost:8080", labels.FromStrings(model.SchemeLabel, "http")) + mds := sink.AllMetrics() + require.Len(t, mds, 1) + gotResource := mds[0].ResourceMetrics().At(0).Resource() + require.Equal(t, expectedResource, gotResource) +} + +func TestTransactionCommitErrorWhenAdjusterError(t *testing.T) { + goodLabels := labels.FromMap(map[string]string{ + 
model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: "counter_test", + }) + sink := new(consumertest.MetricsSink) + adjusterErr := errors.New("adjuster error") + tr := newTransaction(scrapeCtx, &errorAdjuster{err: adjusterErr}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + _, err := tr.Append(0, goodLabels, time.Now().Unix()*1000, 1.0) + assert.NoError(t, err) + assert.ErrorIs(t, tr.Commit(), adjusterErr) +} + +// Ensure that we reject duplicate label keys. See https://github.com/open-telemetry/wg-prometheus/issues/44. +func TestTransactionAppendDuplicateLabels(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + + dupLabels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "counter_test", + "a", "1", + "a", "6", + "z", "9", + ) + + _, err := tr.Append(0, dupLabels, 1917, 1.0) + require.Error(t, err) + assert.Contains(t, err.Error(), `invalid sample: non-unique label names: "a"`) +} + +func TestTransactionAppendHistogramNoLe(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + + goodLabels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "hist_test_bucket", + ) + + _, err := tr.Append(0, goodLabels, 1917, 1.0) + require.ErrorIs(t, err, errEmptyLeLabel) +} + +func TestTransactionAppendSummaryNoQuantile(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), 
nopObsRecv(t), featuregate.GlobalRegistry(), true) + + goodLabels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "summary_test", + ) + + _, err := tr.Append(0, goodLabels, 1917, 1.0) + require.ErrorIs(t, err, errEmptyQuantileLabel) +} + +func TestAppendExemplarWithNoMetricName(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + ) + + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errMetricNameNotFound, err) +} + +func TestAppendExemplarWithEmptyMetricName(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errMetricNameNotFound, err) +} + +func TestAppendExemplarWithDuplicateLabels(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "", + "a", "b", + "a", "c", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + require.Error(t, err) + assert.Contains(t, err.Error(), `invalid sample: non-unique label names: "a"`) +} + +func 
TestAppendExemplarWithoutAddingMetric(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "counter_test", + "a", "b", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.NoError(t, err) +} + +func TestAppendExemplarWithNoLabels(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + + _, err := tr.AppendExemplar(0, nil, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errNoJobInstance, err) +} + +func TestAppendExemplarWithEmptyLabelArray(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + + _, err := tr.AppendExemplar(0, []labels.Label{}, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errNoJobInstance, err) +} + +func nopObsRecv(t *testing.T) *obsreport.Receiver { + obsrecv, err := obsreport.NewReceiver(obsreport.ReceiverSettings{ + ReceiverID: component.NewID("prometheus"), + Transport: transport, + ReceiverCreateSettings: receivertest.NewNopCreateSettings(), + }) + require.NoError(t, err) + return obsrecv +} + +func TestMetricBuilderCounters(t *testing.T) { + tests := []buildTestData{ + { + name: "single-item", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := 
md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + sum := m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "single-item-with-exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "counter_test", + 100, + []exemplar.Exemplar{ + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: ""}, {Name: "span_id", Value: ""}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}, + }, + }, + "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + sum := m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + e0 := 
pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663113420863)) + e0.SetDoubleValue(1) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr("foo", "bar") + + e1 := pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663113420863)) + e1.SetDoubleValue(1) + e1.FilteredAttributes().PutStr("foo", "bar") + + e2 := pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663113420863)) + e2.SetDoubleValue(1) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := pt0.Exemplars().AppendEmpty() + e3.SetTimestamp(timestampFromMs(1663113420863)) + e3.SetDoubleValue(1) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + + return []pmetric.Metrics{md0} + }, + }, + { + name: "two-items", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 150, nil, "foo", "bar"), + createDataPoint("counter_test", 25, nil, "foo", "other"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + sum := m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(150.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := 
sum.DataPoints().AppendEmpty() + pt1.SetDoubleValue(25.0) + pt1.SetStartTimestamp(startTimestamp) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("foo", "other") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "two-metrics", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 150, nil, "foo", "bar"), + createDataPoint("counter_test", 25, nil, "foo", "other"), + createDataPoint("counter_test2", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + sum0 := m0.SetEmptySum() + sum0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum0.SetIsMonotonic(true) + pt0 := sum0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(150.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := sum0.DataPoints().AppendEmpty() + pt1.SetDoubleValue(25.0) + pt1.SetStartTimestamp(startTimestamp) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("foo", "other") + + m1 := mL0.AppendEmpty() + m1.SetName("counter_test2") + sum1 := m1.SetEmptySum() + sum1.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum1.SetIsMonotonic(true) + pt2 := sum1.DataPoints().AppendEmpty() + pt2.SetDoubleValue(100.0) + pt2.SetStartTimestamp(startTimestamp) + pt2.SetTimestamp(tsNanos) + pt2.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "metrics-with-poor-names", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("poor_name_count", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("poor_name_count") + sum := 
m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.run(t) + }) + } +} + +func TestMetricBuilderGauges(t *testing.T) { + tests := []buildTestData{ + { + name: "one-gauge", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 90, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + md1 := pmetric.NewMetrics() + mL1 := md1.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m1 := mL1.AppendEmpty() + m1.SetName("gauge_test") + gauge1 := m1.SetEmptyGauge() + pt1 := gauge1.DataPoints().AppendEmpty() + pt1.SetDoubleValue(90.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsPlusIntervalNanos) + pt1.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0, md1} + }, + }, + { + name: "one-gauge-with-exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "gauge_test", + 100, + []exemplar.Exemplar{ + { + Value: 2, + Ts: 1663350815890, + Labels: []labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}, + }, + { + 
Value: 2, + Ts: 1663350815890, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: ""}, {Name: "span_id", Value: ""}}, + }, + { + Value: 2, + Ts: 1663350815890, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}, + }, + { + Value: 2, + Ts: 1663350815890, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}, + }, + }, + "foo", "bar"), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 90, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + e0 := pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663350815890)) + e0.SetDoubleValue(2) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr("foo", "bar") + + e1 := pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663350815890)) + e1.SetDoubleValue(2) + e1.FilteredAttributes().PutStr("foo", "bar") + + e2 := pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663350815890)) + e2.SetDoubleValue(2) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := pt0.Exemplars().AppendEmpty() + 
e3.SetTimestamp(timestampFromMs(1663350815890)) + e3.SetDoubleValue(2) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + + md1 := pmetric.NewMetrics() + mL1 := md1.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m1 := mL1.AppendEmpty() + m1.SetName("gauge_test") + gauge1 := m1.SetEmptyGauge() + pt1 := gauge1.DataPoints().AppendEmpty() + pt1.SetDoubleValue(90.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsPlusIntervalNanos) + pt1.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0, md1} + }, + }, + { + name: "gauge-with-different-tags", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + createDataPoint("gauge_test", 200, nil, "bar", "foo"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := gauge0.DataPoints().AppendEmpty() + pt1.SetDoubleValue(200.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("bar", "foo") + + return []pmetric.Metrics{md0} + }, + }, + { + // TODO: A decision need to be made. 
If we want to have the behavior which can generate different tag key + // sets because metrics come and go + name: "gauge-comes-and-go-with-different-tagset", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + createDataPoint("gauge_test", 200, nil, "bar", "foo"), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 20, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := gauge0.DataPoints().AppendEmpty() + pt1.SetDoubleValue(200.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("bar", "foo") + + md1 := pmetric.NewMetrics() + mL1 := md1.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m1 := mL1.AppendEmpty() + m1.SetName("gauge_test") + gauge1 := m1.SetEmptyGauge() + pt2 := gauge1.DataPoints().AppendEmpty() + pt2.SetDoubleValue(20.0) + pt2.SetStartTimestamp(0) + pt2.SetTimestamp(tsPlusIntervalNanos) + pt2.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0, md1} + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.run(t) + }) + } +} + +func TestMetricBuilderUntyped(t *testing.T) { + tests := []buildTestData{ + { + name: "one-unknown", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("unknown_test", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("unknown_test") + gauge0 := 
m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + pt0.Attributes().PutBool(GCPOpsAgentUntypedMetricKey, true) + + return []pmetric.Metrics{md0} + }, + }, + { + name: "no-type-hint", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("something_not_exists", 100, nil, "foo", "bar"), + createDataPoint("theother_not_exists", 200, nil, "foo", "bar"), + createDataPoint("theother_not_exists", 300, nil, "bar", "foo"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("something_not_exists") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + pt0.Attributes().PutBool(GCPOpsAgentUntypedMetricKey, true) + + m1 := mL0.AppendEmpty() + m1.SetName("theother_not_exists") + gauge1 := m1.SetEmptyGauge() + pt1 := gauge1.DataPoints().AppendEmpty() + pt1.SetDoubleValue(200.0) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("foo", "bar") + pt1.Attributes().PutBool(GCPOpsAgentUntypedMetricKey, true) + + pt2 := gauge1.DataPoints().AppendEmpty() + pt2.SetDoubleValue(300.0) + pt2.SetTimestamp(tsNanos) + pt2.Attributes().PutStr("bar", "foo") + pt2.Attributes().PutBool(GCPOpsAgentUntypedMetricKey, true) + + return []pmetric.Metrics{md0} + }, + }, + { + name: "untype-metric-poor-names", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("some_count", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("some_count") + gauge0 := m0.SetEmptyGauge() + pt0 := 
gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + pt0.Attributes().PutBool(GCPOpsAgentUntypedMetricKey, true) + + return []pmetric.Metrics{md0} + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.run(t) + }) + } +} + +func TestMetricBuilderHistogram(t *testing.T) { + tests := []buildTestData{ + { + name: "single item", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "single item with exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "hist_test_bucket", + 1, + []exemplar.Exemplar{ + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: 
"trace_id", Value: ""}, {Name: "span_id", Value: ""}, {Name: "le", Value: "20"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "traceid", Value: "e3688e1aa2961786"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc88"}, {Name: "span_id", Value: "dfa4597a9"}}, + }, + }, + "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + e0 := pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663113420863)) + e0.SetDoubleValue(1) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr("foo", "bar") + + e1 := 
pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663113420863)) + e1.SetDoubleValue(1) + e1.FilteredAttributes().PutStr("foo", "bar") + e1.FilteredAttributes().PutStr("le", "20") + + e2 := pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663113420863)) + e2.SetDoubleValue(1) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.FilteredAttributes().PutStr("traceid", "e3688e1aa2961786") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := pt0.Exemplars().AppendEmpty() + e3.SetTimestamp(timestampFromMs(1663113420863)) + e3.SetDoubleValue(1) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + + e4 := pt0.Exemplars().AppendEmpty() + e4.SetTimestamp(timestampFromMs(1663113420863)) + e4.SetDoubleValue(1) + e4.FilteredAttributes().PutStr("foo", "bar") + e4.FilteredAttributes().PutStr("trace_id", "174137cab66dc88") + e4.FilteredAttributes().PutStr("span_id", "dfa4597a9") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "multi-groups", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "key2", "v2", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "key2", "v2", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "key2", "v2", "le", "+inf"), + createDataPoint("hist_test_sum", 50, nil, "key2", 
"v2"), + createDataPoint("hist_test_count", 3, nil, "key2", "v2"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := hist0.DataPoints().AppendEmpty() + pt1.SetCount(3) + pt1.SetSum(50) + pt1.ExplicitBounds().FromRaw([]float64{10, 20}) + pt1.BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt1.SetTimestamp(tsNanos) + pt1.SetStartTimestamp(startTimestamp) + pt1.Attributes().PutStr("key2", "v2") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "multi-groups-and-families", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "key2", "v2", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "key2", "v2", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "key2", "v2", "le", "+inf"), + createDataPoint("hist_test_sum", 50, nil, "key2", "v2"), + createDataPoint("hist_test_count", 3, nil, "key2", "v2"), + createDataPoint("hist_test2_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test2_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test2_bucket", 3, nil, "foo", "bar", 
"le", "+inf"), + createDataPoint("hist_test2_sum", 50, nil, "foo", "bar"), + createDataPoint("hist_test2_count", 3, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := hist0.DataPoints().AppendEmpty() + pt1.SetCount(3) + pt1.SetSum(50) + pt1.ExplicitBounds().FromRaw([]float64{10, 20}) + pt1.BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt1.SetTimestamp(tsNanos) + pt1.SetStartTimestamp(startTimestamp) + pt1.Attributes().PutStr("key2", "v2") + + m1 := mL0.AppendEmpty() + m1.SetName("hist_test2") + hist1 := m1.SetEmptyHistogram() + hist1.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt2 := hist1.DataPoints().AppendEmpty() + pt2.SetCount(3) + pt2.SetSum(50) + pt2.ExplicitBounds().FromRaw([]float64{10, 20}) + pt2.BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt2.SetTimestamp(tsNanos) + pt2.SetStartTimestamp(startTimestamp) + pt2.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "unordered-buckets", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + }, + }, + }, + wants: func() 
[]pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + // this won't likely happen in real env, as prometheus wont generate histogram with less than 3 buckets + name: "only-one-bucket", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), + createDataPoint("hist_test_sum", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(3) + pt0.SetSum(100) + pt0.BucketCounts().FromRaw([]uint64{3}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + // this won't likely happen in real env, as prometheus wont generate histogram with less than 3 buckets + name: "only-one-bucket-noninf", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), + createDataPoint("hist_test_sum", 100, nil, 
"foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + return []pmetric.Metrics{md0} + }, + }, + { + name: "no-sum", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(3) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "corrupted-no-buckets", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_sum", 99, nil), + createDataPoint("hist_test_count", 10, nil), + }, + }, + }, + wants: func() []pmetric.Metrics { + return []pmetric.Metrics{pmetric.NewMetrics()} + }, + }, + { + name: "corrupted-no-count", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + return []pmetric.Metrics{pmetric.NewMetrics()} + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + 
tt.run(t) + }) + } +} + +func TestMetricBuilderSummary(t *testing.T) { + tests := []buildTestData{ + { + name: "no-sum-and-count", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + }, + }, + }, + wants: func() []pmetric.Metrics { + return []pmetric.Metrics{pmetric.NewMetrics()} + }, + }, + { + name: "no-count", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_sum", 500, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + return []pmetric.Metrics{pmetric.NewMetrics()} + }, + }, + { + name: "no-sum", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("summary_test") + sum0 := m0.SetEmptySummary() + pt0 := sum0.DataPoints().AppendEmpty() + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetCount(500) + pt0.SetSum(0.0) + pt0.Attributes().PutStr("foo", "bar") + qvL := pt0.QuantileValues() + q50 := qvL.AppendEmpty() + q50.SetQuantile(.50) + q50.SetValue(1.0) + q75 := qvL.AppendEmpty() + q75.SetQuantile(.75) + q75.SetValue(2.0) + q100 := qvL.AppendEmpty() + q100.SetQuantile(1) + q100.SetValue(5.0) + return []pmetric.Metrics{md0} + }, + }, + { + name: "empty-quantiles", + inputs: 
[]*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test_sum", 100, nil, "foo", "bar"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("summary_test") + sum0 := m0.SetEmptySummary() + pt0 := sum0.DataPoints().AppendEmpty() + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.SetCount(500) + pt0.SetSum(100.0) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "regular-summary", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_sum", 100, nil, "foo", "bar"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("summary_test") + sum0 := m0.SetEmptySummary() + pt0 := sum0.DataPoints().AppendEmpty() + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.SetCount(500) + pt0.SetSum(100.0) + pt0.Attributes().PutStr("foo", "bar") + qvL := pt0.QuantileValues() + q50 := qvL.AppendEmpty() + q50.SetQuantile(.50) + q50.SetValue(1.0) + q75 := qvL.AppendEmpty() + q75.SetQuantile(.75) + q75.SetValue(2.0) + q100 := qvL.AppendEmpty() + q100.SetQuantile(1) + q100.SetValue(5.0) + + return []pmetric.Metrics{md0} + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.run(t) + }) + } + +} + +type buildTestData struct { + name string + inputs 
[]*testScrapedPage + wants func() []pmetric.Metrics +} + +func (tt buildTestData) run(t *testing.T) { + wants := tt.wants() + assert.EqualValues(t, len(wants), len(tt.inputs)) + st := ts + for i, page := range tt.inputs { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), featuregate.GlobalRegistry(), true) + for _, pt := range page.pts { + // set ts for testing + pt.t = st + _, err := tr.Append(0, pt.lb, pt.t, pt.v) + assert.NoError(t, err) + + for _, e := range pt.exemplars { + _, err := tr.AppendExemplar(0, pt.lb, e) + assert.NoError(t, err) + } + } + assert.NoError(t, tr.Commit()) + mds := sink.AllMetrics() + if wants[i].ResourceMetrics().Len() == 0 { + // Receiver does not emit empty metrics, so will not have anything in the sink. + require.Len(t, mds, 0) + st += interval + continue + } + require.Len(t, mds, 1) + assertEquivalentMetrics(t, wants[i], mds[0]) + st += interval + } +} + +type errorAdjuster struct { + err error +} + +func (ea *errorAdjuster) AdjustMetrics(pmetric.Metrics) error { + return ea.err +} + +type startTimeAdjuster struct { + startTime pcommon.Timestamp +} + +func (s *startTimeAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + switch metric.Type() { + case pmetric.MetricTypeSum: + dps := metric.Sum().DataPoints() + for l := 0; l < dps.Len(); l++ { + dps.At(l).SetStartTimestamp(s.startTime) + } + case pmetric.MetricTypeSummary: + dps := metric.Summary().DataPoints() + for l := 0; l < dps.Len(); l++ { + dps.At(l).SetStartTimestamp(s.startTime) + } + case pmetric.MetricTypeHistogram: + dps := metric.Histogram().DataPoints() + for l := 0; l < dps.Len(); 
l++ { + dps.At(l).SetStartTimestamp(s.startTime) + } + } + } + } + } + return nil +} + +type testDataPoint struct { + lb labels.Labels + t int64 + v float64 + exemplars []exemplar.Exemplar +} + +type testScrapedPage struct { + pts []*testDataPoint +} + +func createDataPoint(mname string, value float64, es []exemplar.Exemplar, tagPairs ...string) *testDataPoint { + var lbls []string + lbls = append(lbls, tagPairs...) + lbls = append(lbls, model.MetricNameLabel, mname) + lbls = append(lbls, model.JobLabel, "job") + lbls = append(lbls, model.InstanceLabel, "instance") + + return &testDataPoint{ + lb: labels.FromStrings(lbls...), + t: ts, + v: value, + exemplars: es, + } +} + +func assertEquivalentMetrics(t *testing.T, want, got pmetric.Metrics) { + require.Equal(t, want.ResourceMetrics().Len(), got.ResourceMetrics().Len()) + if want.ResourceMetrics().Len() == 0 { + return + } + for i := 0; i < want.ResourceMetrics().Len(); i++ { + wantSm := want.ResourceMetrics().At(i).ScopeMetrics() + gotSm := got.ResourceMetrics().At(i).ScopeMetrics() + require.Equal(t, wantSm.Len(), gotSm.Len()) + if wantSm.Len() == 0 { + return + } + + for j := 0; j < wantSm.Len(); j++ { + wantMs := wantSm.At(j).Metrics() + gotMs := gotSm.At(j).Metrics() + require.Equal(t, wantMs.Len(), gotMs.Len()) + + wmap := map[string]pmetric.Metric{} + gmap := map[string]pmetric.Metric{} + + for k := 0; k < wantMs.Len(); k++ { + wi := wantMs.At(k) + wmap[wi.Name()] = wi + gi := gotMs.At(k) + gmap[gi.Name()] = gi + } + assert.EqualValues(t, wmap, gmap) + } + } + +} diff --git a/collector/receiver/prometheusreceiver/internal/util.go b/collector/receiver/prometheusreceiver/internal/util.go new file mode 100644 index 0000000..ec356f3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/util.go @@ -0,0 +1,137 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "errors" + "sort" + "strconv" + "strings" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +const ( + metricsSuffixCount = "_count" + metricsSuffixBucket = "_bucket" + metricsSuffixSum = "_sum" + metricSuffixTotal = "_total" + metricSuffixInfo = "_info" + metricSuffixCreated = "_created" + startTimeMetricName = "process_start_time_seconds" + scrapeUpMetricName = "up" + + transport = "http" + dataformat = "prometheus" +) + +var ( + trimmableSuffixes = []string{metricsSuffixBucket, metricsSuffixCount, metricsSuffixSum, metricSuffixTotal, metricSuffixInfo, metricSuffixCreated} + errNoDataToBuild = errors.New("there's no data to build") + errNoBoundaryLabel = errors.New("given metricType has no 'le' or 'quantile' label") + errEmptyQuantileLabel = errors.New("'quantile' label on summary metric missing is empty") + errEmptyLeLabel = errors.New("'le' label on histogram metric id missing or empty") + errMetricNameNotFound = errors.New("metricName not found from labels") + errTransactionAborted = errors.New("transaction aborted") + errNoJobInstance = errors.New("job or instance cannot be found from labels") + + notUsefulLabelsHistogram = sortString([]string{model.MetricNameLabel, model.InstanceLabel, model.SchemeLabel, 
model.MetricsPathLabel, model.JobLabel, model.BucketLabel}) + notUsefulLabelsSummary = sortString([]string{model.MetricNameLabel, model.InstanceLabel, model.SchemeLabel, model.MetricsPathLabel, model.JobLabel, model.QuantileLabel}) + notUsefulLabelsOther = sortString([]string{model.MetricNameLabel, model.InstanceLabel, model.SchemeLabel, model.MetricsPathLabel, model.JobLabel}) +) + +func sortString(strs []string) []string { + sort.Strings(strs) + return strs +} + +func getSortedNotUsefulLabels(mType pmetric.MetricType) []string { + switch mType { + case pmetric.MetricTypeHistogram: + return notUsefulLabelsHistogram + case pmetric.MetricTypeSummary: + return notUsefulLabelsSummary + default: + return notUsefulLabelsOther + } +} + +func timestampFromFloat64(ts float64) pcommon.Timestamp { + secs := int64(ts) + nanos := int64((ts - float64(secs)) * 1e9) + return pcommon.Timestamp(secs*1e9 + nanos) +} + +func timestampFromMs(timeAtMs int64) pcommon.Timestamp { + return pcommon.Timestamp(timeAtMs * 1e6) +} + +func getBoundary(metricType pmetric.MetricType, labels labels.Labels) (float64, error) { + val := "" + switch metricType { + case pmetric.MetricTypeHistogram: + val = labels.Get(model.BucketLabel) + if val == "" { + return 0, errEmptyLeLabel + } + case pmetric.MetricTypeSummary: + val = labels.Get(model.QuantileLabel) + if val == "" { + return 0, errEmptyQuantileLabel + } + default: + return 0, errNoBoundaryLabel + } + + return strconv.ParseFloat(val, 64) +} + +// convToMetricType returns the data type and if it is monotonic +func convToMetricType(metricType textparse.MetricType) (pmetric.MetricType, bool) { + switch metricType { + case textparse.MetricTypeCounter: + // always use float64, as it's the internal data type used in prometheus + return pmetric.MetricTypeSum, true + // textparse.MetricTypeUnknown is converted to gauge by default to prevent Prometheus untyped metrics from being dropped + case textparse.MetricTypeGauge, textparse.MetricTypeUnknown: + 
return pmetric.MetricTypeGauge, false + case textparse.MetricTypeHistogram: + return pmetric.MetricTypeHistogram, true + // dropping support for gaugehistogram for now until we have an official spec of its implementation + // a draft can be found in: https://docs.google.com/document/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit#heading=h.1cvzqd4ksd23 + // case textparse.MetricTypeGaugeHistogram: + // return + case textparse.MetricTypeSummary: + return pmetric.MetricTypeSummary, true + case textparse.MetricTypeInfo, textparse.MetricTypeStateset: + return pmetric.MetricTypeSum, false + default: + // including: textparse.MetricTypeGaugeHistogram + return pmetric.MetricTypeEmpty, false + } +} + +func normalizeMetricName(name string) string { + for _, s := range trimmableSuffixes { + if strings.HasSuffix(name, s) && name != s { + return strings.TrimSuffix(name, s) + } + } + return name +} diff --git a/collector/receiver/prometheusreceiver/internal/util_test.go b/collector/receiver/prometheusreceiver/internal/util_test.go new file mode 100644 index 0000000..afb98ab --- /dev/null +++ b/collector/receiver/prometheusreceiver/internal/util_test.go @@ -0,0 +1,190 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + +import ( + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/scrape" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var testMetadata = map[string]scrape.MetricMetadata{ + "counter_test": {Metric: "counter_test", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "counter_test2": {Metric: "counter_test2", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "gauge_test": {Metric: "gauge_test", Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + "gauge_test2": {Metric: "gauge_test2", Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + "hist_test": {Metric: "hist_test", Type: textparse.MetricTypeHistogram, Help: "", Unit: ""}, + "hist_test2": {Metric: "hist_test2", Type: textparse.MetricTypeHistogram, Help: "", Unit: ""}, + "ghist_test": {Metric: "ghist_test", Type: textparse.MetricTypeGaugeHistogram, Help: "", Unit: ""}, + "summary_test": {Metric: "summary_test", Type: textparse.MetricTypeSummary, Help: "", Unit: ""}, + "summary_test2": {Metric: "summary_test2", Type: textparse.MetricTypeSummary, Help: "", Unit: ""}, + "unknown_test": {Metric: "unknown_test", Type: textparse.MetricTypeUnknown, Help: "", Unit: ""}, + "poor_name": {Metric: "poor_name", Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + "poor_name_count": {Metric: "poor_name_count", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "scrape_foo": {Metric: "scrape_foo", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "example_process_start_time_seconds": {Metric: "example_process_start_time_seconds", + Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + 
"process_start_time_seconds": {Metric: "process_start_time_seconds", + Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + "subprocess_start_time_seconds": {Metric: "subprocess_start_time_seconds", + Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, +} + +func TestTimestampFromMs(t *testing.T) { + assert.Equal(t, pcommon.Timestamp(0), timestampFromMs(0)) + assert.Equal(t, pcommon.NewTimestampFromTime(time.UnixMilli(1662679535432)), timestampFromMs(1662679535432)) +} + +func TestTimestampFromFloat64(t *testing.T) { + assert.Equal(t, pcommon.Timestamp(0), timestampFromFloat64(0)) + // Because of float64 conversion, we check only that we are within 100ns error. + assert.InEpsilon(t, uint64(1662679535040000000), uint64(timestampFromFloat64(1662679535.040)), 100) +} + +func TestConvToMetricType(t *testing.T) { + tests := []struct { + name string + mtype textparse.MetricType + want pmetric.MetricType + wantMonotonic bool + }{ + { + name: "textparse.counter", + mtype: textparse.MetricTypeCounter, + want: pmetric.MetricTypeSum, + wantMonotonic: true, + }, + { + name: "textparse.gauge", + mtype: textparse.MetricTypeGauge, + want: pmetric.MetricTypeGauge, + wantMonotonic: false, + }, + { + name: "textparse.unknown", + mtype: textparse.MetricTypeUnknown, + want: pmetric.MetricTypeGauge, + wantMonotonic: false, + }, + { + name: "textparse.histogram", + mtype: textparse.MetricTypeHistogram, + want: pmetric.MetricTypeHistogram, + wantMonotonic: true, + }, + { + name: "textparse.summary", + mtype: textparse.MetricTypeSummary, + want: pmetric.MetricTypeSummary, + wantMonotonic: true, + }, + { + name: "textparse.metric_type_info", + mtype: textparse.MetricTypeInfo, + want: pmetric.MetricTypeSum, + wantMonotonic: false, + }, + { + name: "textparse.metric_state_set", + mtype: textparse.MetricTypeStateset, + want: pmetric.MetricTypeSum, + wantMonotonic: false, + }, + { + name: "textparse.metric_gauge_hostogram", + mtype: textparse.MetricTypeGaugeHistogram, + want: 
pmetric.MetricTypeEmpty, + wantMonotonic: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + got, monotonic := convToMetricType(tt.mtype) + require.Equal(t, got.String(), tt.want.String()) + require.Equal(t, monotonic, tt.wantMonotonic) + }) + } +} + +func TestGetBoundary(t *testing.T) { + tests := []struct { + name string + mtype pmetric.MetricType + labels labels.Labels + wantValue float64 + wantErr error + }{ + { + name: "cumulative histogram with bucket label", + mtype: pmetric.MetricTypeHistogram, + labels: labels.FromStrings(model.BucketLabel, "0.256"), + wantValue: 0.256, + }, + { + name: "gauge histogram with bucket label", + mtype: pmetric.MetricTypeHistogram, + labels: labels.FromStrings(model.BucketLabel, "11.71"), + wantValue: 11.71, + }, + { + name: "summary with bucket label", + mtype: pmetric.MetricTypeSummary, + labels: labels.FromStrings(model.BucketLabel, "11.71"), + wantErr: errEmptyQuantileLabel, + }, + { + name: "summary with quantile label", + mtype: pmetric.MetricTypeSummary, + labels: labels.FromStrings(model.QuantileLabel, "92.88"), + wantValue: 92.88, + }, + { + name: "gauge histogram mismatched with bucket label", + mtype: pmetric.MetricTypeSummary, + labels: labels.FromStrings(model.BucketLabel, "11.71"), + wantErr: errEmptyQuantileLabel, + }, + { + name: "other data types without matches", + mtype: pmetric.MetricTypeGauge, + labels: labels.FromStrings(model.BucketLabel, "11.71"), + wantErr: errNoBoundaryLabel, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + value, err := getBoundary(tt.mtype, tt.labels) + if tt.wantErr != nil { + assert.ErrorIs(t, err, tt.wantErr) + return + } + + assert.NoError(t, err) + assert.Equal(t, value, tt.wantValue) + }) + } +} diff --git a/collector/receiver/prometheusreceiver/metrics_receiver.go b/collector/receiver/prometheusreceiver/metrics_receiver.go new file mode 100644 index 0000000..6306764 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/metrics_receiver.go @@ -0,0 +1,325 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver // import "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver" + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "regexp" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/mitchellh/hashstructure/v2" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + promHTTP "github.com/prometheus/prometheus/discovery/http" + "github.com/prometheus/prometheus/scrape" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" + "gopkg.in/yaml.v2" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" +) + +const ( + defaultGCInterval = 2 * time.Minute + gcIntervalDelta = 1 * time.Minute +) + +// pReceiver is the type that provides Prometheus scraper/receiver functionality. 
+type pReceiver struct { + cfg *Config + consumer consumer.Metrics + cancelFunc context.CancelFunc + targetAllocatorStop chan struct{} + configLoaded chan struct{} + loadConfigOnce sync.Once + + settings receiver.CreateSettings + registry *featuregate.Registry + scrapeManager *scrape.Manager + discoveryManager *discovery.Manager +} + +// New creates a new prometheus.Receiver reference. +func newPrometheusReceiver(set receiver.CreateSettings, cfg *Config, next consumer.Metrics, registry *featuregate.Registry) *pReceiver { + pr := &pReceiver{ + cfg: cfg, + consumer: next, + settings: set, + configLoaded: make(chan struct{}), + targetAllocatorStop: make(chan struct{}), + registry: registry, + } + return pr +} + +// Start is the method that starts Prometheus scraping. It +// is controlled by having previously defined a Configuration using perhaps New. +func (r *pReceiver) Start(_ context.Context, host component.Host) error { + discoveryCtx, cancel := context.WithCancel(context.Background()) + r.cancelFunc = cancel + + logger := internal.NewZapToGokitLogAdapter(r.settings.Logger) + + // add scrape configs defined by the collector configs + baseCfg := r.cfg.PrometheusConfig + + err := r.initPrometheusComponents(discoveryCtx, host, logger) + if err != nil { + r.settings.Logger.Error("Failed to initPrometheusComponents Prometheus components", zap.Error(err)) + return err + } + + err = r.applyCfg(baseCfg) + if err != nil { + r.settings.Logger.Error("Failed to apply new scrape configuration", zap.Error(err)) + return err + } + + allocConf := r.cfg.TargetAllocator + if allocConf != nil { + err = r.startTargetAllocator(allocConf, baseCfg) + if err != nil { + return err + } + } + + r.loadConfigOnce.Do(func() { + close(r.configLoaded) + }) + + return nil +} + +func (r *pReceiver) startTargetAllocator(allocConf *targetAllocator, baseCfg *config.Config) error { + r.settings.Logger.Info("Starting target allocator discovery") + // immediately sync jobs, not waiting for the first 
tick + savedHash, err := r.syncTargetAllocator(uint64(0), allocConf, baseCfg) + if err != nil { + return err + } + go func() { + targetAllocatorIntervalTicker := time.NewTicker(allocConf.Interval) + for { + select { + case <-targetAllocatorIntervalTicker.C: + hash, newErr := r.syncTargetAllocator(savedHash, allocConf, baseCfg) + if newErr != nil { + r.settings.Logger.Error(newErr.Error()) + continue + } + savedHash = hash + case <-r.targetAllocatorStop: + targetAllocatorIntervalTicker.Stop() + r.settings.Logger.Info("Stopping target allocator") + return + } + } + }() + return nil +} + +// syncTargetAllocator request jobs from targetAllocator and update underlying receiver, if the response does not match the provided compareHash. +// baseDiscoveryCfg can be used to provide additional ScrapeConfigs which will be added to the retrieved jobs. +func (r *pReceiver) syncTargetAllocator(compareHash uint64, allocConf *targetAllocator, baseCfg *config.Config) (uint64, error) { + r.settings.Logger.Debug("Syncing target allocator jobs") + scrapeConfigsResponse, err := r.getScrapeConfigsResponse(allocConf.Endpoint) + if err != nil { + r.settings.Logger.Error("Failed to retrieve job list", zap.Error(err)) + return 0, err + } + + hash, err := hashstructure.Hash(scrapeConfigsResponse, hashstructure.FormatV2, nil) + if err != nil { + r.settings.Logger.Error("Failed to hash job list", zap.Error(err)) + return 0, err + } + if hash == compareHash { + // no update needed + return hash, nil + } + + // Clear out the current configurations + baseCfg.ScrapeConfigs = []*config.ScrapeConfig{} + + for jobName, scrapeConfig := range scrapeConfigsResponse { + var httpSD promHTTP.SDConfig + if allocConf.HTTPSDConfig == nil { + httpSD = promHTTP.SDConfig{ + RefreshInterval: model.Duration(30 * time.Second), + } + } else { + httpSD = *allocConf.HTTPSDConfig + } + escapedJob := url.QueryEscape(jobName) + httpSD.URL = fmt.Sprintf("%s/jobs/%s/targets?collector_id=%s", allocConf.Endpoint, escapedJob, 
allocConf.CollectorID) + httpSD.HTTPClientConfig.FollowRedirects = false + scrapeConfig.ServiceDiscoveryConfigs = discovery.Configs{ + &httpSD, + } + + baseCfg.ScrapeConfigs = append(baseCfg.ScrapeConfigs, scrapeConfig) + } + + err = r.applyCfg(baseCfg) + if err != nil { + r.settings.Logger.Error("Failed to apply new scrape configuration", zap.Error(err)) + return 0, err + } + + return hash, nil +} + +// instantiateShard inserts the SHARD environment variable in the returned configuration +func (r *pReceiver) instantiateShard(body []byte) []byte { + shard, ok := os.LookupEnv("SHARD") + if !ok { + shard = "0" + } + return bytes.ReplaceAll(body, []byte("$(SHARD)"), []byte(shard)) +} + +func (r *pReceiver) getScrapeConfigsResponse(baseURL string) (map[string]*config.ScrapeConfig, error) { + scrapeConfigsURL := fmt.Sprintf("%s/scrape_configs", baseURL) + _, err := url.Parse(scrapeConfigsURL) // check if valid + if err != nil { + return nil, err + } + + resp, err := http.Get(scrapeConfigsURL) //nolint + if err != nil { + return nil, err + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + jobToScrapeConfig := map[string]*config.ScrapeConfig{} + envReplacedBody := r.instantiateShard(body) + err = yaml.Unmarshal(envReplacedBody, &jobToScrapeConfig) + if err != nil { + return nil, err + } + err = resp.Body.Close() + if err != nil { + return nil, err + } + return jobToScrapeConfig, nil +} + +func (r *pReceiver) applyCfg(cfg *config.Config) error { + if err := r.scrapeManager.ApplyConfig(cfg); err != nil { + return err + } + + discoveryCfg := make(map[string]discovery.Configs) + for _, scrapeConfig := range cfg.ScrapeConfigs { + discoveryCfg[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs + r.settings.Logger.Info("Scrape job added", zap.String("jobName", scrapeConfig.JobName)) + } + if err := r.discoveryManager.ApplyConfig(discoveryCfg); err != nil { + return err + } + return nil +} + +func (r *pReceiver) 
initPrometheusComponents(ctx context.Context, host component.Host, logger log.Logger) error { + r.discoveryManager = discovery.NewManager(ctx, logger) + + go func() { + r.settings.Logger.Info("Starting discovery manager") + if err := r.discoveryManager.Run(); err != nil { + r.settings.Logger.Error("Discovery manager failed", zap.Error(err)) + host.ReportFatalError(err) + } + }() + + var startTimeMetricRegex *regexp.Regexp + if r.cfg.StartTimeMetricRegex != "" { + var err error + startTimeMetricRegex, err = regexp.Compile(r.cfg.StartTimeMetricRegex) + if err != nil { + return err + } + } + + store, err := internal.NewAppendable( + r.consumer, + r.settings, + gcInterval(r.cfg.PrometheusConfig), + r.cfg.UseStartTimeMetric, + r.cfg.PreserveUntyped, + startTimeMetricRegex, + useCreatedMetricGate.IsEnabled(), + r.cfg.PrometheusConfig.GlobalConfig.ExternalLabels, + r.registry, + ) + if err != nil { + return err + } + r.scrapeManager = scrape.NewManager(&scrape.Options{PassMetadataInContext: true}, logger, store) + + go func() { + // The scrape manager needs to wait for the configuration to be loaded before beginning + <-r.configLoaded + r.settings.Logger.Info("Starting scrape manager") + if err := r.scrapeManager.Run(r.discoveryManager.SyncCh()); err != nil { + r.settings.Logger.Error("Scrape manager failed", zap.Error(err)) + host.ReportFatalError(err) + } + }() + return nil +} + +// gcInterval returns the longest scrape interval used by a scrape config, +// plus a delta to prevent race conditions. +// This ensures jobs are not garbage collected between scrapes. 
+func gcInterval(cfg *config.Config) time.Duration { + gcInterval := defaultGCInterval + if time.Duration(cfg.GlobalConfig.ScrapeInterval)+gcIntervalDelta > gcInterval { + gcInterval = time.Duration(cfg.GlobalConfig.ScrapeInterval) + gcIntervalDelta + } + for _, scrapeConfig := range cfg.ScrapeConfigs { + if time.Duration(scrapeConfig.ScrapeInterval)+gcIntervalDelta > gcInterval { + gcInterval = time.Duration(scrapeConfig.ScrapeInterval) + gcIntervalDelta + } + } + return gcInterval +} + +// Shutdown stops and cancels the underlying Prometheus scrapers. +func (r *pReceiver) Shutdown(context.Context) error { + if r.cancelFunc != nil { + r.cancelFunc() + } + if r.scrapeManager != nil { + r.scrapeManager.Stop() + } + close(r.targetAllocatorStop) + return nil +} diff --git a/collector/receiver/prometheusreceiver/metrics_receiver_helper_test.go b/collector/receiver/prometheusreceiver/metrics_receiver_helper_test.go new file mode 100644 index 0000000..2560649 --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_receiver_helper_test.go @@ -0,0 +1,689 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusreceiver + +import ( + "context" + "fmt" + "log" + "math" + "net/http" + "net/http/httptest" + "net/url" + "sync" + "sync/atomic" + "testing" + "time" + + gokitlog "github.com/go-kit/log" + promcfg "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/scrape" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" + "gopkg.in/yaml.v2" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" +) + +type mockPrometheusResponse struct { + code int + data string + useOpenMetrics bool +} + +type mockPrometheus struct { + mu sync.Mutex // mu protects the fields below. 
+ endpoints map[string][]mockPrometheusResponse + accessIndex map[string]*atomic.Int32 + wg *sync.WaitGroup + srv *httptest.Server +} + +func newMockPrometheus(endpoints map[string][]mockPrometheusResponse) *mockPrometheus { + accessIndex := make(map[string]*atomic.Int32) + wg := &sync.WaitGroup{} + wg.Add(len(endpoints)) + for k := range endpoints { + accessIndex[k] = &atomic.Int32{} + } + mp := &mockPrometheus{ + wg: wg, + accessIndex: accessIndex, + endpoints: endpoints, + } + srv := httptest.NewServer(mp) + mp.srv = srv + return mp +} + +func (mp *mockPrometheus) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + mp.mu.Lock() + defer mp.mu.Unlock() + iptr, ok := mp.accessIndex[req.URL.Path] + if !ok { + rw.WriteHeader(404) + return + } + index := int(iptr.Load()) + iptr.Add(1) + pages := mp.endpoints[req.URL.Path] + if index >= len(pages) { + if index == len(pages) { + mp.wg.Done() + } + rw.WriteHeader(404) + return + } + if pages[index].useOpenMetrics { + rw.Header().Set("Content-Type", "application/openmetrics-text") + } + rw.WriteHeader(pages[index].code) + _, _ = rw.Write([]byte(pages[index].data)) +} + +func (mp *mockPrometheus) Close() { + mp.srv.Close() +} + +// ------------------------- +// EndToEnd Test and related +// ------------------------- + +var ( + expectedScrapeMetricCount = 5 +) + +type testData struct { + name string + relabeledJob string // Used when relabeling or honor_labels changes the target to something other than 'name'. 
+ pages []mockPrometheusResponse + attributes pcommon.Map + validateScrapes bool + normalizedName bool + validateFunc func(t *testing.T, td *testData, result []pmetric.ResourceMetrics) +} + +// setupMockPrometheus to create a mocked prometheus based on targets, returning the server and a prometheus exporting +// config +func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, error) { + jobs := make([]map[string]interface{}, 0, len(tds)) + endpoints := make(map[string][]mockPrometheusResponse) + var metricPaths []string + for _, t := range tds { + metricPath := fmt.Sprintf("/%s/metrics", t.name) + endpoints[metricPath] = t.pages + metricPaths = append(metricPaths, metricPath) + } + mp := newMockPrometheus(endpoints) + u, _ := url.Parse(mp.srv.URL) + for i := 0; i < len(tds); i++ { + job := make(map[string]interface{}) + job["job_name"] = tds[i].name + job["metrics_path"] = metricPaths[i] + job["scrape_interval"] = "1s" + job["scrape_timeout"] = "500ms" + job["static_configs"] = []map[string]interface{}{{"targets": []string{u.Host}}} + jobs = append(jobs, job) + } + if len(jobs) != len(tds) { + log.Fatal("len(jobs) != len(targets), make sure job names are unique") + } + configP := make(map[string]interface{}) + configP["scrape_configs"] = jobs + cfg, err := yaml.Marshal(&configP) + if err != nil { + return mp, nil, err + } + // update attributes value (will use for validation) + l := []labels.Label{{Name: "__scheme__", Value: "http"}} + for _, t := range tds { + t.attributes = internal.CreateResource(t.name, u.Host, l).Attributes() + } + pCfg, err := promcfg.Load(string(cfg), false, gokitlog.NewNopLogger()) + return mp, pCfg, err +} + +func waitForScrapeResults(t *testing.T, targets []*testData, cms *consumertest.MetricsSink) { + assert.Eventually(t, func() bool { + // This is the receiver's pov as to what should have been collected from the server + metrics := cms.AllMetrics() + pResults := splitMetricsByTarget(metrics) + for _, target := range 
targets { + want := 0 + name := target.name + if target.relabeledJob != "" { + name = target.relabeledJob + } + scrapes := pResults[name] + // count the number of pages we expect for a target endpoint + for _, p := range target.pages { + if p.code != 404 { + // only count target pages that are not 404, matching mock ServerHTTP func response logic + want++ + } + + } + if len(scrapes) < want { + // If we don't have enough scrapes yet lets return false and wait for another tick + return false + } + } + return true + }, 30*time.Second, 500*time.Millisecond) +} + +func verifyNumValidScrapeResults(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + want := 0 + for _, p := range td.pages { + if p.code == 200 { + want++ + } + } + require.LessOrEqual(t, want, len(resourceMetrics), "want at least %d valid scrapes, but got %d", want, len(resourceMetrics)) +} + +func verifyNumTotalScrapeResults(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + want := 0 + for _, p := range td.pages { + if p.code == 200 || p.code == 500 { + want++ + } + } + require.LessOrEqual(t, want, len(resourceMetrics), "want at least %d total scrapes, but got %d", want, len(resourceMetrics)) +} + +func getMetrics(rm pmetric.ResourceMetrics) []pmetric.Metric { + var metrics []pmetric.Metric + ilms := rm.ScopeMetrics() + for j := 0; j < ilms.Len(); j++ { + metricSlice := ilms.At(j).Metrics() + for i := 0; i < metricSlice.Len(); i++ { + metrics = append(metrics, metricSlice.At(i)) + } + } + return metrics +} + +func metricsCount(resourceMetric pmetric.ResourceMetrics) int { + metricsCount := 0 + ilms := resourceMetric.ScopeMetrics() + for j := 0; j < ilms.Len(); j++ { + ilm := ilms.At(j) + metricsCount += ilm.Metrics().Len() + } + return metricsCount +} + +func getValidScrapes(t *testing.T, rms []pmetric.ResourceMetrics, normalizedNames bool) []pmetric.ResourceMetrics { + var out []pmetric.ResourceMetrics + // rms will include failed scrapes and scrapes that 
received no metrics but have internal scrape metrics, filter those out + for i := 0; i < len(rms); i++ { + allMetrics := getMetrics(rms[i]) + if expectedScrapeMetricCount < len(allMetrics) && countScrapeMetrics(allMetrics, normalizedNames) == expectedScrapeMetricCount { + if isFirstFailedScrape(allMetrics, normalizedNames) { + continue + } + assertUp(t, 1, allMetrics) + out = append(out, rms[i]) + } else { + assertUp(t, 0, allMetrics) + } + } + return out +} + +func isFirstFailedScrape(metrics []pmetric.Metric, normalizedNames bool) bool { + for _, m := range metrics { + if m.Name() == "up" { + if m.Gauge().DataPoints().At(0).DoubleValue() == 1 { // assumed up will not have multiple datapoints + return false + } + } + } + + for _, m := range metrics { + if isDefaultMetrics(m, normalizedNames) { + continue + } + + switch m.Type() { + case pmetric.MetricTypeGauge: + for i := 0; i < m.Gauge().DataPoints().Len(); i++ { + if !m.Gauge().DataPoints().At(i).Flags().NoRecordedValue() { + return false + } + } + case pmetric.MetricTypeSum: + for i := 0; i < m.Sum().DataPoints().Len(); i++ { + if !m.Sum().DataPoints().At(i).Flags().NoRecordedValue() { + return false + } + } + case pmetric.MetricTypeHistogram: + for i := 0; i < m.Histogram().DataPoints().Len(); i++ { + if !m.Histogram().DataPoints().At(i).Flags().NoRecordedValue() { + return false + } + } + case pmetric.MetricTypeSummary: + for i := 0; i < m.Summary().DataPoints().Len(); i++ { + if !m.Summary().DataPoints().At(i).Flags().NoRecordedValue() { + return false + } + } + } + } + return true +} + +func assertUp(t *testing.T, expected float64, metrics []pmetric.Metric) { + for _, m := range metrics { + if m.Name() == "up" { + assert.Equal(t, expected, m.Gauge().DataPoints().At(0).DoubleValue()) // (assumed up will not have multiple datapoints) + return + } + } + t.Error("No 'up' metric found") +} + +func countScrapeMetricsRM(got pmetric.ResourceMetrics, normalizedNames bool) int { + n := 0 + ilms := got.ScopeMetrics() 
+ for j := 0; j < ilms.Len(); j++ { + ilm := ilms.At(j) + for i := 0; i < ilm.Metrics().Len(); i++ { + if isDefaultMetrics(ilm.Metrics().At(i), normalizedNames) { + n++ + } + } + } + return n +} + +func countScrapeMetrics(metrics []pmetric.Metric, normalizedNames bool) int { + n := 0 + for _, m := range metrics { + if isDefaultMetrics(m, normalizedNames) { + n++ + } + } + return n +} + +func isDefaultMetrics(m pmetric.Metric, normalizedNames bool) bool { + switch m.Name() { + case "up", "scrape_samples_scraped", "scrape_samples_post_metric_relabeling", "scrape_series_added": + return true + + // if normalizedNames is true, we expect unit `_seconds` to be trimmed. + case "scrape_duration_seconds": + return !normalizedNames + case "scrape_duration": + return normalizedNames + default: + } + return false +} + +type metricTypeComparator func(*testing.T, pmetric.Metric) +type numberPointComparator func(*testing.T, pmetric.NumberDataPoint) +type histogramPointComparator func(*testing.T, pmetric.HistogramDataPoint) +type summaryPointComparator func(*testing.T, pmetric.SummaryDataPoint) + +type dataPointExpectation struct { + numberPointComparator []numberPointComparator + histogramPointComparator []histogramPointComparator + summaryPointComparator []summaryPointComparator +} + +type testExpectation func(*testing.T, pmetric.ResourceMetrics) + +func doCompare(t *testing.T, name string, want pcommon.Map, got pmetric.ResourceMetrics, expectations []testExpectation) { + doCompareNormalized(t, name, want, got, expectations, false) +} + +func doCompareNormalized(t *testing.T, name string, want pcommon.Map, got pmetric.ResourceMetrics, expectations []testExpectation, normalizedNames bool) { + t.Run(name, func(t *testing.T) { + assert.Equal(t, expectedScrapeMetricCount, countScrapeMetricsRM(got, normalizedNames)) + assert.Equal(t, want.Len(), got.Resource().Attributes().Len()) + for k, v := range want.AsRaw() { + val, ok := got.Resource().Attributes().Get(k) + assert.True(t, ok, 
"%q attribute is missing", k) + if ok { + assert.EqualValues(t, v, val.AsString()) + } + } + for _, e := range expectations { + e(t, got) + } + }) +} + +func assertMetricPresent(name string, metricTypeExpectations metricTypeComparator, dataPointExpectations []dataPointExpectation) testExpectation { + return func(t *testing.T, rm pmetric.ResourceMetrics) { + allMetrics := getMetrics(rm) + var present bool + for _, m := range allMetrics { + if name != m.Name() { + continue + } + + present = true + metricTypeExpectations(t, m) + for i, de := range dataPointExpectations { + switch m.Type() { + case pmetric.MetricTypeGauge: + for _, npc := range de.numberPointComparator { + require.Equal(t, m.Gauge().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Gauge metric '%s' does not match to testdata", name) + npc(t, m.Gauge().DataPoints().At(i)) + } + case pmetric.MetricTypeSum: + for _, npc := range de.numberPointComparator { + require.Equal(t, m.Sum().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Sum metric '%s' does not match to testdata", name) + npc(t, m.Sum().DataPoints().At(i)) + } + case pmetric.MetricTypeHistogram: + for _, hpc := range de.histogramPointComparator { + require.Equal(t, m.Histogram().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Histogram metric '%s' does not match to testdata", name) + hpc(t, m.Histogram().DataPoints().At(i)) + } + case pmetric.MetricTypeSummary: + for _, spc := range de.summaryPointComparator { + require.Equal(t, m.Summary().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Summary metric '%s' does not match to testdata", name) + spc(t, m.Summary().DataPoints().At(i)) + } + } + } + } + require.True(t, present, "expected metric '%s' is not present", name) + } +} + +func assertMetricAbsent(name string) testExpectation { + return func(t *testing.T, rm pmetric.ResourceMetrics) { + allMetrics := 
getMetrics(rm) + for _, m := range allMetrics { + assert.NotEqual(t, name, m.Name(), "Metric is present, but was expected absent") + } + } +} + +func compareMetricType(typ pmetric.MetricType) metricTypeComparator { + return func(t *testing.T, metric pmetric.Metric) { + assert.Equal(t, typ.String(), metric.Type().String(), "Metric type does not match") + } +} + +func compareMetricIsMonotonic(isMonotonic bool) metricTypeComparator { + return func(t *testing.T, metric pmetric.Metric) { + assert.Equal(t, pmetric.MetricTypeSum.String(), metric.Type().String(), "IsMonotonic only exists for sums") + assert.Equal(t, isMonotonic, metric.Sum().IsMonotonic(), "IsMonotonic does not match") + } +} + +func compareAttributes(attributes map[string]string) numberPointComparator { + return func(t *testing.T, numberDataPoint pmetric.NumberDataPoint) { + req := assert.Equal(t, len(attributes), numberDataPoint.Attributes().Len(), "Attributes length do not match") + if req { + for k, v := range attributes { + val, ok := numberDataPoint.Attributes().Get(k) + require.True(t, ok) + assert.Equal(t, v, val.AsString(), "Attributes do not match") + } + } + } +} + +func compareSummaryAttributes(attributes map[string]string) summaryPointComparator { + return func(t *testing.T, summaryDataPoint pmetric.SummaryDataPoint) { + req := assert.Equal(t, len(attributes), summaryDataPoint.Attributes().Len(), "Summary attributes length do not match") + if req { + for k, v := range attributes { + val, ok := summaryDataPoint.Attributes().Get(k) + require.True(t, ok) + assert.Equal(t, v, val.AsString(), "Summary attributes value do not match") + } + } + } +} + +func assertAttributesAbsent() numberPointComparator { + return func(t *testing.T, numberDataPoint pmetric.NumberDataPoint) { + assert.Equal(t, 0, numberDataPoint.Attributes().Len(), "Attributes length should be 0") + } +} + +func compareHistogramAttributes(attributes map[string]string) histogramPointComparator { + return func(t *testing.T, 
histogramDataPoint pmetric.HistogramDataPoint) { + req := assert.Equal(t, len(attributes), histogramDataPoint.Attributes().Len(), "Histogram attributes length do not match") + if req { + for k, v := range attributes { + val, ok := histogramDataPoint.Attributes().Get(k) + require.True(t, ok) + assert.Equal(t, v, val.AsString(), "Histogram attributes value do not match") + } + } + } +} + +func assertNumberPointFlagNoRecordedValue() numberPointComparator { + return func(t *testing.T, numberDataPoint pmetric.NumberDataPoint) { + assert.True(t, numberDataPoint.Flags().NoRecordedValue(), + "Datapoint flag for staleness marker not found as expected") + } +} + +func assertHistogramPointFlagNoRecordedValue() histogramPointComparator { + return func(t *testing.T, histogramDataPoint pmetric.HistogramDataPoint) { + assert.True(t, histogramDataPoint.Flags().NoRecordedValue(), + "Datapoint flag for staleness marker not found as expected") + } +} + +func assertSummaryPointFlagNoRecordedValue() summaryPointComparator { + return func(t *testing.T, summaryDataPoint pmetric.SummaryDataPoint) { + assert.True(t, summaryDataPoint.Flags().NoRecordedValue(), + "Datapoint flag for staleness marker not found as expected") + } +} + +func compareStartTimestamp(startTimeStamp pcommon.Timestamp) numberPointComparator { + return func(t *testing.T, numberDataPoint pmetric.NumberDataPoint) { + assert.Equal(t, startTimeStamp.String(), numberDataPoint.StartTimestamp().String(), "Start-Timestamp does not match") + } +} + +func compareTimestamp(timeStamp pcommon.Timestamp) numberPointComparator { + return func(t *testing.T, numberDataPoint pmetric.NumberDataPoint) { + assert.Equal(t, timeStamp.String(), numberDataPoint.Timestamp().String(), "Timestamp does not match") + } +} + +func compareHistogramTimestamp(timeStamp pcommon.Timestamp) histogramPointComparator { + return func(t *testing.T, histogramDataPoint pmetric.HistogramDataPoint) { + assert.Equal(t, timeStamp.String(), 
histogramDataPoint.Timestamp().String(), "Histogram Timestamp does not match") + } +} + +func compareHistogramStartTimestamp(timeStamp pcommon.Timestamp) histogramPointComparator { + return func(t *testing.T, histogramDataPoint pmetric.HistogramDataPoint) { + assert.Equal(t, timeStamp.String(), histogramDataPoint.StartTimestamp().String(), "Histogram Start-Timestamp does not match") + } +} + +func compareSummaryTimestamp(timeStamp pcommon.Timestamp) summaryPointComparator { + return func(t *testing.T, summaryDataPoint pmetric.SummaryDataPoint) { + assert.Equal(t, timeStamp.String(), summaryDataPoint.Timestamp().String(), "Summary Timestamp does not match") + } +} + +func compareSummaryStartTimestamp(timeStamp pcommon.Timestamp) summaryPointComparator { + return func(t *testing.T, summaryDataPoint pmetric.SummaryDataPoint) { + assert.Equal(t, timeStamp.String(), summaryDataPoint.StartTimestamp().String(), "Summary Start-Timestamp does not match") + } +} + +func compareDoubleValue(doubleVal float64) numberPointComparator { + return func(t *testing.T, numberDataPoint pmetric.NumberDataPoint) { + assert.Equal(t, doubleVal, numberDataPoint.DoubleValue(), "Metric double value does not match") + } +} + +func assertNormalNan() numberPointComparator { + return func(t *testing.T, numberDataPoint pmetric.NumberDataPoint) { + assert.True(t, math.Float64bits(numberDataPoint.DoubleValue()) == value.NormalNaN, + "Metric double value is not normalNaN as expected") + } +} + +func compareHistogram(count uint64, sum float64, buckets []uint64) histogramPointComparator { + return func(t *testing.T, histogramDataPoint pmetric.HistogramDataPoint) { + assert.Equal(t, count, histogramDataPoint.Count(), "Histogram count value does not match") + assert.Equal(t, sum, histogramDataPoint.Sum(), "Histogram sum value does not match") + assert.Equal(t, buckets, histogramDataPoint.BucketCounts().AsRaw(), "Histogram bucket count values do not match") + } +} + +func compareSummary(count uint64, sum 
float64, quantiles [][]float64) summaryPointComparator { + return func(t *testing.T, summaryDataPoint pmetric.SummaryDataPoint) { + assert.Equal(t, count, summaryDataPoint.Count(), "Summary count value does not match") + assert.Equal(t, sum, summaryDataPoint.Sum(), "Summary sum value does not match") + req := assert.Equal(t, len(quantiles), summaryDataPoint.QuantileValues().Len()) + if req { + for i := 0; i < summaryDataPoint.QuantileValues().Len(); i++ { + assert.Equal(t, quantiles[i][0], summaryDataPoint.QuantileValues().At(i).Quantile(), + "Summary quantile do not match") + if math.IsNaN(quantiles[i][1]) { + assert.True(t, math.Float64bits(summaryDataPoint.QuantileValues().At(i).Value()) == value.NormalNaN, + "Summary quantile value is not normalNaN as expected") + } else { + assert.Equal(t, quantiles[i][1], summaryDataPoint.QuantileValues().At(i).Value(), + "Summary quantile values do not match") + } + } + } + } +} + +// starts prometheus receiver with custom config, retrieves metrics from MetricsSink +func testComponent(t *testing.T, targets []*testData, useStartTimeMetric bool, startTimeMetricRegex string, registry *featuregate.Registry, cfgMuts ...func(*promcfg.Config)) { + ctx := context.Background() + mp, cfg, err := setupMockPrometheus(targets...) + for _, cfgMut := range cfgMuts { + cfgMut(cfg) + } + require.Nilf(t, err, "Failed to create Prometheus config: %v", err) + defer mp.Close() + + cms := new(consumertest.MetricsSink) + receiver := newPrometheusReceiver(receivertest.NewNopCreateSettings(), &Config{ + PrometheusConfig: cfg, + UseStartTimeMetric: useStartTimeMetric, + StartTimeMetricRegex: startTimeMetricRegex, + PreserveUntyped: true, // This adds a metric label for untyped metrics. Enabling this allows testing for it. 
+ }, cms, registry) + + require.NoError(t, receiver.Start(ctx, componenttest.NewNopHost())) + // verify state after shutdown is called + t.Cleanup(func() { + // verify state after shutdown is called + assert.Lenf(t, flattenTargets(receiver.scrapeManager.TargetsAll()), len(targets), "expected %v targets to be running", len(targets)) + require.NoError(t, receiver.Shutdown(context.Background())) + assert.Len(t, flattenTargets(receiver.scrapeManager.TargetsAll()), 0, "expected scrape manager to have no targets") + }) + + // waitgroup Wait() is strictly from a server POV indicating the sufficient number and type of requests have been seen + mp.wg.Wait() + + // Note:waitForScrapeResult is an attempt to address a possible race between waitgroup Done() being called in the ServerHTTP function + // and when the receiver actually processes the http request responses into metrics. + // this is a eventually timeout,tick that just waits for some condition. + // however the condition to wait for may be suboptimal and may need to be adjusted. + waitForScrapeResults(t, targets, cms) + + // This begins the processing of the scrapes collected by the receiver + metrics := cms.AllMetrics() + // split and store results by target name + pResults := splitMetricsByTarget(metrics) + lres, lep := len(pResults), len(mp.endpoints) + // There may be an additional scrape entry between when the mock server provided + // all responses and when we capture the metrics. It will be ignored later. + assert.GreaterOrEqualf(t, lep, lres, "want at least %d targets, but got %v\n", lep, lres) + + // loop to validate outputs for each targets + // Stop once we have evaluated all expected results, any others are superfluous. 
+ for _, target := range targets[:lep] { + t.Run(target.name, func(t *testing.T) { + name := target.name + if target.relabeledJob != "" { + name = target.relabeledJob + } + scrapes := pResults[name] + if !target.validateScrapes { + scrapes = getValidScrapes(t, pResults[name], target.normalizedName) + } + target.validateFunc(t, target, scrapes) + }) + } +} + +// flattenTargets takes a map of jobs to target and flattens to a list of targets +func flattenTargets(targets map[string][]*scrape.Target) []*scrape.Target { + var flatTargets []*scrape.Target + for _, target := range targets { + flatTargets = append(flatTargets, target...) + } + return flatTargets +} + +func splitMetricsByTarget(metrics []pmetric.Metrics) map[string][]pmetric.ResourceMetrics { + pResults := make(map[string][]pmetric.ResourceMetrics) + for _, md := range metrics { + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + name, _ := rms.At(i).Resource().Attributes().Get("service.name") + pResults[name.AsString()] = append(pResults[name.AsString()], rms.At(i)) + } + } + return pResults +} + +func getTS(ms pmetric.MetricSlice) pcommon.Timestamp { + if ms.Len() == 0 { + return 0 + } + m := ms.At(0) + switch m.Type() { + case pmetric.MetricTypeGauge: + return m.Gauge().DataPoints().At(0).Timestamp() + case pmetric.MetricTypeSum: + return m.Sum().DataPoints().At(0).Timestamp() + case pmetric.MetricTypeHistogram: + return m.Histogram().DataPoints().At(0).Timestamp() + case pmetric.MetricTypeSummary: + return m.Summary().DataPoints().At(0).Timestamp() + case pmetric.MetricTypeExponentialHistogram: + return m.ExponentialHistogram().DataPoints().At(0).Timestamp() + } + return 0 +} diff --git a/collector/receiver/prometheusreceiver/metrics_receiver_honor_timestamp_test.go b/collector/receiver/prometheusreceiver/metrics_receiver_honor_timestamp_test.go new file mode 100644 index 0000000..691c6d2 --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_receiver_honor_timestamp_test.go @@ 
-0,0 +1,525 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver + +import ( + "fmt" + "sync" + "testing" + "time" + + promcfg "github.com/prometheus/prometheus/config" + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var ( + timeNow = time.Now() + ts1 = timeNow.Add(10 * time.Second).UnixMilli() + ts2 = timeNow.Add(20 * time.Second).UnixMilli() + ts3 = timeNow.Add(30 * time.Second).UnixMilli() + ts4 = timeNow.Add(40 * time.Second).UnixMilli() + ts5 = timeNow.Add(50 * time.Second).UnixMilli() + ts6 = timeNow.Add(60 * time.Second).UnixMilli() + ts7 = timeNow.Add(70 * time.Second).UnixMilli() + ts8 = timeNow.Add(80 * time.Second).UnixMilli() + ts9 = timeNow.Add(90 * time.Second).UnixMilli() + ts10 = timeNow.Add(100 * time.Second).UnixMilli() + ts11 = timeNow.Add(110 * time.Second).UnixMilli() + ts12 = timeNow.Add(120 * time.Second).UnixMilli() + ts13 = timeNow.Add(130 * time.Second).UnixMilli() + ts14 = timeNow.Add(140 * time.Second).UnixMilli() + ts15 = timeNow.Add(150 * time.Second).UnixMilli() +) + +var onlyOnce sync.Once + +// honorTimestampsPage2 has lower values for metrics than honorTimestampsPage1. +// So, the start_timestamp should get reset. 
+var honorTimestampsPage1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_thread gauge +go_threads 19 %v + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 100 %v +http_requests_total{method="post",code="400"} 5 %v + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1000 %v +http_request_duration_seconds_bucket{le="0.5"} 1500 %v +http_request_duration_seconds_bucket{le="1"} 2000 %v +http_request_duration_seconds_bucket{le="+Inf"} 2500 %v +http_request_duration_seconds_sum 5000 %v +http_request_duration_seconds_count 2500 %v + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 %v +rpc_duration_seconds{quantile="0.9"} 5 %v +rpc_duration_seconds{quantile="0.99"} 8 %v +rpc_duration_seconds_sum 5000 %v +rpc_duration_seconds_count 1000 %v +` + +var honorTimestampsPage2 = ` +# HELP go_threads Number of OS threads created +# TYPE go_thread gauge +go_threads 18 %v + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 99 %v +http_requests_total{method="post",code="400"} 3 %v + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 900 %v +http_request_duration_seconds_bucket{le="0.5"} 1400 %v +http_request_duration_seconds_bucket{le="1"} 1900 %v +http_request_duration_seconds_bucket{le="+Inf"} 2400 %v +http_request_duration_seconds_sum 4950 %v +http_request_duration_seconds_count 2400 %v + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 %v +rpc_duration_seconds{quantile="0.9"} 6 %v +rpc_duration_seconds{quantile="0.99"} 8 %v +rpc_duration_seconds_sum 4980 %v +rpc_duration_seconds_count 900 %v +` + +// honorTimestampsPage3 has higher value than previous scrape. +// So, start_timestamp should not be reset for honorTimestampsPage3 +var honorTimestampsPage3 = ` +# HELP go_threads Number of OS threads created +# TYPE go_thread gauge +go_threads 19 %v + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 100 %v +http_requests_total{method="post",code="400"} 5 %v + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1000 %v +http_request_duration_seconds_bucket{le="0.5"} 1500 %v +http_request_duration_seconds_bucket{le="1"} 2000 %v +http_request_duration_seconds_bucket{le="+Inf"} 2500 %v +http_request_duration_seconds_sum 5000 %v +http_request_duration_seconds_count 2500 %v + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 %v +rpc_duration_seconds{quantile="0.9"} 5 %v +rpc_duration_seconds{quantile="0.99"} 8 %v +rpc_duration_seconds_sum 5000 %v +rpc_duration_seconds_count 1000 %v +` + +// TestHonorTimeStampsWithTrue validates honor_timestamp configuration +// where all metricFamilies (and each datapoint) in the testdata has explicit timestamps. +// TestHonorTimeStampsWithTrue does not check for scenario where timestamp is not provided +// to all metric families in a scrape- as those situations are rare though valid. + +// TestHonorTimeStampsWithTrue has testdata such that +// For valid data- Start_timestamps should not be ahead of point_timestamps. 
+// TestHonorTimeStampsWithTrue validates: +// - For initial scrape, start_timestamp is same as point timestamp, +// - Start_timestamp should be the explicit timestamp of first-time a particular metric is seen +// - Start_timestamp should get reset if current scrape has lower value than previous scrape + +func TestHonorTimeStampsWithTrue(t *testing.T) { + setMetricsTimestamp() + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: honorTimestampsPage1}, + {code: 200, data: honorTimestampsPage2}, + {code: 200, data: honorTimestampsPage3}, + }, + validateFunc: verifyHonorTimeStampsTrue, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +// TestHonorTimeStampsWithFalse validates that with honor_timestamp config set to false, +// valid testdata provided with explicit timestamps does not get honored. +func TestHonorTimeStampsWithFalse(t *testing.T) { + setMetricsTimestamp() + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: honorTimestampsPage1}, + {code: 200, data: honorTimestampsPage2}, + }, + validateFunc: verifyHonorTimeStampsFalse, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + for _, scrapeConfig := range cfg.ScrapeConfigs { + scrapeConfig.HonorTimestamps = false + } + }) +} + +func setMetricsTimestamp() { + onlyOnce.Do(func() { + honorTimestampsPage1 = fmt.Sprintf(honorTimestampsPage1, + ts1, // timestamp for gauge + ts2, ts3, // timestamp for counter + ts4, ts4, ts4, ts4, ts4, ts4, // timestamp for histogram + ts5, ts5, ts5, ts5, ts5, // timestamp for summary + ) + honorTimestampsPage2 = fmt.Sprintf(honorTimestampsPage2, + ts6, // timestamp for gauge + ts7, ts8, // timestamp for counter + ts9, ts9, ts9, ts9, ts9, ts9, // timestamp for histogram + ts10, ts10, ts10, ts10, ts10, // timestamp for summary + ) + honorTimestampsPage3 = fmt.Sprintf(honorTimestampsPage3, + 
ts11, // timestamp for gauge + ts12, ts13, // timestamp for counter + ts14, ts14, ts14, ts14, ts14, ts14, // timestamp for histogram + ts15, ts15, ts15, ts15, ts15, // timestamp for summary + ) + }) +} + +func verifyHonorTimeStampsTrue(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m1)) + + wantAttributes := td.attributes + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts1))), + compareDoubleValue(19), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts2))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts2))), + compareDoubleValue(100), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts3))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts3))), + compareDoubleValue(5), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts4))), + compareHistogramTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts4))), + compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), + }, + }, + }), + 
assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts5))), + compareSummaryTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts5))), + compareSummary(1000, 5000, [][]float64{{0.01, 1}, {0.9, 5}, {0.99, 8}}), + }, + }, + }), + } + doCompare(t, "scrape-honorTimestamp-1", wantAttributes, m1, e1) + + m2 := resourceMetrics[1] + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m2)) + + e2 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts6))), + compareDoubleValue(18), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts7))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts7))), + compareDoubleValue(99), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts8))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts8))), + compareDoubleValue(3), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts9))), + 
compareHistogramTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts9))), + compareHistogram(2400, 4950, []uint64{900, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts10))), + compareSummaryTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts10))), + compareSummary(900, 4980, [][]float64{{0.01, 1}, {0.9, 6}, {0.99, 8}}), + }, + }, + }), + } + doCompare(t, "scrape-honorTimestamp-2", wantAttributes, m2, e2) + + m3 := resourceMetrics[2] + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m3)) + + e3 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts11))), + compareDoubleValue(19), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts7))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts12))), + compareDoubleValue(100), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts8))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts13))), + compareDoubleValue(5), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: 
[]histogramPointComparator{ + compareHistogramStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts9))), + compareHistogramTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts14))), + compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts10))), + compareSummaryTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts15))), + compareSummary(1000, 5000, [][]float64{{0.01, 1}, {0.9, 5}, {0.99, 8}}), + }, + }, + }), + } + doCompare(t, "scrape-honorTimestamp-3", wantAttributes, m3, e3) +} + +func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts1), + compareDoubleValue(5), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + 
assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts1), + compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummary(1000, 5000, [][]float64{{0.01, 1}, {0.9, 5}, {0.99, 8}}), + }, + }, + }), + } + doCompare(t, "scrape-honorTimestamp-1", wantAttributes, m1, e1) + + m2 := resourceMetrics[1] + // m2 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m2)) + + metricsScrape2 := m2.ScopeMetrics().At(0).Metrics() + ts2 := getTS(metricsScrape2) + e2 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts2), + compareDoubleValue(18), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts2), + compareTimestamp(ts2), + compareDoubleValue(99), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts2), + compareTimestamp(ts2), + compareDoubleValue(3), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts2), + 
compareHistogramTimestamp(ts2), + compareHistogram(2400, 4950, []uint64{900, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts2), + compareSummaryTimestamp(ts2), + compareSummary(900, 4980, [][]float64{{0.01, 1}, {0.9, 6}, {0.99, 8}}), + }, + }, + }), + } + doCompare(t, "scrape-honorTimestamp-2", wantAttributes, m2, e2) +} diff --git a/collector/receiver/prometheusreceiver/metrics_receiver_labels_test.go b/collector/receiver/prometheusreceiver/metrics_receiver_labels_test.go new file mode 100644 index 0000000..59e84a7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_receiver_labels_test.go @@ -0,0 +1,733 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusreceiver + +import ( + "testing" + + promcfg "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +const targetExternalLabels = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19` + +func TestExternalLabels(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: targetExternalLabels}, + }, + validateFunc: verifyExternalLabels, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + cfg.GlobalConfig.ExternalLabels = labels.FromStrings("key", "value") + }) +} + +func verifyExternalLabels(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, rms) + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + wantAttributes := td.attributes + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() + doCompare(t, "scrape-externalLabels", wantAttributes, rms[0], []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + compareAttributes(map[string]string{"key": "value"}), + }, + }, + }), + }) +} + +const targetLabelLimit1 = ` +# HELP test_gauge0 This is my gauge +# TYPE test_gauge0 gauge +test_gauge0{label1="value1",label2="value2"} 10 +` + +func verifyLabelLimitTarget1(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + // each sample in the scraped metrics is within the configured label_limit, scrape should be successful + verifyNumValidScrapeResults(t, td, rms) 
+ require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + want := td.attributes + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() + + doCompare(t, "scrape-labelLimit", want, rms[0], []testExpectation{ + assertMetricPresent("test_gauge0", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(10), + compareAttributes(map[string]string{"label1": "value1", "label2": "value2"}), + }, + }, + }, + ), + }) +} + +const targetLabelLimit2 = ` +# HELP test_gauge0 This is my gauge +# TYPE test_gauge0 gauge +test_gauge0{label1="value1",label2="value2",label3="value3"} 10 +` + +func verifyFailedScrape(t *testing.T, _ *testData, rms []pmetric.ResourceMetrics) { + // Scrape should be unsuccessful since limit is exceeded in target2 + for _, rm := range rms { + metrics := getMetrics(rm) + assertUp(t, 0, metrics) + } +} + +func TestLabelLimitConfig(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: targetLabelLimit1}, + }, + validateFunc: verifyLabelLimitTarget1, + }, + { + name: "target2", + pages: []mockPrometheusResponse{ + {code: 200, data: targetLabelLimit2}, + }, + validateFunc: verifyFailedScrape, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + // set label limit in scrape_config + for _, scrapeCfg := range cfg.ScrapeConfigs { + scrapeCfg.LabelLimit = 5 + } + }) +} + +const targetLabelLimits1 = ` +# HELP test_gauge0 This is my gauge +# TYPE test_gauge0 gauge +test_gauge0{label1="value1",label2="value2"} 10 + +# HELP test_counter0 This is my counter +# TYPE test_counter0 counter +test_counter0{label1="value1",label2="value2"} 1 + +# HELP test_histogram0 This is my histogram +# TYPE test_histogram0 histogram 
+test_histogram0_bucket{label1="value1",label2="value2",le="0.1"} 1000 +test_histogram0_bucket{label1="value1",label2="value2",le="0.5"} 1500 +test_histogram0_bucket{label1="value1",label2="value2",le="1"} 2000 +test_histogram0_bucket{label1="value1",label2="value2",le="+Inf"} 2500 +test_histogram0_sum{label1="value1",label2="value2"} 5000 +test_histogram0_count{label1="value1",label2="value2"} 2500 + +# HELP test_summary0 This is my summary +# TYPE test_summary0 summary +test_summary0{label1="value1",label2="value2",quantile="0.1"} 1 +test_summary0{label1="value1",label2="value2",quantile="0.5"} 5 +test_summary0{label1="value1",label2="value2",quantile="0.99"} 8 +test_summary0_sum{label1="value1",label2="value2"} 5000 +test_summary0_count{label1="value1",label2="value2"} 1000 +` + +func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, rms) + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + want := td.attributes + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + + e1 := []testExpectation{ + assertMetricPresent("test_counter0", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts1), + compareDoubleValue(1), + compareAttributes(map[string]string{"label1": "value1", "label2": "value2"}), + }, + }, + }), + assertMetricPresent("test_gauge0", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(10), + compareAttributes(map[string]string{"label1": "value1", "label2": "value2"}), + }, + }, + }), + assertMetricPresent("test_histogram0", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + 
compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts1), + compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), + compareHistogramAttributes(map[string]string{"label1": "value1", "label2": "value2"}), + }, + }, + }), + assertMetricPresent("test_summary0", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummary(1000, 5000, [][]float64{{0.1, 1}, {0.5, 5}, {0.99, 8}}), + compareSummaryAttributes(map[string]string{"label1": "value1", "label2": "value2"}), + }, + }, + }), + } + doCompare(t, "scrape-label-config-test", want, rms[0], e1) +} + +const targetLabelNameLimit = ` +# HELP test_gauge0 This is my gauge +# TYPE test_gauge0 gauge +test_gauge0{label1="value1",labelNameExceedingLimit="value2"} 10 + +# HELP test_counter0 This is my counter +# TYPE test_counter0 counter +test_counter0{label1="value1",label2="value2"} 1 +` + +func TestLabelNameLimitConfig(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: targetLabelLimits1}, + }, + validateFunc: verifyLabelConfigTarget1, + }, + { + name: "target2", + pages: []mockPrometheusResponse{ + {code: 200, data: targetLabelNameLimit}, + }, + validateFunc: verifyFailedScrape, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + // set label limit in scrape_config + for _, scrapeCfg := range cfg.ScrapeConfigs { + scrapeCfg.LabelNameLengthLimit = 20 + } + }) +} + +const targetLabelValueLimit = ` +# HELP test_gauge0 This is my gauge +# TYPE test_gauge0 gauge +test_gauge0{label1="value1",label2="label-value-exceeding-limit"} 10 + +# HELP test_counter0 This is my counter +# TYPE test_counter0 counter +test_counter0{label1="value1",label2="value2"} 1 +` + +func TestLabelValueLimitConfig(t *testing.T) { + targets := []*testData{ + { + 
name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: targetLabelLimits1}, + }, + validateFunc: verifyLabelConfigTarget1, + }, + { + name: "target2", + pages: []mockPrometheusResponse{ + {code: 200, data: targetLabelValueLimit}, + }, + validateFunc: verifyFailedScrape, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + // set label name limit in scrape_config + for _, scrapeCfg := range cfg.ScrapeConfigs { + scrapeCfg.LabelValueLengthLimit = 25 + } + }) +} + +// for all metric types, testLabel has empty value +const emptyLabelValuesTarget1 = ` +# HELP test_gauge0 This is my gauge +# TYPE test_gauge0 gauge +test_gauge0{id="1",testLabel=""} 19 + +# HELP test_counter0 This is my counter +# TYPE test_counter0 counter +test_counter0{id="1",testLabel=""} 100 + +# HELP test_histogram0 This is my histogram +# TYPE test_histogram0 histogram +test_histogram0_bucket{id="1",testLabel="",le="0.1"} 1000 +test_histogram0_bucket{id="1",testLabel="",le="0.5"} 1500 +test_histogram0_bucket{id="1",testLabel="",le="1"} 2000 +test_histogram0_bucket{id="1",testLabel="",le="+Inf"} 2500 +test_histogram0_sum{id="1",testLabel=""} 5000 +test_histogram0_count{id="1",testLabel=""} 2500 + +# HELP test_summary0 This is my summary +# TYPE test_summary0 summary +test_summary0{id="1",testLabel="",quantile="0.1"} 1 +test_summary0{id="1",testLabel="",quantile="0.5"} 5 +test_summary0{id="1",testLabel="",quantile="0.99"} 8 +test_summary0_sum{id="1",testLabel=""} 5000 +test_summary0_count{id="1",testLabel=""} 1000 +` + +func verifyEmptyLabelValuesTarget1(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + want := td.attributes + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + + e1 := []testExpectation{ + assertMetricPresent("test_gauge0", + compareMetricType(pmetric.MetricTypeGauge), + 
[]dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + compareAttributes(map[string]string{"id": "1"}), + }, + }, + }), + assertMetricPresent("test_counter0", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"id": "1"}), + }, + }, + }), + assertMetricPresent("test_histogram0", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts1), + compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), + compareHistogramAttributes(map[string]string{"id": "1"}), + }, + }, + }), + assertMetricPresent("test_summary0", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummary(1000, 5000, [][]float64{{0.1, 1}, {0.5, 5}, {0.99, 8}}), + compareSummaryAttributes(map[string]string{"id": "1"}), + }, + }, + }), + } + doCompare(t, "scrape-empty-label-values-1", want, rms[0], e1) +} + +// target has two time series for both gauge and counter, only one time series has a value for the label "testLabel" +const emptyLabelValuesTarget2 = ` +# HELP test_gauge0 This is my gauge. 
+# TYPE test_gauge0 gauge +test_gauge0{id="1",testLabel=""} 19 +test_gauge0{id="2",testLabel="foobar"} 2 + +# HELP test_counter0 This is my counter +# TYPE test_counter0 counter +test_counter0{id="1",testLabel=""} 100 +test_counter0{id="2",testLabel="foobar"} 110 +` + +func verifyEmptyLabelValuesTarget2(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + want := td.attributes + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + + e1 := []testExpectation{ + assertMetricPresent("test_gauge0", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + compareAttributes(map[string]string{"id": "1"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(2), + compareAttributes(map[string]string{"id": "2", "testLabel": "foobar"}), + }, + }, + }), + assertMetricPresent("test_counter0", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"id": "1"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(110), + compareAttributes(map[string]string{"id": "2", "testLabel": "foobar"}), + }, + }, + }), + } + doCompare(t, "scrape-empty-label-values-2", want, rms[0], e1) +} + +func TestEmptyLabelValues(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: emptyLabelValuesTarget1}, + }, + validateFunc: verifyEmptyLabelValuesTarget1, + }, + { + name: "target2", + pages: []mockPrometheusResponse{ + {code: 200, data: emptyLabelValuesTarget2}, + }, + validateFunc: verifyEmptyLabelValuesTarget2, + }, + } + 
testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +const honorLabelsTarget = ` +# HELP test_gauge0 This is my gauge +# TYPE test_gauge0 gauge +test_gauge0{instance="hostname:8080",job="honor_labels_test",testLabel="value1"} 1 +` + +func verifyHonorLabelsFalse(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + want := td.attributes + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() + + doCompare(t, "honor_labels_false", want, rms[0], []testExpectation{ + assertMetricPresent("test_gauge0", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(1), + // job and instance labels must be prefixed with "exported_" + compareAttributes(map[string]string{"exported_job": "honor_labels_test", "exported_instance": "hostname:8080", "testLabel": "value1"}), + }, + }, + }), + }) +} + +// for all scalar metric types there are no labels +const emptyLabelsTarget1 = ` +# HELP test_gauge0 This is my gauge +# TYPE test_gauge0 gauge +test_gauge0 19 + +# HELP test_counter0 This is my counter +# TYPE test_counter0 counter +test_counter0 100 +` + +func verifyEmptyLabelsTarget1(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + want := td.attributes + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + + e1 := []testExpectation{ + assertMetricPresent( + "test_gauge0", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + compareAttributes(map[string]string{}), + }, + }, + }, + ), + assertMetricPresent( + "test_counter0", + 
compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{}), + }, + }, + }, + ), + } + doCompare(t, "scrape-empty-labels-1", want, rms[0], e1) +} + +func TestEmptyLabels(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: emptyLabelsTarget1}, + }, + validateFunc: verifyEmptyLabelsTarget1, + }, + } + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +func TestHonorLabelsFalseConfig(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: honorLabelsTarget}, + }, + validateFunc: verifyHonorLabelsFalse, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +func verifyHonorLabelsTrue(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + // job and instance label values should be honored from honorLabelsTarget + expectedAttributes := td.attributes + expectedAttributes.PutStr("service.name", "honor_labels_test") + expectedAttributes.PutStr("service.instance.id", "hostname:8080") + expectedAttributes.PutStr("net.host.port", "8080") + expectedAttributes.PutStr("net.host.name", "hostname") + + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() + + doCompare(t, "honor_labels_true", expectedAttributes, rms[0], []testExpectation{ + assertMetricPresent("test_gauge0", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(1), + compareAttributes(map[string]string{"testLabel": "value1"}), + }, + }, + }), + }) +} + +func TestHonorLabelsTrueConfig(t *testing.T) { + targets := 
[]*testData{ + { + name: "honor_labels_test", + pages: []mockPrometheusResponse{ + {code: 200, data: honorLabelsTarget}, + }, + validateFunc: verifyHonorLabelsTrue, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + // set label name limit in scrape_config + for _, scrapeCfg := range cfg.ScrapeConfigs { + scrapeCfg.HonorLabels = true + } + }) +} + +const targetRelabelJobInstance = ` +# HELP jvm_memory_bytes_used Used bytes of a given JVM memory area. +# TYPE jvm_memory_bytes_used gauge +jvm_memory_bytes_used{area="heap"} 100 +` + +func TestRelabelJobInstance(t *testing.T) { + targets := []*testData{ + { + name: "target1", + relabeledJob: "not-target1", + pages: []mockPrometheusResponse{ + {code: 200, data: targetRelabelJobInstance}, + }, + validateFunc: verifyRelabelJobInstance, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + for _, scrapeConfig := range cfg.ScrapeConfigs { + scrapeConfig.MetricRelabelConfigs = []*relabel.Config{ + { + // this config should replace the instance label with 'relabeled-instance' + Action: relabel.Replace, + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "instance", + Replacement: "relabeled-instance", + }, + { + // this config should replace the job label with 'not-target1' + Action: relabel.Replace, + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "job", + Replacement: "not-target1", + }, + } + } + }) +} + +func verifyRelabelJobInstance(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, rms) + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + wantAttributes := td.attributes + wantAttributes.PutStr("service.name", "not-target1") + wantAttributes.PutStr("service.instance.id", "relabeled-instance") + wantAttributes.PutStr("net.host.port", "") + wantAttributes.PutStr("net.host.name", "relabeled-instance") + + metrics1 := 
rms[0].ScopeMetrics().At(0).Metrics() + ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() + doCompare(t, "relabel-job-instance", wantAttributes, rms[0], []testExpectation{ + assertMetricPresent("jvm_memory_bytes_used", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"area": "heap"}), + }, + }, + }), + }) +} + +const targetResourceAttsInTargetInfo = ` +# HELP jvm_memory_bytes_used Used bytes of a given JVM memory area. +# TYPE jvm_memory_bytes_used gauge +jvm_memory_bytes_used{area="heap"} 100 +# HELP target_info has the resource attributes +# TYPE target_info gauge +target_info{foo="bar", team="infra"} 1 +` + +func TestTargetInfoResourceAttributes(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: targetResourceAttsInTargetInfo}, + }, + validateFunc: verifyTargetInfoResourceAttributes, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +func verifyTargetInfoResourceAttributes(t *testing.T, td *testData, rms []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, rms) + require.Greater(t, len(rms), 0, "At least one resource metric should be present") + + wantAttributes := td.attributes + wantAttributes.PutStr("foo", "bar") + wantAttributes.PutStr("team", "infra") + + metrics1 := rms[0].ScopeMetrics().At(0).Metrics() + ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() + doCompare(t, "relabel-job-instance", wantAttributes, rms[0], []testExpectation{ + assertMetricPresent("jvm_memory_bytes_used", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"area": "heap"}), + }, + }, + }), + }) +} diff --git 
a/collector/receiver/prometheusreceiver/metrics_receiver_metric_name_normalize_test.go b/collector/receiver/prometheusreceiver/metrics_receiver_metric_name_normalize_test.go new file mode 100644 index 0000000..255834c --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_receiver_metric_name_normalize_test.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var normalizeMetric = `# HELP http_connected connected clients +# TYPE http_connected counter +http_connected_total{method="post",port="6380"} 15 +http_connected_total{method="get",port="6380"} 12 +# HELP foo_gauge_total foo gauge with _total suffix +# TYPE foo_gauge_total gauge +foo_gauge_total{method="post",port="6380"} 7 +foo_gauge_total{method="get",port="6380"} 13 +# HELP http_connection_duration_seconds connection duration total +# TYPE http_connection_duration_seconds counter +# UNIT http_connection_duration_seconds seconds +http_connection_duration_seconds_total{method="post",port="6380"} 23 +http_connection_duration_seconds_total{method="get",port="6380"} 41 +# HELP foo_gauge_seconds foo gauge with unit suffix +# UNIT foo_gauge_seconds seconds +# TYPE foo_gauge_seconds gauge 
+foo_gauge_seconds{method="post",port="6380"} 732 +foo_gauge_seconds{method="get",port="6380"} 5 +# EOF +` + +// TestMetricNormalize validates that type's and unit's suffixes are correctly trimmed. +func TestMetricNormalize(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: normalizeMetric, useOpenMetrics: true}, + }, + normalizedName: true, + validateFunc: verifyNormalizeMetric, + }, + } + + registry := featuregate.NewRegistry() + _, err := registry.Register("pkg.translator.prometheus.NormalizeName", featuregate.StageBeta) + require.NoError(t, err) + + testComponent(t, targets, false, "", registry) +} + +func verifyNormalizeMetric(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("http_connected", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(15), + compareAttributes(map[string]string{"method": "post", "port": "6380"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(12), + compareAttributes(map[string]string{"method": "get", "port": "6380"}), + }, + }, + }), + assertMetricPresent("foo_gauge_total", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(7), + compareAttributes(map[string]string{"method": "post", "port": "6380"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(13), + 
compareAttributes(map[string]string{"method": "get", "port": "6380"}), + }, + }, + }), + assertMetricPresent("http_connection_duration", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(23), + compareAttributes(map[string]string{"method": "post", "port": "6380"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(41), + compareAttributes(map[string]string{"method": "get", "port": "6380"}), + }, + }, + }), + assertMetricPresent("foo_gauge", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(732), + compareAttributes(map[string]string{"method": "post", "port": "6380"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(5), + compareAttributes(map[string]string{"method": "get", "port": "6380"}), + }, + }, + }), + } + doCompareNormalized(t, "scrape-metricNormalize-1", wantAttributes, m1, e1, true) +} diff --git a/collector/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go b/collector/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go new file mode 100644 index 0000000..a0b45f3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go @@ -0,0 +1,406 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver + +import ( + "fmt" + "math" + "testing" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + + "github.com/prometheus/prometheus/model/value" + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var staleNaNsPage1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 100 +http_requests_total{method="post",code="400"} 5 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1000 +http_request_duration_seconds_bucket{le="0.5"} 1500 +http_request_duration_seconds_bucket{le="1"} 2000 +http_request_duration_seconds_bucket{le="+Inf"} 2500 +http_request_duration_seconds_sum 5000 +http_request_duration_seconds_count 2500 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 5 +rpc_duration_seconds{quantile="0.99"} 8 +rpc_duration_seconds_sum 5000 +rpc_duration_seconds_count 1000 +` + +var ( + totalScrapes = 10 +) + +// TestStaleNaNs validates that staleness marker gets generated when the timeseries is no longer present +func TestStaleNaNs(t *testing.T) { + var mockResponses []mockPrometheusResponse + for i := 0; i < totalScrapes; i++ { + if i%2 == 0 { + mockResponses = append(mockResponses, mockPrometheusResponse{ + code: 200, + data: staleNaNsPage1, + }) + } else { + mockResponses = append(mockResponses, mockPrometheusResponse{ + code: 500, + data: "", + }) + } + } + targets := []*testData{ + { + name: "target1", + pages: mockResponses, + validateFunc: verifyStaleNaNs, + validateScrapes: true, + }, + } + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +func verifyStaleNaNs(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumTotalScrapeResults(t, td, resourceMetrics) + metrics1 := resourceMetrics[0].ScopeMetrics().At(0).Metrics() + ts := getTS(metrics1) + for i := 0; i < totalScrapes; i++ { + if i%2 == 0 { + verifyStaleNaNsSuccessfulScrape(t, td, resourceMetrics[i], ts, i+1) + } else { + verifyStaleNaNsFailedScrape(t, td, resourceMetrics[i], ts, i+1) + } + } +} + +func verifyStaleNaNsSuccessfulScrape(t *testing.T, td *testData, resourceMetric pmetric.ResourceMetrics, startTimestamp pcommon.Timestamp, iteration int) { + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(resourceMetric)) + wantAttributes := td.attributes // should want attribute be part of complete target or each scrape? 
+ metrics1 := resourceMetric.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(startTimestamp), + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(startTimestamp), + compareTimestamp(ts1), + compareDoubleValue(5), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(startTimestamp), + compareHistogramTimestamp(ts1), + compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(startTimestamp), + compareSummaryTimestamp(ts1), + compareSummary(1000, 5000, [][]float64{{0.01, 1}, {0.9, 5}, {0.99, 8}}), + }, + }, + }), + } + doCompare(t, fmt.Sprintf("validScrape-scrape-%d", iteration), wantAttributes, resourceMetric, e1) +} + +func verifyStaleNaNsFailedScrape(t *testing.T, td *testData, resourceMetric pmetric.ResourceMetrics, startTimestamp pcommon.Timestamp, iteration int) { + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(resourceMetric)) + 
wantAttributes := td.attributes + allMetrics := getMetrics(resourceMetric) + assertUp(t, 0, allMetrics) + + metrics1 := resourceMetric.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + assertNumberPointFlagNoRecordedValue(), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(startTimestamp), + compareTimestamp(ts1), + assertNumberPointFlagNoRecordedValue(), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(startTimestamp), + compareTimestamp(ts1), + assertNumberPointFlagNoRecordedValue(), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(startTimestamp), + compareHistogramTimestamp(ts1), + assertHistogramPointFlagNoRecordedValue(), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(startTimestamp), + compareSummaryTimestamp(ts1), + assertSummaryPointFlagNoRecordedValue(), + }, + }, + }), + } + doCompare(t, fmt.Sprintf("failedScrape-scrape-%d", iteration), wantAttributes, resourceMetric, e1) +} + +// Prometheus gauge metric can be set to NaN, a use case could be when value 0 is not representable +// Prometheus summary metric quantiles can have NaN after getting expired +var normalNaNsPage1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads NaN + +# HELP 
redis_connected_clients Redis connected clients +redis_connected_clients{name="rough-snowflake-web",port="6380"} NaN + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} NaN +rpc_duration_seconds{quantile="0.9"} NaN +rpc_duration_seconds{quantile="0.99"} NaN +rpc_duration_seconds_sum 5000 +rpc_duration_seconds_count 1000 +` + +// TestNormalNaNs validates the output of receiver when testdata contains NaN values +func TestNormalNaNs(t *testing.T) { + // 1. setup input data + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: normalNaNsPage1}, + }, + validateFunc: verifyNormalNaNs, + }, + } + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +func verifyNormalNaNs(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 3 metrics + 5 internal scraper metrics + assert.Equal(t, 8, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + assertNormalNan(), + }, + }, + }), + assertMetricPresent("redis_connected_clients", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareAttributes(map[string]string{"name": "rough-snowflake-web", "port": "6380", internal.GCPOpsAgentUntypedMetricKey: "true"}), + assertNormalNan(), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + 
compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummary(1000, 5000, [][]float64{{0.01, math.Float64frombits(value.NormalNaN)}, + {0.9, math.Float64frombits(value.NormalNaN)}, {0.99, math.Float64frombits(value.NormalNaN)}}), + }, + }, + }), + } + doCompare(t, "scrape-NormalNaN-1", wantAttributes, m1, e1) +} + +var infPage1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads +Inf + +# HELP redis_connected_clients Redis connected clients +redis_connected_clients{name="rough-snowflake-web",port="6380"} -Inf + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} +Inf + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} +Inf +rpc_duration_seconds{quantile="0.9"} +Inf +rpc_duration_seconds{quantile="0.99"} +Inf +rpc_duration_seconds_sum 5000 +rpc_duration_seconds_count 1000 +` + +func TestInfValues(t *testing.T) { + // 1. 
setup input data + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: infPage1}, + }, + validateFunc: verifyInfValues, + }, + } + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +func verifyInfValues(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(math.Inf(1)), + }, + }, + }), + assertMetricPresent("redis_connected_clients", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareAttributes(map[string]string{"name": "rough-snowflake-web", "port": "6380", internal.GCPOpsAgentUntypedMetricKey: "true"}), + compareDoubleValue(math.Inf(-1)), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts1), + compareDoubleValue(math.Inf(1)), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummary(1000, 5000, [][]float64{{0.01, math.Inf(1)}, {0.9, math.Inf(1)}, {0.99, math.Inf(1)}}), + }, + }, + }), + } 
+ doCompare(t, "scrape-InfValues-1", wantAttributes, m1, e1) +} diff --git a/collector/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go b/collector/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go new file mode 100644 index 0000000..498d7dd --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go @@ -0,0 +1,270 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver + +import ( + "log" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +const testDir = "./testdata/openmetrics/" + +var skippedTests = map[string]struct{}{ + "bad_clashing_names_0": {}, "bad_clashing_names_1": {}, "bad_clashing_names_2": {}, + "bad_counter_values_0": {}, "bad_counter_values_1": {}, "bad_counter_values_2": {}, + "bad_counter_values_3": {}, "bad_counter_values_5": {}, "bad_counter_values_6": {}, + "bad_counter_values_10": {}, "bad_counter_values_11": {}, "bad_counter_values_12": {}, + "bad_counter_values_13": {}, "bad_counter_values_14": {}, "bad_counter_values_15": {}, + "bad_counter_values_16": {}, "bad_counter_values_17": {}, "bad_counter_values_18": {}, + "bad_counter_values_19": {}, "bad_exemplars_on_unallowed_samples_2": {}, "bad_exemplar_timestamp_0": 
{}, + "bad_exemplar_timestamp_1": {}, "bad_exemplar_timestamp_2": {}, "bad_grouping_or_ordering_0": {}, + "bad_grouping_or_ordering_2": {}, "bad_grouping_or_ordering_3": {}, "bad_grouping_or_ordering_4": {}, + "bad_grouping_or_ordering_5": {}, "bad_grouping_or_ordering_6": {}, "bad_grouping_or_ordering_7": {}, + "bad_grouping_or_ordering_8": {}, "bad_grouping_or_ordering_9": {}, "bad_grouping_or_ordering_10": {}, + "bad_histograms_0": {}, "bad_histograms_1": {}, "bad_histograms_2": {}, "bad_histograms_3": {}, + "bad_histograms_6": {}, "bad_histograms_7": {}, "bad_histograms_8": {}, + "bad_info_and_stateset_values_0": {}, "bad_info_and_stateset_values_1": {}, "bad_metadata_in_wrong_place_0": {}, + "bad_metadata_in_wrong_place_1": {}, "bad_metadata_in_wrong_place_2": {}, + "bad_missing_or_invalid_labels_for_a_type_1": {}, "bad_missing_or_invalid_labels_for_a_type_3": {}, + "bad_missing_or_invalid_labels_for_a_type_4": {}, "bad_missing_or_invalid_labels_for_a_type_6": {}, + "bad_missing_or_invalid_labels_for_a_type_7": {}, "bad_repeated_metadata_0": {}, + "bad_repeated_metadata_1": {}, "bad_repeated_metadata_3": {}, "bad_stateset_info_values_0": {}, + "bad_stateset_info_values_1": {}, "bad_stateset_info_values_2": {}, "bad_stateset_info_values_3": {}, + "bad_timestamp_4": {}, "bad_timestamp_5": {}, "bad_timestamp_7": {}, "bad_unit_6": {}, "bad_unit_7": {}, + "bad_exemplars_on_unallowed_samples_0": {}, "bad_exemplars_on_unallowed_metric_types_0": {}, + "bad_exemplars_on_unallowed_samples_1": {}, "bad_exemplars_on_unallowed_metric_types_1": {}, + "bad_exemplars_on_unallowed_samples_3": {}, "bad_exemplars_on_unallowed_metric_types_2": {}, +} + +func verifyPositiveTarget(t *testing.T, _ *testData, mds []pmetric.ResourceMetrics) { + require.Greater(t, len(mds), 0, "At least one resource metric should be present") + metrics := getMetrics(mds[0]) + assertUp(t, 1, metrics) +} + +// Test open metrics positive test cases +func TestOpenMetricsPositive(t *testing.T) { + if 
runtime.GOOS == "windows" { + t.Skip("skipping test on windows, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/10148") + } + targetsMap := getOpenMetricsTestData(false) + var targets []*testData + for k, v := range targetsMap { + testData := &testData{ + name: k, + pages: []mockPrometheusResponse{ + {code: 200, data: v, useOpenMetrics: true}, + }, + validateFunc: verifyPositiveTarget, + validateScrapes: true, + } + targets = append(targets, testData) + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +func verifyNegativeTarget(t *testing.T, td *testData, mds []pmetric.ResourceMetrics) { + // failing negative tests are skipped since prometheus scrape package is currently not fully + // compatible with OpenMetrics tests and successfully scrapes some invalid metrics + // see: https://github.com/prometheus/prometheus/issues/9699 + if _, ok := skippedTests[td.name]; ok { + t.Skip("skipping failing negative OpenMetrics parser tests") + } + + require.Greater(t, len(mds), 0, "At least one resource metric should be present") + metrics := getMetrics(mds[0]) + assertUp(t, 0, metrics) +} + +// Test open metrics negative test cases +func TestOpenMetricsNegative(t *testing.T) { + + targetsMap := getOpenMetricsTestData(true) + var targets []*testData + for k, v := range targetsMap { + testData := &testData{ + name: k, + pages: []mockPrometheusResponse{ + {code: 200, data: v, useOpenMetrics: true}, + }, + validateFunc: verifyNegativeTarget, + validateScrapes: true, + } + targets = append(targets, testData) + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +// reads test data from testdata/openmetrics directory +func getOpenMetricsTestData(negativeTestsOnly bool) map[string]string { + testDir, err := os.Open(testDir) + if err != nil { + log.Fatalf("failed opening openmetrics test directory") + } + defer testDir.Close() + + // read all test file names in testdata/openmetrics + testList, _ := 
testDir.Readdirnames(0) + + targetsData := make(map[string]string) + for _, testName := range testList { + // ignore hidden files + if strings.HasPrefix(testName, ".") { + continue + } + if negativeTestsOnly && !strings.Contains(testName, "bad") { + continue + } else if !negativeTestsOnly && strings.Contains(testName, "bad") { + continue + } + if testData, err := readTestCase(testName); err == nil { + targetsData[testName] = testData + } + } + return targetsData +} + +func readTestCase(testName string) (string, error) { + filePath := filepath.Join(testDir, testName, "metrics") + content, err := os.ReadFile(filePath) + if err != nil { + log.Printf("failed opening file: %s", filePath) + return "", err + } + return string(content), nil +} + +// info and stateset metrics are converted to non-monotonic sums +var infoAndStatesetMetrics = `# TYPE foo info +foo_info{entity="controller",name="prettyname",version="8.2.7"} 1.0 +foo_info{entity="replica",name="prettiername",version="8.1.9"} 1.0 +# TYPE bar stateset +bar{entity="controller",foo="a"} 1.0 +bar{entity="controller",foo="bb"} 0.0 +bar{entity="controller",foo="ccc"} 0.0 +bar{entity="replica",foo="a"} 1.0 +bar{entity="replica",foo="bb"} 0.0 +bar{entity="replica",foo="ccc"} 1.0 +# EOF +` + +// TestInfoStatesetMetrics validates the translation of info and stateset +// metrics +func TestInfoStatesetMetrics(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: infoAndStatesetMetrics, useOpenMetrics: true}, + }, + validateFunc: verifyInfoStatesetMetrics, + validateScrapes: true, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) + +} + +func verifyInfoStatesetMetrics(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 2 metrics + 5 internal scraper metrics + assert.Equal(t, 7, metricsCount(m1)) + + wantAttributes := 
td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("foo", + compareMetricIsMonotonic(false), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(1.0), + compareAttributes(map[string]string{"entity": "controller", "name": "prettyname", "version": "8.2.7"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(1.0), + compareAttributes(map[string]string{"entity": "replica", "name": "prettiername", "version": "8.1.9"}), + }, + }, + }), + assertMetricPresent("bar", + compareMetricIsMonotonic(false), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(1.0), + compareAttributes(map[string]string{"entity": "controller", "foo": "a"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(0.0), + compareAttributes(map[string]string{"entity": "controller", "foo": "bb"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(0.0), + compareAttributes(map[string]string{"entity": "controller", "foo": "ccc"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(1.0), + compareAttributes(map[string]string{"entity": "replica", "foo": "a"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(0.0), + compareAttributes(map[string]string{"entity": "replica", "foo": "bb"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(1.0), + compareAttributes(map[string]string{"entity": "replica", "foo": "ccc"}), + }, + }, + }), + } + doCompare(t, "scrape-infostatesetmetrics-1", wantAttributes, m1, e1) +} diff --git 
a/collector/receiver/prometheusreceiver/metrics_receiver_target_allocator_test.go b/collector/receiver/prometheusreceiver/metrics_receiver_target_allocator_test.go new file mode 100644 index 0000000..5cc7a77 --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_receiver_target_allocator_test.go @@ -0,0 +1,554 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !race + +package prometheusreceiver + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + commonconfig "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + promConfig "github.com/prometheus/prometheus/config" + promHTTP "github.com/prometheus/prometheus/discovery/http" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +type MockTargetAllocator struct { + mu sync.Mutex // mu protects the fields below. 
+ endpoints map[string][]mockTargetAllocatorResponse + accessIndex map[string]*atomic.Int32 + wg *sync.WaitGroup + srv *httptest.Server + waitIndex map[string]int +} + +type mockTargetAllocatorResponse struct { + code int + data []byte +} + +type mockTargetAllocatorResponseRaw struct { + code int + data interface{} +} + +type hTTPSDResponse struct { + Targets []string `json:"targets"` + Labels map[model.LabelName]model.LabelValue `json:"labels"` +} + +type expectedTestResultJobMap struct { + Targets []string + Labels model.LabelSet +} + +type expectedTestResult struct { + empty bool + jobMap map[string]expectedTestResultJobMap +} + +func (mta *MockTargetAllocator) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + mta.mu.Lock() + defer mta.mu.Unlock() + + iptr, ok := mta.accessIndex[req.URL.Path] + if !ok { + rw.WriteHeader(404) + return + } + index := int(iptr.Load()) + iptr.Add(1) + pages := mta.endpoints[req.URL.Path] + if index >= len(pages) { + rw.WriteHeader(404) + return + } + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(pages[index].code) + _, _ = rw.Write(pages[index].data) + + // release WaitGroup after all endpoints have been hit by Prometheus SD once. 
After that we will call them manually + wait := mta.waitIndex[req.URL.Path] + if index == wait { + mta.wg.Done() + } +} + +func (mta *MockTargetAllocator) Start() { + mta.srv.Start() +} + +func (mta *MockTargetAllocator) Stop() { + mta.srv.Close() +} + +func transformTAResponseMap(rawResponses map[string][]mockTargetAllocatorResponseRaw) (map[string][]mockTargetAllocatorResponse, map[string]*atomic.Int32, error) { + responsesMap := make(map[string][]mockTargetAllocatorResponse) + responsesIndexMap := make(map[string]*atomic.Int32) + for path, responsesRaw := range rawResponses { + var responses []mockTargetAllocatorResponse + for _, responseRaw := range responsesRaw { + respBodyBytes, err := json.Marshal(responseRaw.data) + if err != nil { + return nil, nil, err + } + responses = append(responses, mockTargetAllocatorResponse{ + code: responseRaw.code, + data: respBodyBytes, + }) + } + responsesMap[path] = responses + + v := &atomic.Int32{} + responsesIndexMap[path] = v + } + return responsesMap, responsesIndexMap, nil +} + +func setupMockTargetAllocator(responses Responses) (*MockTargetAllocator, error) { + responsesMap, responsesIndexMap, err := transformTAResponseMap(responses.responses) + if err != nil { + return nil, err + } + + mockTA := &MockTargetAllocator{ + endpoints: responsesMap, + accessIndex: responsesIndexMap, + waitIndex: responses.releaserMap, + wg: &sync.WaitGroup{}, + } + mockTA.srv = httptest.NewUnstartedServer(mockTA) + mockTA.wg.Add(len(responsesMap)) + + return mockTA, nil +} + +func labelSetTargetsToList(sets []model.LabelSet) []string { + var result []string + for _, set := range sets { + address := set["__address__"] + result = append(result, string(address)) + } + return result +} + +type Responses struct { + releaserMap map[string]int + responses map[string][]mockTargetAllocatorResponseRaw +} + +func TestTargetAllocatorJobRetrieval(t *testing.T) { + for _, tc := range []struct { + desc string + responses Responses + cfg *Config + want 
expectedTestResult + }{ + { + desc: "default", + responses: Responses{ + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]interface{}{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + "job2": { + "job_name": "job2", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + }, + "/jobs/job1/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }}, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }}, + }}, + }, + "/jobs/job2/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }}, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }}, + }}, + }, + }, + }, + cfg: &Config{ + PrometheusConfig: &promConfig.Config{}, + TargetAllocator: &targetAllocator{ + Interval: 10 * time.Second, + CollectorID: "collector-1", + 
HTTPSDConfig: &promHTTP.SDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{ + BasicAuth: &commonconfig.BasicAuth{ + Username: "user", + Password: "aPassword", + }, + }, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + }, + want: expectedTestResult{ + empty: false, + jobMap: map[string]expectedTestResultJobMap{ + "job1": { + Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + "job2": {Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }}, + }, + }, + }, + { + desc: "update labels and targets", + responses: Responses{ + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]interface{}{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + "job2": { + "job_name": "job2", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + }, + "/jobs/job1/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }}, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + "test": "aTest", + }}, + }}, + 
}, + "/jobs/job2/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }}, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + }}, + }}, + }, + }, + }, + cfg: &Config{ + PrometheusConfig: &promConfig.Config{}, + TargetAllocator: &targetAllocator{ + Interval: 10 * time.Second, + CollectorID: "collector-1", + HTTPSDConfig: &promHTTP.SDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{}, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + }, + want: expectedTestResult{ + empty: false, + jobMap: map[string]expectedTestResultJobMap{ + "job1": { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + "test": "aTest", + }, + }, + "job2": {Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + }}, + }, + }, + }, + { + desc: "update job list", + responses: Responses{ + releaserMap: map[string]int{ + "/scrape_configs": 1, + }, + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]interface{}{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + "job2": { + "job_name": "job2", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + 
mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]interface{}{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + "job3": { + "job_name": "job3", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + }, + "/jobs/job1/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }}, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }}, + }}, + }, + "/jobs/job3/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }}, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + {Targets: []string{"10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }}, + }}, + }, + }, + }, + cfg: &Config{ + PrometheusConfig: &promConfig.Config{}, + TargetAllocator: &targetAllocator{ + Interval: 10 * time.Second, + CollectorID: "collector-1", + HTTPSDConfig: &promHTTP.SDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{}, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + }, + want: expectedTestResult{ + empty: false, + jobMap: map[string]expectedTestResultJobMap{ + "job1": { + Targets: []string{"localhost:9090"}, + Labels: 
map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + "job3": {Targets: []string{"10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }}, + }, + }, + }, + { + desc: "endpoint is not reachable", + responses: Responses{ + releaserMap: map[string]int{ + "/scrape_configs": 1, // we are too fast if we ignore the first wait a tick + }, + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 404, data: map[string]map[string]interface{}{}}, + mockTargetAllocatorResponseRaw{code: 404, data: map[string]map[string]interface{}{}}, + }, + }, + }, + cfg: &Config{ + PrometheusConfig: &promConfig.Config{}, + TargetAllocator: &targetAllocator{ + Interval: 50 * time.Millisecond, + CollectorID: "collector-1", + HTTPSDConfig: &promHTTP.SDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{}, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + }, + want: expectedTestResult{ + empty: true, + jobMap: map[string]expectedTestResultJobMap{}, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + ctx := context.Background() + cms := new(consumertest.MetricsSink) + + allocator, err := setupMockTargetAllocator(tc.responses) + require.NoError(t, err, "Failed to create allocator", tc.responses) + + allocator.Start() + defer allocator.Stop() + + tc.cfg.TargetAllocator.Endpoint = allocator.srv.URL // set service URL with the automatic generated one + receiver := newPrometheusReceiver(receivertest.NewNopCreateSettings(), tc.cfg, cms, featuregate.GlobalRegistry()) + + require.NoError(t, receiver.Start(ctx, componenttest.NewNopHost())) + + allocator.wg.Wait() + + providers := receiver.discoveryManager.Providers() + if tc.want.empty { + // if no base config is supplied and the job retrieval fails then no configuration should be found + require.Len(t, providers, 0) + 
return + } + + require.NotNil(t, providers) + + for _, provider := range providers { + require.IsType(t, &promHTTP.Discovery{}, provider.Discoverer()) + httpDiscovery := provider.Discoverer().(*promHTTP.Discovery) + refresh, err := httpDiscovery.Refresh(ctx) + require.NoError(t, err) + + // are http configs applied? + sdConfig := provider.Config().(*promHTTP.SDConfig) + require.Equal(t, tc.cfg.TargetAllocator.HTTPSDConfig.HTTPClientConfig, sdConfig.HTTPClientConfig) + + for _, group := range refresh { + found := false + for job, s := range tc.want.jobMap { + // find correct job to compare to. + if !strings.Contains(group.Source, job) { + continue + } + // compare targets + require.Equal(t, s.Targets, labelSetTargetsToList(group.Targets)) + + // compare labels and add __meta_url as this label gets automatically added by the SD. + // which is identical to the source url + s.Labels["__meta_url"] = model.LabelValue(sdConfig.URL) + require.Equal(t, s.Labels, group.Labels) + found = true + } + require.True(t, found, "Returned job is not defined in expected values", group) + } + } + }) + } +} diff --git a/collector/receiver/prometheusreceiver/metrics_receiver_test.go b/collector/receiver/prometheusreceiver/metrics_receiver_test.go new file mode 100644 index 0000000..942cf63 --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_receiver_test.go @@ -0,0 +1,1539 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusreceiver + +import ( + "testing" + "time" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + + "github.com/prometheus/common/model" + promConfig "github.com/prometheus/prometheus/config" + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pmetric" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Test data and validation functions for all four core metrics for Prometheus Receiver. +// Make sure every page has a gauge, we are relying on it to figure out the start time if needed + +// target1 has one gauge, two counts of a same family, one histogram and one summary. We are expecting the both +// successful scrapes will produce all metrics using the first scrape's timestamp as start time. +var target1Page1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 100 +http_requests_total{method="post",code="400"} 5 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1000 +http_request_duration_seconds_bucket{le="0.5"} 1500 +http_request_duration_seconds_bucket{le="1"} 2000 +http_request_duration_seconds_bucket{le="+Inf"} 2500 +http_request_duration_seconds_sum 5000 +http_request_duration_seconds_count 2500 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 5 +rpc_duration_seconds{quantile="0.99"} 8 +rpc_duration_seconds_sum 5000 +rpc_duration_seconds_count 1000 +` + +var target1Page2 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 18 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 199 +http_requests_total{method="post",code="400"} 12 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1100 +http_request_duration_seconds_bucket{le="0.5"} 1600 +http_request_duration_seconds_bucket{le="1"} 2100 +http_request_duration_seconds_bucket{le="+Inf"} 2600 +http_request_duration_seconds_sum 5050 +http_request_duration_seconds_count 2600 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 6 +rpc_duration_seconds{quantile="0.99"} 8 +rpc_duration_seconds_sum 5002 +rpc_duration_seconds_count 1001 +` + +// target1Page3 has lower values than previous scrape. +// So, even after seeing a failed scrape, start_timestamp should be reset for target1Page3 +var target1Page3 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 99 +http_requests_total{method="post",code="400"} 3 + +# HELP http_request_duration_seconds A histogram of the request duration. 
+# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 900 +http_request_duration_seconds_bucket{le="0.5"} 1400 +http_request_duration_seconds_bucket{le="1"} 1900 +http_request_duration_seconds_bucket{le="+Inf"} 2400 +http_request_duration_seconds_sum 4900 +http_request_duration_seconds_count 2400 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 4 +rpc_duration_seconds{quantile="0.99"} 6 +rpc_duration_seconds_sum 4900 +rpc_duration_seconds_count 900 +` + +func verifyTarget1(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts1), + compareDoubleValue(5), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: 
[]histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts1), + compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummary(1000, 5000, [][]float64{{0.01, 1}, {0.9, 5}, {0.99, 8}}), + }, + }, + }), + } + doCompare(t, "scrape1", wantAttributes, m1, e1) + + m2 := resourceMetrics[1] + // m2 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m2)) + + metricsScrape2 := m2.ScopeMetrics().At(0).Metrics() + ts2 := getTS(metricsScrape2) + e2 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts2), + compareDoubleValue(18), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts2), + compareDoubleValue(199), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts2), + compareDoubleValue(12), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + // TODO: Prometheus Receiver Issue- start_timestamp are incorrect for Summary and Histogram metrics after a failed scrape (issue not yet posted on collector-contrib repo) + compareHistogramStartTimestamp(ts1), + 
compareHistogramTimestamp(ts2), + compareHistogram(2600, 5050, []uint64{1100, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + // TODO: Prometheus Receiver Issue- start_timestamp are incorrect for Summary and Histogram metrics after a failed scrape (issue not yet posted on collector-contrib repo) + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts2), + compareSummary(1001, 5002, [][]float64{{0.01, 1}, {0.9, 6}, {0.99, 8}}), + }, + }, + }), + } + doCompare(t, "scrape2", wantAttributes, m2, e2) + + m3 := resourceMetrics[2] + // m3 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m3)) + metricsScrape3 := m3.ScopeMetrics().At(0).Metrics() + ts3 := getTS(metricsScrape3) + e3 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts3), + compareDoubleValue(16), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + // TODO: #6360 Prometheus Receiver Issue- start_timestamp should reset if the prior scrape had higher value + compareStartTimestamp(ts3), + compareTimestamp(ts3), + compareDoubleValue(99), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + // TODO: #6360 Prometheus Receiver Issue- start_timestamp should reset if the prior scrape had higher value + compareStartTimestamp(ts3), + compareTimestamp(ts3), + compareDoubleValue(3), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + 
compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + // TODO: #6360 Prometheus Receiver Issue- start_timestamp should reset if the prior scrape had higher value + compareHistogramStartTimestamp(ts3), + compareHistogramTimestamp(ts3), + compareHistogram(2400, 4900, []uint64{900, 500, 500, 500}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + // TODO: #6360 Prometheus Receiver Issue- start_timestamp should reset if the prior scrape had higher value + compareSummaryStartTimestamp(ts3), + compareSummaryTimestamp(ts3), + compareSummary(900, 4900, [][]float64{{0.01, 1}, {0.9, 4}, {0.99, 6}}), + }, + }, + }), + } + doCompare(t, "scrape3", wantAttributes, m3, e3) +} + +// target2 is going to have 5 pages, and there's a newly added item on the 2nd page. +// with the 4th page, we are simulating a reset (values smaller than previous), start times should be from +// this run for the 4th and 5th scrapes. +var target2Page1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 18 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{method="post",code="200",le="1"} 8 +http_request_duration_seconds_bucket{method="post",code="200",le="+Inf"} 10 +http_request_duration_seconds_sum{method="post",code="200"} 7 +http_request_duration_seconds_count{method="post",code="200"} 10 +http_request_duration_seconds_bucket{method="post",code="400",le="1"} 30 +http_request_duration_seconds_bucket{method="post",code="400",le="+Inf"} 50 +http_request_duration_seconds_sum{method="post",code="400"} 25 +http_request_duration_seconds_count{method="post",code="400"} 50 + +# HELP http_requests_total The total number of HTTP requests. 
+# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 10 +http_requests_total{method="post",code="400"} 50 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{code="0" quantile="0.5"} 47 +rpc_duration_seconds_sum{code="0"} 100 +rpc_duration_seconds_count{code="0"} 50 +rpc_duration_seconds{code="5" quantile="0.5"} 35 +rpc_duration_seconds_sum{code="5"} 180 +rpc_duration_seconds_count{code="5"} 400 +` + +var target2Page2 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{method="post",code="200",le="1"} 40 +http_request_duration_seconds_bucket{method="post",code="200",le="+Inf"} 50 +http_request_duration_seconds_sum{method="post",code="200"} 43 +http_request_duration_seconds_count{method="post",code="200"} 50 +http_request_duration_seconds_bucket{method="post",code="300",le="1"} 3 +http_request_duration_seconds_bucket{method="post",code="300",le="+Inf"} 3 +http_request_duration_seconds_sum{method="post",code="300"} 2 +http_request_duration_seconds_count{method="post",code="300"} 3 +http_request_duration_seconds_bucket{method="post",code="400",le="1"} 35 +http_request_duration_seconds_bucket{method="post",code="400",le="+Inf"} 60 +http_request_duration_seconds_sum{method="post",code="400"} 30 +http_request_duration_seconds_count{method="post",code="400"} 60 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 50 +http_requests_total{method="post",code="300"} 3 +http_requests_total{method="post",code="400"} 60 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{code="0" quantile="0.5"} 57 +rpc_duration_seconds_sum{code="0"} 110 +rpc_duration_seconds_count{code="0"} 60 +rpc_duration_seconds{code="3" quantile="0.5"} 42 +rpc_duration_seconds_sum{code="3"} 50 +rpc_duration_seconds_count{code="3"} 30 +rpc_duration_seconds{code="5" quantile="0.5"} 45 +rpc_duration_seconds_sum{code="5"} 190 +rpc_duration_seconds_count{code="5"} 410 +` + +var target2Page3 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{method="post",code="200",le="1"} 40 +http_request_duration_seconds_bucket{method="post",code="200",le="+Inf"} 50 +http_request_duration_seconds_sum{method="post",code="200"} 43 +http_request_duration_seconds_count{method="post",code="200"} 50 +http_request_duration_seconds_bucket{method="post",code="300",le="1"} 3 +http_request_duration_seconds_bucket{method="post",code="300",le="+Inf"} 5 +http_request_duration_seconds_sum{method="post",code="300"} 7 +http_request_duration_seconds_count{method="post",code="300"} 5 +http_request_duration_seconds_bucket{method="post",code="400",le="1"} 35 +http_request_duration_seconds_bucket{method="post",code="400",le="+Inf"} 60 +http_request_duration_seconds_sum{method="post",code="400"} 30 +http_request_duration_seconds_count{method="post",code="400"} 60 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 50 +http_requests_total{method="post",code="300"} 5 +http_requests_total{method="post",code="400"} 60 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{code="0" quantile="0.5"} 67 +rpc_duration_seconds_sum{code="0"} 120 +rpc_duration_seconds_count{code="0"} 70 +rpc_duration_seconds{code="3" quantile="0.5"} 52 +rpc_duration_seconds_sum{code="3"} 60 +rpc_duration_seconds_count{code="3"} 40 +rpc_duration_seconds{code="5" quantile="0.5"} 55 +rpc_duration_seconds_sum{code="5"} 200 +rpc_duration_seconds_count{code="5"} 420 +` + +var target2Page4 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{method="post",code="200",le="1"} 40 +http_request_duration_seconds_bucket{method="post",code="200",le="+Inf"} 49 +http_request_duration_seconds_sum{method="post",code="200"} 42 +http_request_duration_seconds_count{method="post",code="200"} 49 +http_request_duration_seconds_bucket{method="post",code="300",le="1"} 2 +http_request_duration_seconds_bucket{method="post",code="300",le="+Inf"} 3 +http_request_duration_seconds_sum{method="post",code="300"} 4 +http_request_duration_seconds_count{method="post",code="300"} 3 +http_request_duration_seconds_bucket{method="post",code="400",le="1"} 34 +http_request_duration_seconds_bucket{method="post",code="400",le="+Inf"} 59 +http_request_duration_seconds_sum{method="post",code="400"} 29 +http_request_duration_seconds_count{method="post",code="400"} 59 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 49 +http_requests_total{method="post",code="300"} 3 +http_requests_total{method="post",code="400"} 59 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{code="0" quantile="0.5"} 66 +rpc_duration_seconds_sum{code="0"} 119 +rpc_duration_seconds_count{code="0"} 69 +rpc_duration_seconds{code="3" quantile="0.5"} 51 +rpc_duration_seconds_sum{code="3"} 59 +rpc_duration_seconds_count{code="3"} 39 +rpc_duration_seconds{code="5" quantile="0.5"} 54 +rpc_duration_seconds_sum{code="5"} 199 +rpc_duration_seconds_count{code="5"} 419 +` + +var target2Page5 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{method="post",code="200",le="1"} 41 +http_request_duration_seconds_bucket{method="post",code="200",le="+Inf"} 50 +http_request_duration_seconds_sum{method="post",code="200"} 43 +http_request_duration_seconds_count{method="post",code="200"} 50 +http_request_duration_seconds_bucket{method="post",code="300",le="1"} 4 +http_request_duration_seconds_bucket{method="post",code="300",le="+Inf"} 5 +http_request_duration_seconds_sum{method="post",code="300"} 4 +http_request_duration_seconds_count{method="post",code="300"} 5 +http_request_duration_seconds_bucket{method="post",code="400",le="1"} 34 +http_request_duration_seconds_bucket{method="post",code="400",le="+Inf"} 59 +http_request_duration_seconds_sum{method="post",code="400"} 29 +http_request_duration_seconds_count{method="post",code="400"} 59 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 50 +http_requests_total{method="post",code="300"} 5 +http_requests_total{method="post",code="400"} 59 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{code="0" quantile="0.5"} 76 +rpc_duration_seconds_sum{code="0"} 129 +rpc_duration_seconds_count{code="0"} 79 +rpc_duration_seconds{code="3" quantile="0.5"} 61 +rpc_duration_seconds_sum{code="3"} 69 +rpc_duration_seconds_count{code="3"} 49 +rpc_duration_seconds{code="5" quantile="0.5"} 64 +rpc_duration_seconds_sum{code="5"} 209 +rpc_duration_seconds_count{code="5"} 429 +` + +func verifyTarget2(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(18), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts1), + compareHistogramAttributes(map[string]string{"method": "post", "code": "200"}), + compareHistogram(10, 7, []uint64{8, 2}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts1), + compareHistogramAttributes(map[string]string{"method": "post", "code": "400"}), + compareHistogram(50, 25, []uint64{30, 20}), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts1), + compareDoubleValue(10), + 
compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts1), + compareDoubleValue(50), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummaryAttributes(map[string]string{"code": "0"}), + compareSummary(50, 100, [][]float64{{0.5, 47}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummaryAttributes(map[string]string{"code": "5"}), + compareSummary(400, 180, [][]float64{{0.5, 35}}), + }, + }, + }), + } + doCompare(t, "scrape1", wantAttributes, m1, e1) + + m2 := resourceMetrics[1] + // m2 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m2)) + + metricsScrape2 := m2.ScopeMetrics().At(0).Metrics() + ts2 := getTS(metricsScrape2) + e2 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts2), + compareDoubleValue(16), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts2), + compareHistogramAttributes(map[string]string{"method": "post", "code": "200"}), + compareHistogram(50, 43, []uint64{40, 10}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts2), + compareHistogramTimestamp(ts2), + 
compareHistogramAttributes(map[string]string{"method": "post", "code": "300"}), + compareHistogram(3, 2, []uint64{3, 0}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts2), + compareHistogramAttributes(map[string]string{"method": "post", "code": "400"}), + compareHistogram(60, 30, []uint64{35, 25}), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts2), + compareDoubleValue(50), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts2), + compareTimestamp(ts2), + compareDoubleValue(3), + compareAttributes(map[string]string{"method": "post", "code": "300"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts2), + compareDoubleValue(60), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts2), + compareSummaryAttributes(map[string]string{"code": "0"}), + compareSummary(60, 110, [][]float64{{0.5, 57}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts2), + compareSummaryTimestamp(ts2), + compareSummaryAttributes(map[string]string{"code": "3"}), + compareSummary(30, 50, [][]float64{{0.5, 42}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts2), + compareSummaryAttributes(map[string]string{"code": "5"}), + 
compareSummary(410, 190, [][]float64{{0.5, 45}}), + }, + }, + }), + } + doCompare(t, "scrape2", wantAttributes, m2, e2) + + m3 := resourceMetrics[2] + // m3 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m3)) + + metricsScrape3 := m3.ScopeMetrics().At(0).Metrics() + ts3 := getTS(metricsScrape3) + e3 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts3), + compareDoubleValue(16), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts3), + compareHistogramAttributes(map[string]string{"method": "post", "code": "200"}), + compareHistogram(50, 43, []uint64{40, 10}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts2), + compareHistogramTimestamp(ts3), + compareHistogramAttributes(map[string]string{"method": "post", "code": "300"}), + compareHistogram(5, 7, []uint64{3, 2}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts3), + compareHistogramAttributes(map[string]string{"method": "post", "code": "400"}), + compareHistogram(60, 30, []uint64{35, 25}), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts3), + compareDoubleValue(50), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts2), + compareTimestamp(ts3), + compareDoubleValue(5), + 
compareAttributes(map[string]string{"method": "post", "code": "300"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts1), + compareTimestamp(ts3), + compareDoubleValue(60), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts3), + compareSummaryAttributes(map[string]string{"code": "0"}), + compareSummary(70, 120, [][]float64{{0.5, 67}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts2), + compareSummaryTimestamp(ts3), + compareSummaryAttributes(map[string]string{"code": "3"}), + compareSummary(40, 60, [][]float64{{0.5, 52}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts3), + compareSummaryAttributes(map[string]string{"code": "5"}), + compareSummary(420, 200, [][]float64{{0.5, 55}}), + }, + }, + }), + } + doCompare(t, "scrape3", wantAttributes, m3, e3) + + m4 := resourceMetrics[3] + // m4 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m4)) + + metricsScrape4 := m4.ScopeMetrics().At(0).Metrics() + ts4 := getTS(metricsScrape4) + e4 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts4), + compareDoubleValue(16), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts4), + compareHistogramTimestamp(ts4), + 
compareHistogramAttributes(map[string]string{"method": "post", "code": "200"}), + compareHistogram(49, 42, []uint64{40, 9}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts4), + compareHistogramTimestamp(ts4), + compareHistogramAttributes(map[string]string{"method": "post", "code": "300"}), + compareHistogram(3, 4, []uint64{2, 1}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts4), + compareHistogramTimestamp(ts4), + compareHistogramAttributes(map[string]string{"method": "post", "code": "400"}), + compareHistogram(59, 29, []uint64{34, 25}), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts4), + compareTimestamp(ts4), + compareDoubleValue(49), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts4), + compareTimestamp(ts4), + compareDoubleValue(3), + compareAttributes(map[string]string{"method": "post", "code": "300"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts4), + compareTimestamp(ts4), + compareDoubleValue(59), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts4), + compareSummaryTimestamp(ts4), + compareSummaryAttributes(map[string]string{"code": "0"}), + compareSummary(69, 119, [][]float64{{0.5, 66}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts4), + compareSummaryTimestamp(ts4), + compareSummaryAttributes(map[string]string{"code": 
"3"}), + compareSummary(39, 59, [][]float64{{0.5, 51}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts4), + compareSummaryTimestamp(ts4), + compareSummaryAttributes(map[string]string{"code": "5"}), + compareSummary(419, 199, [][]float64{{0.5, 54}}), + }, + }, + }), + } + doCompare(t, "scrape4", wantAttributes, m4, e4) + + m5 := resourceMetrics[4] + // m5 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m5)) + + metricsScrape5 := m5.ScopeMetrics().At(0).Metrics() + ts5 := getTS(metricsScrape5) + e5 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts5), + compareDoubleValue(16), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts4), + compareHistogramTimestamp(ts5), + compareHistogramAttributes(map[string]string{"method": "post", "code": "200"}), + compareHistogram(50, 43, []uint64{41, 9}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts4), + compareHistogramTimestamp(ts5), + compareHistogramAttributes(map[string]string{"method": "post", "code": "300"}), + compareHistogram(5, 4, []uint64{4, 1}), + }, + }, + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts4), + compareHistogramTimestamp(ts5), + compareHistogramAttributes(map[string]string{"method": "post", "code": "400"}), + compareHistogram(59, 29, []uint64{34, 25}), + }, + }, + }), + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts4), + 
compareTimestamp(ts5), + compareDoubleValue(50), + compareAttributes(map[string]string{"method": "post", "code": "200"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts4), + compareTimestamp(ts5), + compareDoubleValue(5), + compareAttributes(map[string]string{"method": "post", "code": "300"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareStartTimestamp(ts4), + compareTimestamp(ts5), + compareDoubleValue(59), + compareAttributes(map[string]string{"method": "post", "code": "400"}), + }, + }, + }), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts4), + compareSummaryTimestamp(ts5), + compareSummaryAttributes(map[string]string{"code": "0"}), + compareSummary(79, 129, [][]float64{{0.5, 76}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts4), + compareSummaryTimestamp(ts5), + compareSummaryAttributes(map[string]string{"code": "3"}), + compareSummary(49, 69, [][]float64{{0.5, 61}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts4), + compareSummaryTimestamp(ts5), + compareSummaryAttributes(map[string]string{"code": "5"}), + compareSummary(429, 209, [][]float64{{0.5, 64}}), + }, + }, + }), + } + doCompare(t, "scrape5", wantAttributes, m5, e5) +} + +// target3 for complicated data types, including summaries and histograms. one of the summary and histogram have only +// sum/count, for the summary it's valid, however the histogram one is not, but it shall not cause the scrape to fail +var target3Page1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 18 + +# A histogram, which has a pretty complex representation in the text format: +# HELP http_request_duration_seconds A histogram of the request duration. 
+# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.2"} 10000 +http_request_duration_seconds_bucket{le="0.5"} 11000 +http_request_duration_seconds_bucket{le="1"} 12001 +http_request_duration_seconds_bucket{le="+Inf"} 13003 +http_request_duration_seconds_sum 50000 +http_request_duration_seconds_count 13003 + +# A corrupted histogram with only sum and count +# HELP corrupted_hist A corrupted_hist. +# TYPE corrupted_hist histogram +corrupted_hist_sum 100 +corrupted_hist_count 10 + +# Finally a summary, which has a complex representation, too: +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{foo="bar" quantile="0.01"} 31 +rpc_duration_seconds{foo="bar" quantile="0.05"} 35 +rpc_duration_seconds{foo="bar" quantile="0.5"} 47 +rpc_duration_seconds{foo="bar" quantile="0.9"} 70 +rpc_duration_seconds{foo="bar" quantile="0.99"} 76 +rpc_duration_seconds_sum{foo="bar"} 8000 +rpc_duration_seconds_count{foo="bar"} 900 +rpc_duration_seconds_sum{foo="no_quantile"} 100 +rpc_duration_seconds_count{foo="no_quantile"} 50 +` + +var target3Page2 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# A histogram, which has a pretty complex representation in the text format: +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.2"} 11000 +http_request_duration_seconds_bucket{le="0.5"} 12000 +http_request_duration_seconds_bucket{le="1"} 13001 +http_request_duration_seconds_bucket{le="+Inf"} 14003 +http_request_duration_seconds_sum 50100 +http_request_duration_seconds_count 14003 + +# A corrupted histogram with only sum and count +# HELP corrupted_hist A corrupted_hist. 
+# TYPE corrupted_hist histogram +corrupted_hist_sum 101 +corrupted_hist_count 15 + +# Finally a summary, which has a complex representation, too: +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{foo="bar" quantile="0.01"} 32 +rpc_duration_seconds{foo="bar" quantile="0.05"} 35 +rpc_duration_seconds{foo="bar" quantile="0.5"} 47 +rpc_duration_seconds{foo="bar" quantile="0.9"} 70 +rpc_duration_seconds{foo="bar" quantile="0.99"} 77 +rpc_duration_seconds_sum{foo="bar"} 8100 +rpc_duration_seconds_count{foo="bar"} 950 +rpc_duration_seconds_sum{foo="no_quantile"} 101 +rpc_duration_seconds_count{foo="no_quantile"} 55 +` + +var target4Page1 = ` +# A simple counter +# TYPE foo counter +foo 0 +# Another counter with the same name but also _total suffix +# TYPE foo_total counter +foo_total 1 +` + +func verifyTarget3(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + // m1 has 3 metrics + 5 internal scraper metrics + assert.Equal(t, 8, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(18), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts1), + compareHistogram(13003, 50000, []uint64{10000, 1000, 1001, 1002}), + }, + }, + }), + assertMetricAbsent("corrupted_hist"), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + 
[]dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummaryAttributes(map[string]string{"foo": "bar"}), + compareSummary(900, 8000, [][]float64{{0.01, 31}, {0.05, 35}, {0.5, 47}, {0.9, 70}, {0.99, 76}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts1), + compareSummaryAttributes(map[string]string{"foo": "no_quantile"}), + compareSummary(50, 100, [][]float64{}), + }, + }, + }), + } + doCompare(t, "scrape1", wantAttributes, m1, e1) + + m2 := resourceMetrics[1] + // m2 has 3 metrics + 5 internal scraper metrics + assert.Equal(t, 8, metricsCount(m2)) + + metricsScrape2 := m2.ScopeMetrics().At(0).Metrics() + ts2 := getTS(metricsScrape2) + e2 := []testExpectation{ + assertMetricPresent("go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts2), + compareDoubleValue(16), + }, + }, + }), + assertMetricPresent("http_request_duration_seconds", + compareMetricType(pmetric.MetricTypeHistogram), + []dataPointExpectation{ + { + histogramPointComparator: []histogramPointComparator{ + compareHistogramStartTimestamp(ts1), + compareHistogramTimestamp(ts2), + compareHistogram(14003, 50100, []uint64{11000, 1000, 1001, 1002}), + }, + }, + }), + assertMetricAbsent("corrupted_hist"), + assertMetricPresent("rpc_duration_seconds", + compareMetricType(pmetric.MetricTypeSummary), + []dataPointExpectation{ + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + compareSummaryTimestamp(ts2), + compareSummaryAttributes(map[string]string{"foo": "bar"}), + compareSummary(950, 8100, [][]float64{{0.01, 32}, {0.05, 35}, {0.5, 47}, {0.9, 70}, {0.99, 77}}), + }, + }, + { + summaryPointComparator: []summaryPointComparator{ + compareSummaryStartTimestamp(ts1), + 
compareSummaryTimestamp(ts2), + compareSummaryAttributes(map[string]string{"foo": "no_quantile"}), + compareSummary(55, 101, [][]float64{}), + }, + }, + }), + } + doCompare(t, "scrape2", wantAttributes, m2, e2) +} + +func verifyTarget4(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 2 metrics + 5 internal scraper metrics + assert.Equal(t, 7, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("foo", + compareMetricIsMonotonic(true), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(0), + }, + }, + }), + assertMetricPresent("foo_total", + compareMetricIsMonotonic(true), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(1.0), + }, + }, + }), + } + doCompare(t, "scrape-infostatesetmetrics-1", wantAttributes, m1, e1) +} + +// TestCoreMetricsEndToEnd end to end test executor +func TestCoreMetricsEndToEnd(t *testing.T) { + // 1. 
setup input data + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: target1Page1}, + {code: 500, data: ""}, + {code: 200, data: target1Page2}, + {code: 500, data: ""}, + {code: 200, data: target1Page3}, + }, + validateFunc: verifyTarget1, + }, + { + name: "target2", + pages: []mockPrometheusResponse{ + {code: 200, data: target2Page1}, + {code: 200, data: target2Page2}, + {code: 500, data: ""}, + {code: 200, data: target2Page3}, + {code: 200, data: target2Page4}, + {code: 500, data: ""}, + {code: 200, data: target2Page5}, + }, + validateFunc: verifyTarget2, + }, + { + name: "target3", + pages: []mockPrometheusResponse{ + {code: 200, data: target3Page1}, + {code: 200, data: target3Page2}, + }, + validateFunc: verifyTarget3, + }, + { + name: "target4", + pages: []mockPrometheusResponse{ + {code: 200, data: target4Page1, useOpenMetrics: false}, + }, + validateFunc: verifyTarget4, + validateScrapes: true, + }, + } + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) +} + +var startTimeMetricPage = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19 +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 100 +http_requests_total{method="post",code="400"} 5 +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1000 +http_request_duration_seconds_bucket{le="0.5"} 1500 +http_request_duration_seconds_bucket{le="1"} 2000 +http_request_duration_seconds_bucket{le="+Inf"} 2500 +http_request_duration_seconds_sum 5000 +http_request_duration_seconds_count 2500 +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds{quantile="0.01"} 1
+rpc_duration_seconds{quantile="0.9"} 5
+rpc_duration_seconds{quantile="0.99"} 8
+rpc_duration_seconds_sum 5000
+rpc_duration_seconds_count 1000
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 400.8
+`
+
+var startTimeMetricPageStartTimestamp = &timestamppb.Timestamp{Seconds: 400, Nanos: 800000000}
+
+// 6 metrics + 5 internal metrics
+const numStartTimeMetricPageTimeseries = 11
+
+func verifyStartTimeMetricPage(t *testing.T, td *testData, result []pmetric.ResourceMetrics) {
+	verifyNumValidScrapeResults(t, td, result)
+	numTimeseries := 0
+	for _, rm := range result {
+		metrics := getMetrics(rm)
+		for i := 0; i < len(metrics); i++ {
+			timestamp := startTimeMetricPageStartTimestamp
+			switch metrics[i].Type() {
+			case pmetric.MetricTypeGauge:
+				timestamp = nil
+				for j := 0; j < metrics[i].Gauge().DataPoints().Len(); j++ {
+					time := metrics[i].Gauge().DataPoints().At(j).StartTimestamp()
+					assert.Equal(t, timestamp.AsTime(), time.AsTime())
+					numTimeseries++
+				}
+
+			case pmetric.MetricTypeSum:
+				for j := 0; j < metrics[i].Sum().DataPoints().Len(); j++ {
+					assert.Equal(t, timestamp.AsTime(), metrics[i].Sum().DataPoints().At(j).StartTimestamp().AsTime())
+					numTimeseries++
+				}
+
+			case pmetric.MetricTypeHistogram:
+				for j := 0; j < metrics[i].Histogram().DataPoints().Len(); j++ {
+					assert.Equal(t, timestamp.AsTime(), metrics[i].Histogram().DataPoints().At(j).StartTimestamp().AsTime())
+					numTimeseries++
+				}
+
+			case pmetric.MetricTypeSummary:
+				for j := 0; j < metrics[i].Summary().DataPoints().Len(); j++ {
+					assert.Equal(t, timestamp.AsTime(), metrics[i].Summary().DataPoints().At(j).StartTimestamp().AsTime())
+					numTimeseries++
+				}
+			}
+		}
+		assert.Equal(t, numStartTimeMetricPageTimeseries, numTimeseries)
+	}
+}
+
+// TestStartTimeMetric validates that timeseries have start time set to 
'process_start_time_seconds' +func TestStartTimeMetric(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: startTimeMetricPage}, + }, + validateFunc: verifyStartTimeMetricPage, + }, + } + testComponent(t, targets, true, "", featuregate.GlobalRegistry()) +} + +var startTimeMetricRegexPage = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19 +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 100 +http_requests_total{method="post",code="400"} 5 +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1000 +http_request_duration_seconds_bucket{le="0.5"} 1500 +http_request_duration_seconds_bucket{le="1"} 2000 +http_request_duration_seconds_bucket{le="+Inf"} 2500 +http_request_duration_seconds_sum 5000 +http_request_duration_seconds_count 2500 +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 5 +rpc_duration_seconds{quantile="0.99"} 8 +rpc_duration_seconds_sum 5000 +rpc_duration_seconds_count 1000 +# HELP example_process_start_time_seconds Start time of the process since unix epoch in seconds. 
+# TYPE example_process_start_time_seconds gauge +example_process_start_time_seconds 400.8 +` + +// TestStartTimeMetricRegex validates that timeseries have start time regex set to 'process_start_time_seconds' +func TestStartTimeMetricRegex(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: startTimeMetricRegexPage}, + }, + validateFunc: verifyStartTimeMetricPage, + }, + { + name: "target2", + pages: []mockPrometheusResponse{ + {code: 200, data: startTimeMetricPage}, + }, + validateFunc: verifyStartTimeMetricPage, + }, + } + testComponent(t, targets, true, "^(.+_)*process_start_time_seconds$", featuregate.GlobalRegistry()) +} + +// metric type is defined as 'untyped' in the first metric +// and, type hint is missing in the 2nd metric +var untypedMetrics = ` +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total untyped +http_requests_total{method="post",code="200"} 100 +http_requests_total{method="post",code="400"} 5 + +# HELP redis_connected_clients Redis connected clients +redis_connected_clients{name="rough-snowflake-web",port="6380"} 10.0 +redis_connected_clients{name="rough-snowflake-web",port="6381"} 12.0 +` + +// TestUntypedMetrics validates the pass through of untyped metrics +// through metric receiver and the conversion of untyped to gauge double +func TestUntypedMetrics(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: untypedMetrics}, + }, + validateFunc: verifyUntypedMetrics, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry()) + +} + +func verifyUntypedMetrics(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 2 metrics + 5 internal scraper metrics + assert.Equal(t, 7, metricsCount(m1)) + + wantAttributes := td.attributes + metrics1 := 
m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"method": "post", "code": "200", internal.GCPOpsAgentUntypedMetricKey: "true"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(5), + compareAttributes(map[string]string{"method": "post", "code": "400", internal.GCPOpsAgentUntypedMetricKey: "true"}), + }, + }, + }), + assertMetricPresent("redis_connected_clients", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(10), + compareAttributes(map[string]string{"name": "rough-snowflake-web", "port": "6380", internal.GCPOpsAgentUntypedMetricKey: "true"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(12), + compareAttributes(map[string]string{"name": "rough-snowflake-web", "port": "6381", internal.GCPOpsAgentUntypedMetricKey: "true"}), + }, + }, + }), + } + doCompare(t, "scrape-untypedMetric-1", wantAttributes, m1, e1) +} + +func TestGCInterval(t *testing.T) { + for _, tc := range []struct { + desc string + input *promConfig.Config + want time.Duration + }{ + { + desc: "default", + input: &promConfig.Config{}, + want: defaultGCInterval, + }, + { + desc: "global override", + input: &promConfig.Config{ + GlobalConfig: promConfig.GlobalConfig{ + ScrapeInterval: model.Duration(10 * time.Minute), + }, + }, + want: 11 * time.Minute, + }, + { + desc: "scrape config override", + input: &promConfig.Config{ + ScrapeConfigs: []*promConfig.ScrapeConfig{ + { + ScrapeInterval: model.Duration(10 * time.Minute), + }, + }, + }, + want: 11 * time.Minute, 
+ }, + } { + t.Run(tc.desc, func(t *testing.T) { + got := gcInterval(tc.input) + if got != tc.want { + t.Errorf("gcInterval(%+v) = %v, want %v", tc.input, got, tc.want) + } + }) + } +} diff --git a/collector/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go b/collector/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go new file mode 100644 index 0000000..1c208f2 --- /dev/null +++ b/collector/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go @@ -0,0 +1,460 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusreceiver + +import ( + "testing" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver/internal" + + "github.com/prometheus/common/model" + promcfg "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/relabel" + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var renameMetric = ` +# HELP http_go_threads Number of OS threads created +# TYPE http_go_threads gauge +http_go_threads 19 + +# HELP http_connected_total connected clients +# TYPE http_connected_total counter +http_connected_total{method="post",port="6380"} 15.0 + +# HELP redis_http_requests_total Redis connected clients +# TYPE redis_http_requests_total counter +redis_http_requests_total{method="post",port="6380"} 10.0 +redis_http_requests_total{method="post",port="6381"} 12.0 + +# HELP rpc_duration_total RPC clients +# TYPE rpc_duration_total counter +rpc_duration_total{method="post",port="6380"} 100.0 +rpc_duration_total{method="post",port="6381"} 120.0 +` + +// TestMetricRenaming validates the 'Replace' and 'Drop' actions of metric renaming config +// Renaming metric config converts any metric type to Gauge double. 
+// And usage of renaming metric on complex types like histogram or summary will lead to undefined results and hence not tested here +func TestMetricRenaming(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: renameMetric}, + }, + validateFunc: verifyRenameMetric, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + for _, scrapeConfig := range cfg.ScrapeConfigs { + scrapeConfig.MetricRelabelConfigs = []*relabel.Config{ + { + // this config should replace the matching regex metric name with 'foo' + SourceLabels: model.LabelNames{"__name__"}, + Regex: relabel.MustNewRegexp("http_.*"), + Action: relabel.Replace, + TargetLabel: "__name__", + Replacement: "foo", + }, + { + // this config should omit 'redis_' from the matching regex metric name + SourceLabels: model.LabelNames{"__name__"}, + Regex: relabel.MustNewRegexp("redis_(.*)"), + Action: relabel.Replace, + TargetLabel: "__name__", + Replacement: "$1", + }, + { + // this config should drop the metric that matches the regex metric name + SourceLabels: model.LabelNames{"__name__"}, + Regex: relabel.MustNewRegexp("rpc_(.*)"), + Action: relabel.Drop, + Replacement: relabel.DefaultRelabelConfig.Replacement, + }, + } + } + }) +} + +// TestMetricRenaming validates the 'Keep' action of metric renaming config +func TestMetricRenamingKeepAction(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: renameMetric}, + }, + validateFunc: verifyRenameMetricKeepAction, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + for _, scrapeConfig := range cfg.ScrapeConfigs { + scrapeConfig.MetricRelabelConfigs = []*relabel.Config{ + { + // this config should keep only the metric that matches the regex metric name, and drop the rest + SourceLabels: model.LabelNames{"__name__"}, + Regex: 
relabel.MustNewRegexp("rpc_(.*)"), + Action: relabel.Keep, + Replacement: relabel.DefaultRelabelConfig.Replacement, + }, + } + } + }) + +} + +func verifyRenameMetric(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 2 metrics + 5 internal scraper metrics + assert.Equal(t, 7, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("foo", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + }, + }, + { + // renaming config converts any metric type to untyped metric, which then gets converted to gauge double type by metric builder + // This bug is tracked here: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5001 + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(15), + compareAttributes(map[string]string{"method": "post", "port": "6380", internal.GCPOpsAgentUntypedMetricKey: "true"}), + }, + }, + }), + // renaming config converts any metric type to untyped metric, which then gets converted to gauge double type by metric builder + assertMetricPresent("http_requests_total", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(10), + compareAttributes(map[string]string{"method": "post", "port": "6380", internal.GCPOpsAgentUntypedMetricKey: "true"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(12), + compareAttributes(map[string]string{"method": "post", "port": "6381", internal.GCPOpsAgentUntypedMetricKey: "true"}), + }, + }, + }), + 
assertMetricAbsent("rpc_duration_total"), + } + doCompare(t, "scrape-metricRename-1", wantAttributes, m1, e1) +} + +func verifyRenameMetricKeepAction(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 1 metrics + 5 internal scraper metrics + assert.Equal(t, 6, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("rpc_duration_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareStartTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"method": "post", "port": "6380"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(120), + compareAttributes(map[string]string{"method": "post", "port": "6381"}), + }, + }, + }), + assertMetricAbsent("http_go_threads"), + assertMetricAbsent("http_connected_total"), + assertMetricAbsent("redis_http_requests_total"), + } + doCompare(t, "scrape-metricRenameKeepAction-1", wantAttributes, m1, e1) +} + +var renamingLabel = ` +# HELP http_go_threads Number of OS threads created +# TYPE http_go_threads gauge +http_go_threads 19 + +# HELP http_connected_total connected clients +# TYPE http_connected_total counter +http_connected_total{url="localhost",status="ok"} 15.0 + +# HELP redis_http_requests_total Redis connected clients +# TYPE redis_http_requests_total counter +redis_http_requests_total{method="post",port="6380"} 10.0 +redis_http_requests_total{job="sample-app",statusCode="200"} 12.0 + +# HELP rpc_duration_total RPC clients +# TYPE rpc_duration_total counter +rpc_duration_total{monitor="codeLab",host="local"} 100.0 +rpc_duration_total{address="localhost:9090/metrics",contentType="application/json"} 
120.0 +` + +func TestLabelRenaming(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: renamingLabel}, + }, + validateFunc: verifyRenameLabel, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + for _, scrapeConfig := range cfg.ScrapeConfigs { + scrapeConfig.MetricRelabelConfigs = []*relabel.Config{ + { + // this config should add new label {foo="bar"} to all metrics' + Regex: relabel.MustNewRegexp("(.*)"), + Action: relabel.Replace, + TargetLabel: "foo", + Replacement: "bar", + }, + { + // this config should create new label {id="target1/metrics"} + // using the value from capture group of matched regex + SourceLabels: model.LabelNames{"address"}, + Regex: relabel.MustNewRegexp(".*/(.*)"), + Action: relabel.Replace, + TargetLabel: "id", + Replacement: "$1", + }, + { + // this config creates a new label for metrics that has matched regex label. + // They key of this new label will be as given in 'replacement' + // and value will be of the matched regex label value. 
+ Regex: relabel.MustNewRegexp("method(.*)"), + Action: relabel.LabelMap, + Replacement: "bar$1", + }, + { + // this config should drop the matched regex label + Regex: relabel.MustNewRegexp("(url.*)"), + Action: relabel.LabelDrop, + }, + } + } + }) + +} + +func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("http_go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(19), + compareAttributes(map[string]string{"foo": "bar"}), + }, + }, + }), + assertMetricPresent("http_connected_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(15), + compareAttributes(map[string]string{"foo": "bar", "status": "ok"}), + }, + }, + }), + assertMetricPresent("redis_http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(10), + compareAttributes(map[string]string{"method": "post", "port": "6380", "bar": "post", "foo": "bar"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(12), + // since honor_label bool in config is true by default, + // Prometheus reserved keywords like "job" and "instance" should be prefixed by "exported_" + compareAttributes(map[string]string{"exported_job": "sample-app", "statusCode": "200", "foo": "bar"}), + }, + }, + }), + 
assertMetricPresent("rpc_duration_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"monitor": "codeLab", "host": "local", "foo": "bar"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(120), + compareAttributes(map[string]string{"address": "localhost:9090/metrics", + "contentType": "application/json", "id": "metrics", "foo": "bar"}), + }, + }, + }), + } + doCompare(t, "scrape-labelRename-1", wantAttributes, m1, e1) +} + +func TestLabelRenamingKeepAction(t *testing.T) { + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: renamingLabel}, + }, + validateFunc: verifyRenameLabelKeepAction, + }, + } + + testComponent(t, targets, false, "", featuregate.GlobalRegistry(), func(cfg *promcfg.Config) { + for _, scrapeConfig := range cfg.ScrapeConfigs { + scrapeConfig.MetricRelabelConfigs = []*relabel.Config{ + { + // this config should keep only metric that matches the regex metric name, and drop the rest + Regex: relabel.MustNewRegexp("__name__|__scheme__|__address__|" + + "__metrics_path__|__scrape_interval__|instance|job|(m.*)"), + Action: relabel.LabelKeep, + }, + } + } + }) + +} + +func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { + verifyNumValidScrapeResults(t, td, resourceMetrics) + m1 := resourceMetrics[0] + + // m1 has 4 metrics + 5 internal scraper metrics + assert.Equal(t, 9, metricsCount(m1)) + + wantAttributes := td.attributes + + metrics1 := m1.ScopeMetrics().At(0).Metrics() + ts1 := getTS(metrics1) + e1 := []testExpectation{ + assertMetricPresent("http_go_threads", + compareMetricType(pmetric.MetricTypeGauge), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + 
compareDoubleValue(19), + assertAttributesAbsent(), + }, + }, + }), + assertMetricPresent("http_connected_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(15), + assertAttributesAbsent(), + }, + }, + }), + assertMetricPresent("redis_http_requests_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(10), + compareAttributes(map[string]string{"method": "post"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(12), + assertAttributesAbsent(), + }, + }, + }), + assertMetricPresent("rpc_duration_total", + compareMetricType(pmetric.MetricTypeSum), + []dataPointExpectation{ + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(100), + compareAttributes(map[string]string{"monitor": "codeLab"}), + }, + }, + { + numberPointComparator: []numberPointComparator{ + compareTimestamp(ts1), + compareDoubleValue(120), + assertAttributesAbsent(), + }, + }, + }), + } + doCompare(t, "scrape-LabelRenameKeepAction-1", wantAttributes, m1, e1) +} diff --git a/collector/receiver/prometheusreceiver/scrapeloop-flowchart.png b/collector/receiver/prometheusreceiver/scrapeloop-flowchart.png new file mode 100644 index 0000000..5853a9d Binary files /dev/null and b/collector/receiver/prometheusreceiver/scrapeloop-flowchart.png differ diff --git a/collector/receiver/prometheusreceiver/testdata/config.yaml b/collector/receiver/prometheusreceiver/testdata/config.yaml new file mode 100644 index 0000000..f767602 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/config.yaml @@ -0,0 +1,21 @@ +prometheus: +prometheus/customname: + buffer_period: 234 + buffer_count: 45 + use_start_time_metric: true + start_time_metric_regex: 
'^(.+_)*process_start_time_seconds$' + target_allocator: + endpoint: http://my-targetallocator-service + interval: 30s + collector_id: collector-1 + # imported struct from the Prometheus code base. Can be used optionally to configure the jobs as seen in the docs + # https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config + http_sd_config: + refresh_interval: 60s + basic_auth: + username: prometheus + password: changeme + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s diff --git a/collector/receiver/prometheusreceiver/testdata/config_env.yaml b/collector/receiver/prometheusreceiver/testdata/config_env.yaml new file mode 100644 index 0000000..a528511 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/config_env.yaml @@ -0,0 +1,5 @@ +prometheus: + config: + scrape_configs: + - job_name: ${env:JOBNAME} + scrape_interval: 5s diff --git a/collector/receiver/prometheusreceiver/testdata/config_k8s.yaml b/collector/receiver/prometheusreceiver/testdata/config_k8s.yaml new file mode 100644 index 0000000..cdbe077 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/config_k8s.yaml @@ -0,0 +1,28 @@ +prometheus: + config: + scrape_configs: + - job_name: apps + kubernetes_sd_configs: + - role: pod + selectors: + - role: pod + # only scrape data from pods running on the same node as collector + field: "spec.nodeName=${NODE_NAME}" + relabel_configs: + # scrape pods annotated with "prometheus.io/scrape: true" + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + regex: "true" + action: keep + # read the port from "prometheus.io/port: " annotation and update scraping address accordingly + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + # escaped $1:$2 + replacement: $$1:$$2 + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: 
kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name diff --git a/collector/receiver/prometheusreceiver/testdata/config_sd.yaml b/collector/receiver/prometheusreceiver/testdata/config_sd.yaml new file mode 100644 index 0000000..f8afd00 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/config_sd.yaml @@ -0,0 +1,70 @@ +prometheus: + config: + scrape_configs: + - job_name: file + file_sd_configs: + - files: + - './testdata/dummy.json' + - job_name: k8s + kubernetes_sd_configs: + - role: node + - job_name: ec2 + ec2_sd_configs: + - region: us-west-2 + - job_name: gce + gce_sd_configs: + - project: my-project + zone: my-zone + - job_name: dns + dns_sd_configs: + - names: + - name1 + - job_name: openstack + openstack_sd_configs: + - role: hypervisor + region: region + - job_name: hetzner + hetzner_sd_configs: + - role: robot + - job_name: marathon + marathon_sd_configs: + - servers: + - server1 + - job_name: nerve + nerve_sd_configs: + - servers: + - server1 + paths: + - /path1 + - job_name: serverset + serverset_sd_configs: + - servers: + - server1 + paths: + - /path1 + - job_name: triton + triton_sd_configs: + - account: account + dns_suffix: suffix + endpoint: endpoint + - job_name: eureka + eureka_sd_configs: + - server: http://server1 + - job_name: azure + azure_sd_configs: + - subscription_id: subscription + tenant_id: tenant + client_id: client + client_secret: secret + - job_name: consul + consul_sd_configs: + - server: server1 + - job_name: digitalocean + digitalocean_sd_configs: + - basic_auth: + username: username + password: password + - job_name: dockerswarm_sd_config + dockerswarm_sd_configs: + - host: host + role: nodes diff --git a/collector/receiver/prometheusreceiver/testdata/config_target_allocator.yaml b/collector/receiver/prometheusreceiver/testdata/config_target_allocator.yaml new file mode 100644 index 0000000..699e3c4 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/config_target_allocator.yaml @@ -0,0 +1,19 @@ +prometheus: + target_allocator: + endpoint: http://localhost:8080 + interval: 30s + collector_id: collector-1 +prometheus/withScrape: + target_allocator: + endpoint: http://localhost:8080 + interval: 30s + collector_id: collector-1 + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s +prometheus/withOnlyScrape: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s diff --git a/collector/receiver/prometheusreceiver/testdata/dummy-tls-cert-file b/collector/receiver/prometheusreceiver/testdata/dummy-tls-cert-file new file mode 100644 index 0000000..e69de29 diff --git a/collector/receiver/prometheusreceiver/testdata/dummy-tls-key-file b/collector/receiver/prometheusreceiver/testdata/dummy-tls-key-file new file mode 100644 index 0000000..e69de29 diff --git a/collector/receiver/prometheusreceiver/testdata/dummy.json b/collector/receiver/prometheusreceiver/testdata/dummy.json new file mode 100644 index 0000000..0637a08 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/dummy.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-cert-file-without-key-file.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-cert-file-without-key-file.yaml new file mode 100644 index 0000000..4e4859a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-cert-file-without-key-file.yaml @@ -0,0 +1,7 @@ +prometheus: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + tls_config: + cert_file: ./testdata/dummy-tls-cert-file diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-file-sd-config-json.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-file-sd-config-json.yaml new file mode 100644 index 0000000..d33312a --- 
/dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-file-sd-config-json.yaml @@ -0,0 +1,7 @@ +prometheus: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + file_sd_configs: + - files: ["./testdata/sd-config-with-null-target-group.json"] diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-file-sd-config-yaml.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-file-sd-config-yaml.yaml new file mode 100644 index 0000000..f101bdb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-file-sd-config-yaml.yaml @@ -0,0 +1,7 @@ +prometheus: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + file_sd_configs: + - files: ["./testdata/sd-config-with-null-target-group.yaml"] diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-key-file-without-cert-file.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-key-file-without-cert-file.yaml new file mode 100644 index 0000000..55831cc --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-key-file-without-cert-file.yaml @@ -0,0 +1,7 @@ +prometheus: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + tls_config: + key_file: ./testdata/dummy-tls-key-file diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-kubernetes-sd-config.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-kubernetes-sd-config.yaml new file mode 100644 index 0000000..50d7978 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-kubernetes-sd-config.yaml @@ -0,0 +1,10 @@ +prometheus: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + kubernetes_sd_configs: + - role: pod + api_server: "" + tls_config: + cert_file: ./testdata/dummy-tls-cert-file diff 
--git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-auth-credentials-file.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-auth-credentials-file.yaml new file mode 100644 index 0000000..dba4d3e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-auth-credentials-file.yaml @@ -0,0 +1,7 @@ +prometheus: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + authorization: + credentials_file: /nonexistentauthcredentialsfile diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-cert-file.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-cert-file.yaml new file mode 100644 index 0000000..afab2fd --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-cert-file.yaml @@ -0,0 +1,8 @@ +prometheus: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + tls_config: + cert_file: /nonexistentcertfile + key_file: ./testdata/dummy-tls-key-file diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-key-file.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-key-file.yaml new file mode 100644 index 0000000..c318391 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-non-existent-key-file.yaml @@ -0,0 +1,8 @@ +prometheus: + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + tls_config: + key_file: /nonexistentkeyfile + cert_file: ./testdata/dummy-tls-cert-file diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-relabel.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-relabel.yaml new file mode 100644 index 0000000..68559c2 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-relabel.yaml @@ -0,0 +1,8 @@ +prometheus: + config: + scrape_configs: + - job_name: rename + metric_relabel_configs: + - source_labels: [__name__] + regex: "foo_(.*)" + target_label: __name__ diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml new file mode 100644 index 0000000..08b36c4 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml @@ -0,0 +1,6 @@ +prometheus: + config: + use_start_time_metric: true + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-unsupported-features.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-unsupported-features.yaml new file mode 100644 index 0000000..3111f1d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-unsupported-features.yaml @@ -0,0 +1,29 @@ +prometheus: + buffer_period: 234 + buffer_count: 45 + use_start_time_metric: true + start_time_metric_regex: '^(.+_)*process_start_time_seconds$' + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s + remote_write: + - url: "https://example.org/write" + + remote_read: + - url: "https://example.org/read" + + rule_files: [ "a", "b" ] + + alerting: + alert_relabel_configs: + - separator: "," + target_label: "fix" + + - separator: "|" + target_label: "gotham" + + alertmanagers: + - scheme: "http" + path_prefix: "/prefix1" + timeout: 1s diff --git a/collector/receiver/prometheusreceiver/testdata/invalid-config-section.yaml b/collector/receiver/prometheusreceiver/testdata/invalid-config-section.yaml new file mode 100644 index 0000000..da67b9d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/invalid-config-section.yaml @@ -0,0 
+1,6 @@ +prometheus: + unknow_section: 1 + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_blank_line/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_blank_line/metrics new file mode 100644 index 0000000..1651763 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_blank_line/metrics @@ -0,0 +1,3 @@ +a 1 + +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_0/metrics new file mode 100644 index 0000000..0107183 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_0/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +# TYPE a counter +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_1/metrics new file mode 100644 index 0000000..ffc07e0 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_1/metrics @@ -0,0 +1,3 @@ +# TYPE a info +# TYPE a counter +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_2/metrics new file mode 100644 index 0000000..a8503f3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_clashing_names_2/metrics @@ -0,0 +1,3 @@ +# TYPE a_created gauge +# TYPE a counter +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_0/metrics new file mode 100644 index 0000000..18af962 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_0/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_1/metrics new file mode 100644 index 0000000..10bf43c --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_1/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_10/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_10/metrics new file mode 100644 index 0000000..3d29d85 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_10/metrics @@ -0,0 +1,3 @@ +# TYPE a gaugehistogram +a_bucket{le=" Inf"} NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_11/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_11/metrics new file mode 100644 index 0000000..d883705 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_11/metrics @@ -0,0 +1,4 @@ +# TYPE a gaugehistogram +a_bucket{le=" Inf"} -1 +a_gcount -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_12/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_12/metrics new file mode 100644 index 0000000..154661f --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_12/metrics @@ -0,0 +1,3 @@ +# TYPE a gaugehistogram +a_bucket{le=" Inf"} -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_13/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_13/metrics new file mode 100644 index 0000000..c86b2dc --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_13/metrics @@ -0,0 +1,4 @@ +# TYPE a gaugehistogram +a_bucket{le=" Inf"} 1 +a_gsum -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_14/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_14/metrics new file mode 100644 index 0000000..013a4c8 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_14/metrics @@ -0,0 +1,4 @@ +# TYPE a gaugehistogram +a_bucket{le=" Inf"} 1 +a_gsum NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_15/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_15/metrics new file mode 100644 index 0000000..40dfde9 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_15/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a_sum NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_16/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_16/metrics new file mode 100644 index 0000000..3dc0b5e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_16/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a_count NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_17/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_17/metrics new file mode 100644 index 0000000..e6a5cdb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_17/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a_sum -1 +# EOF diff --git 
a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_18/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_18/metrics new file mode 100644 index 0000000..595f3b1 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_18/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a_count -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_19/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_19/metrics new file mode 100644 index 0000000..2922761 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_19/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a{quantile="0.5"} -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_2/metrics new file mode 100644 index 0000000..9846e8a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_2/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_sum NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_3/metrics new file mode 100644 index 0000000..0c0f48d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_3/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_count NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_4/metrics new file mode 100644 index 0000000..994b0f7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_4/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} NaN +# 
EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_5/metrics new file mode 100644 index 0000000..06e20d5 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_5/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_sum -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_6/metrics new file mode 100644 index 0000000..683b8cb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_6/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_count -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_7/metrics new file mode 100644 index 0000000..fcc1eff --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_7/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_8/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_8/metrics new file mode 100644 index 0000000..f8dd970 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_8/metrics @@ -0,0 +1,5 @@ +# TYPE a histogram +a_bucket{le="-1.0"} 1 +a_bucket{le=" Inf"} 2 +a_sum -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_9/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_9/metrics new file mode 100644 index 0000000..eae0f5a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_counter_values_9/metrics @@ 
-0,0 +1,5 @@ +# TYPE a histogram +a_bucket{le="-1.0"} 1 +a_bucket{le=" Inf"} 2 +a_sum 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_0/metrics new file mode 100644 index 0000000..1550137 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_0/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total 1 # {a="b"} 0.5 NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_1/metrics new file mode 100644 index 0000000..927a0ca --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_1/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total 1 # {a="b"} 0.5 Inf +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_2/metrics new file mode 100644 index 0000000..9090b80 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplar_timestamp_2/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total 1 # {a="b"} 0.5 -Inf +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_0/metrics new file mode 100644 index 0000000..5b4c21b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_0/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1 # +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_1/metrics new file mode 100644 index 0000000..9b1b78c --- 
/dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_1/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1# {} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_10/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_10/metrics new file mode 100644 index 0000000..673e450 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_10/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total 1 1 # id="a"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_11/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_11/metrics new file mode 100644 index 0000000..1682ea3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_11/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total 1 1 #id=" # "} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_12/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_12/metrics new file mode 100644 index 0000000..25fc3c0 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_12/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total 1 1 id=" # "} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_2/metrics new file mode 100644 index 0000000..241103f --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_2/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1 #{} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_3/metrics new file mode 100644 index 0000000..3584eca --- 
/dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_3/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1 # {}1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_4/metrics new file mode 100644 index 0000000..74dde01 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_4/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1 # {} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_5/metrics new file mode 100644 index 0000000..7d06118 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_5/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1 # {} 1 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_6/metrics new file mode 100644 index 0000000..6858542 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_6/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1 # {a="23456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"} 1 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_7/metrics new file mode 100644 index 0000000..b5b0bdc --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_7/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1 # {} 0x1p-3 +# EOF diff --git 
a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_8/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_8/metrics new file mode 100644 index 0000000..40df8c1 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_8/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 1 # {} 1 0x1p-3 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_9/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_9/metrics new file mode 100644 index 0000000..32283d1 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_9/metrics @@ -0,0 +1,3 @@ +# TYPE a counter +a_total 1 1 # {id="a"} +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_0/metrics new file mode 100644 index 0000000..ff3f6d6 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_0/metrics @@ -0,0 +1,3 @@ +# TYPE a gauge +a 1 # {a="b"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_1/metrics new file mode 100644 index 0000000..9f76dc7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_1/metrics @@ -0,0 +1,3 @@ +# TYPE a info +a_info 1 # {a="b"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_2/metrics new file mode 100644 index 0000000..dc7c955 --- 
/dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_metric_types_2/metrics @@ -0,0 +1,3 @@ +# TYPE a stateset +a{a="b"} 1 # {c="d"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_0/metrics new file mode 100644 index 0000000..994e3ca --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_0/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_sum 1 # {a="b"} 0.5 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_1/metrics new file mode 100644 index 0000000..e55effc --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_1/metrics @@ -0,0 +1,3 @@ +# TYPE a gaugehistogram +a_sum 1 # {a="b"} 0.5 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_2/metrics new file mode 100644 index 0000000..bb9cf6d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_2/metrics @@ -0,0 +1,3 @@ +# TYPE a_bucket gauge +a_bucket 1 # {a="b"} 0.5 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_3/metrics new file mode 100644 index 0000000..b997228 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_exemplars_on_unallowed_samples_3/metrics @@ -0,0 +1,3 @@ +# TYPE a counter 
+a_created 1 # {a="b"} 0.5 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_0/metrics new file mode 100644 index 0000000..e1649ff --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_0/metrics @@ -0,0 +1,8 @@ +# TYPE a histogram +a_sum{a="1"} 0 +a_bucket{a="2",le="+Inf"} 0 +a_count{a="2"} 0 +a_sum{a="2"} 0 +a_bucket{a="1",le="+Inf"} 0 +a_count{a="1"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_1/metrics new file mode 100644 index 0000000..856b142 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_1/metrics @@ -0,0 +1,5 @@ +# TYPE a histogram +a_bucket{a="1",le="1"} 0 +a_bucket{a="2",le=" Inf""} 0 +a_bucket{a="1",le=" Inf"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_10/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_10/metrics new file mode 100644 index 0000000..61326d2 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_10/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +a 0 0 +a 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_2/metrics new file mode 100644 index 0000000..235cf5b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_2/metrics @@ -0,0 +1,5 @@ +# TYPE a gaugehistogram +a_gsum{a="1"} 0 +a_gsum{a="2"} 0 +a_gcount{a="1"} 0 +# EOF diff --git 
a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_3/metrics new file mode 100644 index 0000000..bd6a6eb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_3/metrics @@ -0,0 +1,5 @@ +# TYPE a summary +quantile{quantile="0"} 0 +a_sum{a="1"} 0 +quantile{quantile="1"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_4/metrics new file mode 100644 index 0000000..39421fa --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_4/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +a 0 -1 +a 0 -2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_5/metrics new file mode 100644 index 0000000..0902bbb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_5/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +a 0 -1 +a 0 -1.1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_6/metrics new file mode 100644 index 0000000..7ac8000 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_6/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +a 0 1 +a 0 -1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_7/metrics new file mode 100644 index 0000000..91895d5 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_7/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +a 0 1.1 +a 0 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_8/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_8/metrics new file mode 100644 index 0000000..c2f8d15 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_8/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +a 0 1 +a 0 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_9/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_9/metrics new file mode 100644 index 0000000..154777a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_grouping_or_ordering_9/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +a 0 +a 0 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_0/metrics new file mode 100644 index 0000000..14abe94 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_0/metrics @@ -0,0 +1,2 @@ +# HELP +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_1/metrics new file mode 100644 index 0000000..b58f10e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_1/metrics @@ -0,0 +1,2 @@ +# HELP +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_2/metrics new file mode 100644 index 0000000..8d294d3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_2/metrics @@ -0,0 +1,2 @@ +# 
HELP a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_3/metrics new file mode 100644 index 0000000..8a55351 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_3/metrics @@ -0,0 +1,2 @@ +# HELP a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_4/metrics new file mode 100644 index 0000000..71d0bfa --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_help_4/metrics @@ -0,0 +1,2 @@ + # HELP a meh +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_0/metrics new file mode 100644 index 0000000..d34c878 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_0/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_sum 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_1/metrics new file mode 100644 index 0000000..fcb3d55 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_1/metrics @@ -0,0 +1,4 @@ +# TYPE a histogram +a_bucket{le="+Inf"} 0 +a_sum 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_10/bad_histograms_6/metrics.txt b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_10/bad_histograms_6/metrics.txt new file mode 100644 index 0000000..0f8f1a5 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_10/bad_histograms_6/metrics.txt @@ -0,0 +1,4 @@ +# TYPE a histogram +a_count 1 +a_bucket{le=" Inf"} 0 +# EOF diff --git 
a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_10/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_10/metrics new file mode 100644 index 0000000..36cc0a0 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_10/metrics @@ -0,0 +1,5 @@ +# TYPE a histogram +a_bucket{le="1"} 1 +a_bucket{le="2"} 1 +a_bucket{le=" Inf"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_11/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_11/metrics new file mode 100644 index 0000000..c145cd3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_11/metrics @@ -0,0 +1,4 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 0 +a_count 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_12/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_12/metrics new file mode 100644 index 0000000..3906607 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_12/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket{le=" INF"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_2/metrics new file mode 100644 index 0000000..9ead473 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_2/metrics @@ -0,0 +1,4 @@ +# TYPE a histogram +a_bucket{le="+Inf"} 0 +a_count 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_3/metrics new file mode 100644 index 0000000..d2499e1 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_3/metrics @@ -0,0 +1,6 @@ +# 
TYPE a histogram +a_bucket{le="-1"} 0 +a_bucket{le="+Inf"} 0 +a_sum 0 +a_count 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_4/metrics new file mode 100644 index 0000000..1158906 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_4/metrics @@ -0,0 +1,4 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 0 +#a_sum 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_5/metrics new file mode 100644 index 0000000..90262e7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_5/metrics @@ -0,0 +1,4 @@ +# TYPE a histogram +a_bucket{le=" Inf"} 0 +#a_count 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_6/metrics new file mode 100644 index 0000000..ba6ceaa --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_6/metrics @@ -0,0 +1,3 @@ +# TYPE a gaugehistogram +a_gsum 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_7/metrics new file mode 100644 index 0000000..ae20c84 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_7/metrics @@ -0,0 +1,5 @@ +# TYPE a gaugehistogram +a_bucket{le="+Inf"} 1 +a_gsum -1 +a_gcount 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_8/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_8/metrics new file mode 100644 index 0000000..939d9b8 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_8/metrics @@ -0,0 +1,4 @@ +# TYPE a gaugehistogram +a_bucket{le=" Inf"} 0 +a_gsum 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_9/bad_histograms_5/metrics.txt b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_9/bad_histograms_5/metrics.txt new file mode 100644 index 0000000..fab941c --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_9/bad_histograms_5/metrics.txt @@ -0,0 +1,4 @@ +# TYPE a gaugehistogram +a_bucket{le=" Inf"} 0 +a_gcount 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_9/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_9/metrics new file mode 100644 index 0000000..21ec60b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_histograms_9/metrics @@ -0,0 +1,5 @@ +# TYPE a histogram +a_bucket{le="2"} 0 +a_bucket{le="1"} 0 +a_bucket{le=" Inf"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_info_and_stateset_values_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_info_and_stateset_values_0/metrics new file mode 100644 index 0000000..b0c6094 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_info_and_stateset_values_0/metrics @@ -0,0 +1,3 @@ +# TYPE a info +a_info{foo="bar"} 2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_info_and_stateset_values_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_info_and_stateset_values_1/metrics new file mode 100644 index 0000000..0deecff --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_info_and_stateset_values_1/metrics @@ -0,0 +1,3 @@ +# TYPE a stateset +a{a="bar"} 2 +# EOF diff --git 
a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_0/metrics new file mode 100644 index 0000000..6ef701c --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_0/metrics @@ -0,0 +1,2 @@ +a{1="1"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_1/metrics new file mode 100644 index 0000000..a771190 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_1/metrics @@ -0,0 +1,2 @@ +a{1="1"}1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_2/metrics new file mode 100644 index 0000000..1fe2177 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_2/metrics @@ -0,0 +1,2 @@ +a{a="1",a="1"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_3/metrics new file mode 100644 index 0000000..37ad342 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_3/metrics @@ -0,0 +1,2 @@ +a{a="1"b} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_4/metrics new file mode 100644 index 0000000..adcf529 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_4/metrics @@ -0,0 +1,2 @@ +a{1=" # "} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_5/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_5/metrics new file mode 100644 index 0000000..f7f78b5 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_5/metrics @@ -0,0 +1,2 @@ +a{a=" # ",a=" # "} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_6/metrics new file mode 100644 index 0000000..324566b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_6/metrics @@ -0,0 +1,2 @@ +a{a=" # "}1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_7/metrics new file mode 100644 index 0000000..7996460 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_7/metrics @@ -0,0 +1,2 @@ +a{a=" # ",b=}1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_8/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_8/metrics new file mode 100644 index 0000000..440ced3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_invalid_labels_8/metrics @@ -0,0 +1,2 @@ +a{a=" # "b}1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata/metrics new file mode 100644 index 0000000..d927b57 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata/metrics @@ -0,0 +1,2 @@ +# FOO a x +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_0/metrics new file 
mode 100644 index 0000000..e1e4db3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_0/metrics @@ -0,0 +1,4 @@ +# HELP a x +a 1 +# TYPE a gauge +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_1/metrics new file mode 100644 index 0000000..f05c08e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_1/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +a 1 +# HELP a gauge +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_2/metrics new file mode 100644 index 0000000..c875fca --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metadata_in_wrong_place_2/metrics @@ -0,0 +1,4 @@ +# TYPE a_s gauge +a_s 1 +# UNIT a_s s +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_0/metrics new file mode 100644 index 0000000..e179685 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_0/metrics @@ -0,0 +1,2 @@ +0a 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_1/metrics new file mode 100644 index 0000000..7538037 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_1/metrics @@ -0,0 +1,2 @@ +a.b 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_2/metrics new file 
mode 100644 index 0000000..0d79e9e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_metric_names_2/metrics @@ -0,0 +1,2 @@ +a-b 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_0/metrics new file mode 100644 index 0000000..7629da5 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_0/metrics @@ -0,0 +1,2 @@ +a{a} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_1/metrics new file mode 100644 index 0000000..8ab0742 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_1/metrics @@ -0,0 +1,2 @@ +a{a"value"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_2/metrics new file mode 100644 index 0000000..4638d59 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_2/metrics @@ -0,0 +1,2 @@ +a{a""} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_3/metrics new file mode 100644 index 0000000..1eed3b3 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_3/metrics @@ -0,0 +1,2 @@ +a{a=} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_4/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_4/metrics new file mode 100644 index 0000000..03a0883 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_equal_or_label_value_4/metrics @@ -0,0 +1,2 @@ +a{a="} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_0/metrics new file mode 100644 index 0000000..14824b6 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_0/metrics @@ -0,0 +1,2 @@ +a{a="1"b="2"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_1/metrics new file mode 100644 index 0000000..87126e4 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_1/metrics @@ -0,0 +1,2 @@ +a{a="1",,b="2"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_2/metrics new file mode 100644 index 0000000..0a42c38 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_extra_commas_2/metrics @@ -0,0 +1,2 @@ +a{a="1",b="2",} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_0/metrics new file mode 100644 index 0000000..2e3537d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_0/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a 0 +# EOF diff 
--git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_1/metrics new file mode 100644 index 0000000..296d6c6 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_1/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a{quantile="-1"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_2/metrics new file mode 100644 index 0000000..5ad8ecb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_2/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a{quantile="foo"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_3/metrics new file mode 100644 index 0000000..032e702 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_3/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a{quantile="1.01"} 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_4/metrics new file mode 100644 index 0000000..39d7a9d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_4/metrics @@ -0,0 +1,3 @@ +# TYPE a summary +a{quantile="NaN"} 0 +# EOF diff --git 
a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_5/metrics new file mode 100644 index 0000000..d7aee45 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_5/metrics @@ -0,0 +1,3 @@ +# TYPE a histogram +a_bucket 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_6/metrics new file mode 100644 index 0000000..b921150 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_6/metrics @@ -0,0 +1,3 @@ +# TYPE a gaugehistogram +a_bucket 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_7/metrics new file mode 100644 index 0000000..622b69a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_invalid_labels_for_a_type_7/metrics @@ -0,0 +1,3 @@ +# TYPE a stateset +a 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_0/metrics new file mode 100644 index 0000000..93223ea --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_0/metrics @@ -0,0 +1,2 @@ +a{a=1} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_1/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_1/metrics new file mode 100644 index 0000000..54944e0 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_1/metrics @@ -0,0 +1,2 @@ +a{a="1} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_2/metrics new file mode 100644 index 0000000..3accb25 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_or_wrong_quotes_on_label_value_2/metrics @@ -0,0 +1,2 @@ +a{a='1'} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_value_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_value_0/metrics new file mode 100644 index 0000000..7894c0c --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_value_0/metrics @@ -0,0 +1,2 @@ +a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_value_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_value_1/metrics new file mode 100644 index 0000000..05f5107 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_missing_value_1/metrics @@ -0,0 +1,2 @@ +a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_no_eof/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_no_eof/metrics new file mode 100644 index 0000000..e69de29 diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_0/metrics new file mode 100644 index 0000000..280da0c --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_0/metrics @@ -0,0 +1,3 @@ +# HELP a +# HELP a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_1/metrics new file mode 100644 index 0000000..ee4beab --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_1/metrics @@ -0,0 +1,3 @@ +# HELP a x +# HELP a x +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_2/metrics new file mode 100644 index 0000000..9774835 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_2/metrics @@ -0,0 +1,3 @@ +# TYPE a untyped +# TYPE a untyped +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_3/metrics new file mode 100644 index 0000000..0fcb6f7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_repeated_metadata_3/metrics @@ -0,0 +1,3 @@ +# UNIT a_s s +# UNIT a_s s +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_0/metrics new file mode 100644 index 0000000..5846f09 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_0/metrics @@ -0,0 +1,3 @@ +# TYPE a stateset +a 2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_1/metrics new file mode 100644 index 
0000000..f25e381 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_1/metrics @@ -0,0 +1,3 @@ +# TYPE a info +a 2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_2/metrics new file mode 100644 index 0000000..e91fa4b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_2/metrics @@ -0,0 +1,3 @@ +# TYPE a stateset +a 2.0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_3/metrics new file mode 100644 index 0000000..605f610 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_stateset_info_values_3/metrics @@ -0,0 +1,3 @@ +# TYPE a info +a 2.0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_text_after_eof_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_text_after_eof_0/metrics new file mode 100644 index 0000000..dc5272f --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_text_after_eof_0/metrics @@ -0,0 +1,3 @@ +a 1 +# EOF +blah \ No newline at end of file diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_text_after_eof_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_text_after_eof_1/metrics new file mode 100644 index 0000000..c47ed6e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_text_after_eof_1/metrics @@ -0,0 +1,2 @@ +a 1 +# EOFblah \ No newline at end of file diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_0/metrics new file mode 100644 
index 0000000..b12bb27 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_0/metrics @@ -0,0 +1,2 @@ +a 1 z +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_1/metrics new file mode 100644 index 0000000..9cd6c83 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_1/metrics @@ -0,0 +1,2 @@ +a 1 1z +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_2/metrics new file mode 100644 index 0000000..6da2dab --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_2/metrics @@ -0,0 +1,2 @@ +a 1 1_2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_3/metrics new file mode 100644 index 0000000..e07eb4a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_3/metrics @@ -0,0 +1,2 @@ +a 1 1.1.1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_4/metrics new file mode 100644 index 0000000..dccd52c --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_4/metrics @@ -0,0 +1,2 @@ +a 1 NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_5/metrics new file mode 100644 index 0000000..313ab4f --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_5/metrics @@ -0,0 +1,2 @@ +a 1 Inf +# EOF diff --git 
a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_6/metrics new file mode 100644 index 0000000..bb2658f --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_6/metrics @@ -0,0 +1,2 @@ +a 1 Inf +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_7/metrics new file mode 100644 index 0000000..37df511 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_7/metrics @@ -0,0 +1,2 @@ +a 1 -Inf +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_8/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_8/metrics new file mode 100644 index 0000000..6a0684d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_timestamp_8/metrics @@ -0,0 +1,2 @@ +a 1 0x1p-3 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_0/metrics new file mode 100644 index 0000000..b5ab1f7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_0/metrics @@ -0,0 +1,2 @@ +# TYPE +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_1/metrics new file mode 100644 index 0000000..c2ae708 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_1/metrics @@ -0,0 +1,2 @@ +# TYPE +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_2/metrics new file mode 100644 index 0000000..c9d0100 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_2/metrics @@ -0,0 +1,2 @@ +# TYPE a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_3/metrics new file mode 100644 index 0000000..f2d3ce1 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_3/metrics @@ -0,0 +1,2 @@ +# TYPE a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_4/metrics new file mode 100644 index 0000000..baedc29 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_4/metrics @@ -0,0 +1,2 @@ +# TYPE a meh +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_5/metrics new file mode 100644 index 0000000..42dacfa --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_5/metrics @@ -0,0 +1,2 @@ +# TYPE a meh +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_6/metrics new file mode 100644 index 0000000..099b174 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_6/metrics @@ -0,0 +1,2 @@ +# TYPE a gauge +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_7/metrics new file mode 100644 index 0000000..f08bde7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_type_7/metrics @@ -0,0 +1,2 @@ +# TYPE a untyped +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_0/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_0/metrics new file mode 100644 index 0000000..6f54889 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_0/metrics @@ -0,0 +1,2 @@ +# UNIT +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_1/metrics new file mode 100644 index 0000000..919cdfa --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_1/metrics @@ -0,0 +1,2 @@ +# UNIT +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_2/metrics new file mode 100644 index 0000000..30a25df --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_2/metrics @@ -0,0 +1,2 @@ +# UNIT a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_3/metrics new file mode 100644 index 0000000..942a2ba --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_3/metrics @@ -0,0 +1,2 @@ +# UNIT a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_4/metrics new file mode 100644 index 0000000..67ff4ee --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_4/metrics @@ -0,0 +1,2 @@ +# UNIT a seconds +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_5/metrics new file mode 100644 index 0000000..dcb4fcf --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_5/metrics @@ -0,0 +1,2 @@ +# UNIT a_seconds seconds 
+# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_6/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_6/metrics new file mode 100644 index 0000000..7527428 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_6/metrics @@ -0,0 +1,3 @@ +# TYPE x_u info +# UNIT x_u u +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_7/metrics new file mode 100644 index 0000000..4e2d210 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_unit_7/metrics @@ -0,0 +1,3 @@ +# TYPE x_u stateset +# UNIT x_u u +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_0/metrics new file mode 100644 index 0000000..668ca3f --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_0/metrics @@ -0,0 +1,2 @@ +a a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_1/metrics new file mode 100644 index 0000000..2c5769e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_1/metrics @@ -0,0 +1,2 @@ +a 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_10/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_10/metrics new file mode 100644 index 0000000..eebf59d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_10/metrics @@ -0,0 +1,2 @@ +a 0X1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_11/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_11/metrics new file mode 100644 index 
0000000..0f41a29 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_11/metrics @@ -0,0 +1,2 @@ +a 0o1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_12/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_12/metrics new file mode 100644 index 0000000..46a6c92 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_12/metrics @@ -0,0 +1,2 @@ +a 0O1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_2/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_2/metrics new file mode 100644 index 0000000..7bedce8 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_2/metrics @@ -0,0 +1,2 @@ +a 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_3/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_3/metrics new file mode 100644 index 0000000..7f5bb9b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_3/metrics @@ -0,0 +1,2 @@ +a 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_4/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_4/metrics new file mode 100644 index 0000000..493bd6b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_4/metrics @@ -0,0 +1,2 @@ +a 1_2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_5/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_5/metrics new file mode 100644 index 0000000..45d4b85 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_5/metrics @@ -0,0 +1,2 @@ +a 0x1p-3 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_6/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_6/metrics new file mode 100644 index 0000000..272e7f2 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_6/metrics @@ -0,0 +1,2 @@ +a 0x1P-3 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_7/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_7/metrics new file mode 100644 index 0000000..9e7ea0a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_7/metrics @@ -0,0 +1,2 @@ +a 0b1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_8/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_8/metrics new file mode 100644 index 0000000..6912c49 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_8/metrics @@ -0,0 +1,2 @@ +a 0B1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_9/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_9/metrics new file mode 100644 index 0000000..493a9bc --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/bad_value_9/metrics @@ -0,0 +1,2 @@ +a 0x1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_exemplars/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_exemplars/metrics new file mode 100644 index 0000000..64f0128 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_exemplars/metrics @@ -0,0 +1,4 @@ +# TYPE a counter +# HELP a help +a_total 0 123 # {a="b"} 0.5 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_exemplars_empty_brackets/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_exemplars_empty_brackets/metrics new file mode 100644 index 0000000..8773744 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_exemplars_empty_brackets/metrics @@ -0,0 +1,4 @@ +# TYPE a counter +# HELP a help +a_total{} 0 123 # {a="b"} 0.5 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_unit/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_unit/metrics new file mode 100644 index 0000000..4d93d5e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/counter_unit/metrics @@ -0,0 +1,6 @@ +# HELP cc_seconds A counter +# TYPE cc_seconds counter +# UNIT cc_seconds seconds +cc_seconds_total 1.0 +cc_seconds_created 123.456 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/duplicate_timestamps_0/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/duplicate_timestamps_0/metrics new file mode 100644 index 0000000..2537e89 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/duplicate_timestamps_0/metrics @@ -0,0 +1,8 @@ +# TYPE a gauge +# HELP a help +a{a="1",foo="bar"} 1 0.0000000000 +a{a="1",foo="bar"} 2 0.0000000001 +a{a="1",foo="bar"} 3 0.0000000010 +a{a="2",foo="bar"} 4 0.0000000000 +a{a="2",foo="bar"} 5 0.0000000001 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/duplicate_timestamps_1/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/duplicate_timestamps_1/metrics new file mode 100644 index 0000000..02611b5 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/duplicate_timestamps_1/metrics @@ -0,0 +1,8 @@ +# TYPE a gauge +# HELP a help +a{a="1",foo="bar"} 1 0 +a{a="1",foo="bar"} 2 0 +a{a="1",foo="bar"} 3 0 +a{a="2",foo="bar"} 4 0 +a{a="2",foo="bar"} 5 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_brackets/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_brackets/metrics new file mode 100644 index 0000000..3a14b0d --- 
/dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_brackets/metrics @@ -0,0 +1,4 @@ +# TYPE a counter +# HELP a help +a_total{} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_help/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_help/metrics new file mode 100644 index 0000000..b4f930e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_help/metrics @@ -0,0 +1,4 @@ +# TYPE a counter +# HELP a +a_total 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_label/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_label/metrics new file mode 100644 index 0000000..29e6696 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_label/metrics @@ -0,0 +1,5 @@ +# TYPE a counter +# HELP a help +a_total{foo="bar"} 1 +a_total{foo=""} 2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_metadata/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_metadata/metrics new file mode 100644 index 0000000..1c6f47d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/empty_metadata/metrics @@ -0,0 +1,3 @@ +# HELP a +# UNIT a +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/escaping/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/escaping/metrics new file mode 100644 index 0000000..f3b6246 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/escaping/metrics @@ -0,0 +1,7 @@ +# TYPE a counter +# HELP a he\n\\l\tp +a_total{foo="b\"a\nr"} 1 +a_total{foo="b\\a\z"} 2 +a_total{foo="b\"a\nr # "} 3 +a_total{foo="b\\a\z # "} 4 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/exemplars_with_hash_in_label_values/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/exemplars_with_hash_in_label_values/metrics new file mode 100644 index 0000000..d8908b9 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/exemplars_with_hash_in_label_values/metrics @@ -0,0 +1,6 @@ +# TYPE a histogram +# HELP a help +a_bucket{le="1.0",foo="bar # "} 0 # {a="b",foo="bar # bar"} 0.5 +a_bucket{le="2.0",foo="bar # "} 2 # {a="c",foo="bar # bar"} 0.5 +a_bucket{le="+Inf",foo="bar # "} 3 # {a="d",foo="bar # bar"} 4 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/float_gauge/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/float_gauge/metrics new file mode 100644 index 0000000..63120de --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/float_gauge/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +# HELP a help +a 1.2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/gaugehistogram_exemplars/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/gaugehistogram_exemplars/metrics new file mode 100644 index 0000000..f666a6d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/gaugehistogram_exemplars/metrics @@ -0,0 +1,6 @@ +# TYPE a gaugehistogram +# HELP a help +a_bucket{le="1.0"} 0 123 # {a="b"} 0.5 +a_bucket{le="2.0"} 2 123 # {a="c"} 0.5 +a_bucket{le="+Inf"} 3 123 # {a="d"} 4 123 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/hash_in_label_value/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/hash_in_label_value/metrics new file mode 100644 index 0000000..e4307fc --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/hash_in_label_value/metrics @@ -0,0 +1,5 @@ +# TYPE a counter +# HELP a help +a_total{foo="foo # bar"} 1 +a_total{foo="} foo # bar # "} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/help_escaping/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/help_escaping/metrics new file mode 100644 index 0000000..4baf782 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/help_escaping/metrics @@ -0,0 +1,31 @@ +# TYPE a0 counter +# HELP a0 foo +a0_total{foo="bar"} 1 +# TYPE a1 counter +# HELP a1 \foo +a1_total{foo="bar"} 1 +# TYPE a2 counter +# HELP a2 \\foo +a2_total{foo="bar"} 1 +# TYPE a3 counter +# HELP a3 foo\\ +a3_total{foo="bar"} 1 +# TYPE a4 counter +# HELP a4 \\ +a4_total{foo="bar"} 1 +# TYPE a5 counter +# HELP a5 \n +a5_total{foo="bar"} 1 +# TYPE a6 counter +# HELP a6 \\n +a6_total{foo="bar"} 1 +# TYPE a7 counter +# HELP a7 \\\n +a7_total{foo="bar"} 1 +# TYPE a8 counter +# HELP a8 \" +a8_total{foo="bar"} 1 +# TYPE a9 counter +# HELP a9 \\" +a9_total{foo="bar"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/histogram_exemplars/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/histogram_exemplars/metrics new file mode 100644 index 0000000..b191834 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/histogram_exemplars/metrics @@ -0,0 +1,6 @@ +# TYPE a histogram +# HELP a help +a_bucket{le="1.0"} 0 # {a="b"} 0.5 +a_bucket{le="2.0"} 2 # {a="c"} 0.5 +a_bucket{le="+Inf"} 3 # {a="2345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678"} 4 123 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/histogram_noncanonical/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/histogram_noncanonical/metrics new file mode 100644 index 0000000..041353e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/histogram_noncanonical/metrics @@ -0,0 +1,17 @@ +# TYPE a histogram +# HELP a help +a_bucket{le="0"} 0 +a_bucket{le="0.00000000001"} 0 +a_bucket{le="0.0000000001"} 0 +a_bucket{le="1e-04"} 0 +a_bucket{le="1.1e-4"} 0 
+a_bucket{le="1.1e-3"} 0 +a_bucket{le="1.1e-2"} 0 +a_bucket{le="1"} 0 +a_bucket{le="1e+05"} 0 +a_bucket{le="10000000000"} 0 +a_bucket{le="100000000000.0"} 0 +a_bucket{le="+Inf"} 3 +a_count 3 +a_sum 2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/info_timestamps/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/info_timestamps/metrics new file mode 100644 index 0000000..bb215e6 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/info_timestamps/metrics @@ -0,0 +1,5 @@ +# TYPE a info +# HELP a help +a_info{a="1",foo="bar"} 1 1 +a_info{a="2",foo="bar"} 1 0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/label_escaping/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/label_escaping/metrics new file mode 100644 index 0000000..4480f5a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/label_escaping/metrics @@ -0,0 +1,31 @@ +# TYPE a0 counter +# HELP a0 help +a0_total{foo="foo",bar="baz"} 1 +# TYPE a1 counter +# HELP a1 help +a1_total{foo="\foo",bar="baz"} 1 +# TYPE a2 counter +# HELP a2 help +a2_total{foo="\\foo",bar="baz"} 1 +# TYPE a3 counter +# HELP a3 help +a3_total{foo="foo\\",bar="baz"} 1 +# TYPE a4 counter +# HELP a4 help +a4_total{foo="\\",bar="baz"} 1 +# TYPE a5 counter +# HELP a5 help +a5_total{foo="\n",bar="baz"} 1 +# TYPE a6 counter +# HELP a6 help +a6_total{foo="\\n",bar="baz"} 1 +# TYPE a7 counter +# HELP a7 help +a7_total{foo="\\\n",bar="baz"} 1 +# TYPE a8 counter +# HELP a8 help +a8_total{foo="\"",bar="baz"} 1 +# TYPE a9 counter +# HELP a9 help +a9_total{foo="\\\"",bar="baz"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/labels_and_infinite/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/labels_and_infinite/metrics new file mode 100644 index 0000000..9b24fbe --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/labels_and_infinite/metrics @@ -0,0 +1,5 @@ +# TYPE a gauge +# HELP a help +a{foo="bar"} +Inf +a{foo="baz"} -Inf +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/labels_with_curly_braces/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/labels_with_curly_braces/metrics new file mode 100644 index 0000000..c40b08b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/labels_with_curly_braces/metrics @@ -0,0 +1,4 @@ +# TYPE a counter +# HELP a help +a_total{foo="bar",bar="b{a}z"} 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/leading_zeros_float_gauge/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/leading_zeros_float_gauge/metrics new file mode 100644 index 0000000..f304202 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/leading_zeros_float_gauge/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +# HELP a help +a 0000000000000000000000000000000000000000001.2e-1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/leading_zeros_simple_gauge/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/leading_zeros_simple_gauge/metrics new file mode 100644 index 0000000..8b7fffb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/leading_zeros_simple_gauge/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +# HELP a help +a 0000000000000000000000000000000000000000001 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/nan/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/nan/metrics new file mode 100644 index 0000000..17f9164 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/nan/metrics @@ -0,0 +1,2 @@ +a NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/nan_gauge/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/nan_gauge/metrics new file mode 100644 index 0000000..3f48ced --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/nan_gauge/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +# HELP a help +a NaN +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/negative_bucket_gaugehistogram/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/negative_bucket_gaugehistogram/metrics new file mode 100644 index 0000000..619cad0 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/negative_bucket_gaugehistogram/metrics @@ -0,0 +1,8 @@ +# TYPE a gaugehistogram +# HELP a help +a_bucket{le="-1.0"} 1 +a_bucket{le="1.0"} 2 +a_bucket{le="+Inf"} 3 +a_gcount 3 +a_gsum -5 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/negative_bucket_histogram/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/negative_bucket_histogram/metrics new file mode 100644 index 0000000..ade0f99 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/negative_bucket_histogram/metrics @@ -0,0 +1,6 @@ +# TYPE a histogram +# HELP a help +a_bucket{le="-1.0"} 0 +a_bucket{le="1.0"} 1 +a_bucket{le="+Inf"} 3 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/no_metadata/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/no_metadata/metrics new file mode 100644 index 0000000..4e66dbb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/no_metadata/metrics @@ -0,0 +1,2 @@ +a 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/no_newline_after_eof/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/no_newline_after_eof/metrics new file mode 100644 index 0000000..564ba61 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/no_newline_after_eof/metrics @@ -0,0 +1,4 @@ +# 
TYPE a gauge +# HELP a help +a 1 +# EOF \ No newline at end of file diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/null_byte/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/null_byte/metrics new file mode 100644 index 0000000..c22ff3e Binary files /dev/null and b/collector/receiver/prometheusreceiver/testdata/openmetrics/null_byte/metrics differ diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/roundtrip/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/roundtrip/metrics new file mode 100644 index 0000000..6be34e7 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/roundtrip/metrics @@ -0,0 +1,59 @@ +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0.0"} 0.013300656000000001 +go_gc_duration_seconds{quantile="0.25"} 0.013638736 +go_gc_duration_seconds{quantile="0.5"} 0.013759906 +go_gc_duration_seconds{quantile="0.75"} 0.013962066 +go_gc_duration_seconds{quantile="1.0"} 0.021383540000000003 +go_gc_duration_seconds_sum 56.12904785 +go_gc_duration_seconds_count 7476.0 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 166.0 +# HELP prometheus_local_storage_indexing_batch_duration_milliseconds Quantiles for batch indexing duration in milliseconds. +# TYPE prometheus_local_storage_indexing_batch_duration_milliseconds summary +prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.5"} NaN +prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.9"} NaN +prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.99"} NaN +prometheus_local_storage_indexing_batch_duration_milliseconds_sum 871.5665949999999 +prometheus_local_storage_indexing_batch_duration_milliseconds_count 229.0 +# HELP process_cpu_seconds Total user and system CPU time spent in seconds. 
+# TYPE process_cpu_seconds counter +process_cpu_seconds_total 29323.4 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 2.478268416e+09 +# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, and branch from which Prometheus was built. +# TYPE prometheus_build_info gauge +prometheus_build_info{branch="HEAD",revision="ef176e5",version="0.16.0rc1"} 1.0 +# HELP prometheus_local_storage_chunk_ops The total number of chunk operations by their type. +# TYPE prometheus_local_storage_chunk_ops counter +prometheus_local_storage_chunk_ops_total{type="clone"} 28.0 +prometheus_local_storage_chunk_ops_total{type="create"} 997844.0 +prometheus_local_storage_chunk_ops_total{type="drop"} 1.345758e+06 +prometheus_local_storage_chunk_ops_total{type="load"} 1641.0 +prometheus_local_storage_chunk_ops_total{type="persist"} 981408.0 +prometheus_local_storage_chunk_ops_total{type="pin"} 32662.0 +prometheus_local_storage_chunk_ops_total{type="transcode"} 980180.0 +prometheus_local_storage_chunk_ops_total{type="unpin"} 32662.0 +# HELP foo histogram Testing histogram buckets +# TYPE foo histogram +foo_bucket{le="0.0"} 0.0 +foo_bucket{le="1e-05"} 0.0 +foo_bucket{le="0.0001"} 0.0 +foo_bucket{le="0.1"} 8.0 +foo_bucket{le="1.0"} 10.0 +foo_bucket{le="10.0"} 17.0 +foo_bucket{le="100000.0"} 17.0 +foo_bucket{le="1e+06"} 17.0 +foo_bucket{le="1.55555555555552e+06"} 17.0 +foo_bucket{le="1e+23"} 17.0 +foo_bucket{le="+Inf"} 17.0 +foo_count 17.0 +foo_sum 324789.3 +foo_created 1.520430000123e+09 +# HELP bar histogram Testing with labels +# TYPE bar histogram +bar_bucket{a="b",le="+Inf"} 0.0 +bar_bucket{a="c",le="+Inf"} 0.0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_counter/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_counter/metrics new file mode 100644 index 0000000..c36297c --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_counter/metrics @@ -0,0 +1,4 @@ +# TYPE a counter +# HELP a help +a_total 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_gauge/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_gauge/metrics new file mode 100644 index 0000000..59eb4bb --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_gauge/metrics @@ -0,0 +1,4 @@ +# TYPE a gauge +# HELP a help +a 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_gaugehistogram/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_gaugehistogram/metrics new file mode 100644 index 0000000..4b781f1 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_gaugehistogram/metrics @@ -0,0 +1,7 @@ +# TYPE a gaugehistogram +# HELP a help +a_bucket{le="1.0"} 0 +a_bucket{le="+Inf"} 3 +a_gcount 3 +a_gsum 2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_histogram/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_histogram/metrics new file mode 100644 index 0000000..8fef110 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_histogram/metrics @@ -0,0 +1,7 @@ +# TYPE a histogram +# HELP a help +a_bucket{le="1.0"} 0 +a_bucket{le="+Inf"} 3 +a_count 3 +a_sum 2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_stateset/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_stateset/metrics new file mode 100644 index 0000000..f5cfc1e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_stateset/metrics @@ -0,0 +1,5 @@ +# TYPE a stateset +# HELP a help +a{a="bar"} 0 +a{a="foo"} 1.0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_summary/metrics 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_summary/metrics new file mode 100644 index 0000000..e20539e --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/simple_summary/metrics @@ -0,0 +1,5 @@ +# TYPE a summary +# HELP a help +a_count 1 +a_sum 2 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/summary_quantiles/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/summary_quantiles/metrics new file mode 100644 index 0000000..f887673 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/summary_quantiles/metrics @@ -0,0 +1,7 @@ +# TYPE a summary +# HELP a help +a_count 1 +a_sum 2 +a{quantile="0.5"} 0.7 +a{quantile="1"} 0.8 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/timestamps/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/timestamps/metrics new file mode 100644 index 0000000..a90c72d --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/timestamps/metrics @@ -0,0 +1,11 @@ +# TYPE a counter +# HELP a help +a_total{foo="1"} 1 000 +a_total{foo="2"} 1 0.0 +a_total{foo="3"} 1 1.1 +a_total{foo="4"} 1 12345678901234567890.1234567890 +a_total{foo="5"} 1 1.5e3 +# TYPE b counter +# HELP b help +b_total 2 1234567890 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/type_help_switched/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/type_help_switched/metrics new file mode 100644 index 0000000..91b6db4 --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/type_help_switched/metrics @@ -0,0 +1,4 @@ +# HELP a help +# TYPE a counter +a_total 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/uint64_counter/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/uint64_counter/metrics new file mode 100644 index 0000000..f295e36 --- /dev/null +++ 
b/collector/receiver/prometheusreceiver/testdata/openmetrics/uint64_counter/metrics @@ -0,0 +1,4 @@ +# TYPE a counter +# HELP a help +a_total 9223372036854775808 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/unit_gauge/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/unit_gauge/metrics new file mode 100644 index 0000000..407f36a --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/unit_gauge/metrics @@ -0,0 +1,5 @@ +# TYPE a_seconds gauge +# UNIT a_seconds seconds +# HELP a_seconds help +a_seconds 1 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/openmetrics/untyped/metrics b/collector/receiver/prometheusreceiver/testdata/openmetrics/untyped/metrics new file mode 100644 index 0000000..d2eb66b --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/openmetrics/untyped/metrics @@ -0,0 +1,5 @@ +# HELP redis_connected_clients Redis connected clients +# TYPE redis_connected_clients unknown +redis_connected_clients{instance="rough-snowflake-web",port="6380"} 10.0 +redis_connected_clients{instance="rough-snowflake-web",port="6381"} 12.0 +# EOF diff --git a/collector/receiver/prometheusreceiver/testdata/sd-config-with-null-target-group.json b/collector/receiver/prometheusreceiver/testdata/sd-config-with-null-target-group.json new file mode 100644 index 0000000..c519aca --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/sd-config-with-null-target-group.json @@ -0,0 +1,12 @@ +[ + { + "targets": [ + "localhost:9090", + "example.org:443" + ], + "labels": { + "foo": "bar" + } + }, + null +] \ No newline at end of file diff --git a/collector/receiver/prometheusreceiver/testdata/sd-config-with-null-target-group.yaml b/collector/receiver/prometheusreceiver/testdata/sd-config-with-null-target-group.yaml new file mode 100644 index 0000000..a730aab --- /dev/null +++ b/collector/receiver/prometheusreceiver/testdata/sd-config-with-null-target-group.yaml @@ -0,0 
+1,5 @@ +- targets: ['localhost:9090', 'example.org:443'] + labels: + foo: bar + +- null \ No newline at end of file diff --git a/collector/service/components.go b/collector/service/components.go new file mode 100644 index 0000000..beccac0 --- /dev/null +++ b/collector/service/components.go @@ -0,0 +1,140 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package service + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter" + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/loggingexporter" + "go.opentelemetry.io/collector/exporter/otlpexporter" + "go.opentelemetry.io/collector/exporter/otlphttpexporter" + "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/ballastextension" + "go.opentelemetry.io/collector/extension/zpagesextension" + "go.opentelemetry.io/collector/otelcol" 
+ "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/batchprocessor" + "go.opentelemetry.io/collector/processor/memorylimiterprocessor" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.uber.org/multierr" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/exporter/googlemanagedprometheusexporter" + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/receiver/prometheusreceiver" +) + +func components() (otelcol.Factories, error) { + errs := []error{} + factories, err := Components() + if err != nil { + return otelcol.Factories{}, err + } + + extensions := []extension.Factory{} + for _, ext := range factories.Extensions { + extensions = append(extensions, ext) + } + factories.Extensions, err = extension.MakeFactoryMap(extensions...) + if err != nil { + errs = append(errs, err) + } + + receivers := []receiver.Factory{ + prometheusreceiver.NewFactory(), + } + for _, rcv := range factories.Receivers { + receivers = append(receivers, rcv) + } + factories.Receivers, err = receiver.MakeFactoryMap(receivers...) + if err != nil { + errs = append(errs, err) + } + + exporters := []exporter.Factory{ + fileexporter.NewFactory(), + googlecloudexporter.NewFactory(), + googlemanagedprometheusexporter.NewFactory(), + } + for _, exp := range factories.Exporters { + exporters = append(exporters, exp) + } + factories.Exporters, err = exporter.MakeFactoryMap(exporters...) + if err != nil { + errs = append(errs, err) + } + + processors := []processor.Factory{ + filterprocessor.NewFactory(), + resourcedetectionprocessor.NewFactory(), + resourceprocessor.NewFactory(), + transformprocessor.NewFactory(), + groupbyattrsprocessor.NewFactory(), + } + for _, pr := range factories.Processors { + processors = append(processors, pr) + } + factories.Processors, err = processor.MakeFactoryMap(processors...) 
+ if err != nil { + errs = append(errs, err) + } + + return factories, multierr.Combine(errs...) +} + +func Components() ( + otelcol.Factories, + error, +) { + var errs error + + extensions, err := extension.MakeFactoryMap( + zpagesextension.NewFactory(), + ballastextension.NewFactory(), + ) + errs = multierr.Append(errs, err) + + receivers, err := receiver.MakeFactoryMap( + otlpreceiver.NewFactory(), + ) + errs = multierr.Append(errs, err) + + exporters, err := exporter.MakeFactoryMap( + loggingexporter.NewFactory(), + otlpexporter.NewFactory(), + otlphttpexporter.NewFactory(), + ) + errs = multierr.Append(errs, err) + + processors, err := processor.MakeFactoryMap( + batchprocessor.NewFactory(), + memorylimiterprocessor.NewFactory(), + ) + errs = multierr.Append(errs, err) + + factories := otelcol.Factories{ + Extensions: extensions, + Receivers: receivers, + Processors: processors, + Exporters: exporters, + } + + return factories, errs +} diff --git a/collector/service/main.go b/collector/service/main.go new file mode 100644 index 0000000..8f88fbb --- /dev/null +++ b/collector/service/main.go @@ -0,0 +1,74 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package service + +import ( + "context" + "fmt" + "log" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/otelcol" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/internal/env" + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/internal/levelchanger" + "github.com/GoogleCloudPlatform/run-gmp-sidecar/collector/internal/version" +) + +func MainContext(ctx context.Context) { + if err := env.Create(); err != nil { + log.Printf("failed to build environment variables for config: %v", err) + } + + factories, err := components() + if err != nil { + log.Fatalf("failed to build default components: %v", err) + } + + info := component.BuildInfo{ + Command: "google-cloud-metrics-agent", + Description: "Google Cloud Metrics Agent", + Version: version.Version, + } + + params := otelcol.CollectorSettings{ + Factories: factories, + BuildInfo: info, + LoggingOptions: []zap.Option{ + levelchanger.NewLevelChangerOption( + zapcore.ErrorLevel, + zapcore.DebugLevel, + // We would like the Error logs from this file to be logged at Debug instead. 
+ // https://github.com/open-telemetry/opentelemetry-collector/blob/831373ae6c6959f6c9258ac585a2ec0ab19a074f/receiver/scraperhelper/scrapercontroller.go#L198 + levelchanger.FilePathLevelChangeCondition("scrapercontroller.go")), + }, + } + + if err := run(ctx, params); err != nil { + log.Fatal(err) + } +} + +func run(ctx context.Context, params otelcol.CollectorSettings) error { + cmd := otelcol.NewCommand(params) + err := cmd.ExecuteContext(ctx) + if err != nil { + return fmt.Errorf("application run finished with error: %w", err) + } + + return nil +} diff --git a/create-service-account.sh b/create-service-account.sh index 9dd2325..722df66 100755 --- a/create-service-account.sh +++ b/create-service-account.sh @@ -16,7 +16,7 @@ set -ex PROJECT_ID=$(gcloud config get-value project) -SA_NAME="run-otel-example-sa" +SA_NAME="run-gmp-sa" REGION="us-east1" #### Create service account with required roles @@ -53,7 +53,7 @@ gcloud projects add-iam-policy-binding "${PROJECT_ID}" \ --quiet #### Create artifact registry -gcloud artifacts repositories create run-otel-example \ +gcloud artifacts repositories create run-gmp \ --location "${REGION}" \ --repository-format=docker \ --quiet diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..b5dbd5a --- /dev/null +++ b/go.mod @@ -0,0 +1,265 @@ +module github.com/GoogleCloudPlatform/run-gmp-sidecar + +go 1.20 + +require ( + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector v0.42.0 + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector/googlemanagedprometheus v0.42.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.81.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter v0.81.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.81.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.81.0 + 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.81.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.81.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.81.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.81.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.81.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.81.0 + github.com/shirou/gopsutil v3.21.10+incompatible + github.com/stretchr/testify v1.8.4 + go.opentelemetry.io/collector v0.81.0 + go.opentelemetry.io/collector/component v0.81.0 + go.opentelemetry.io/collector/confmap v0.81.0 + go.opentelemetry.io/collector/consumer v0.81.0 + go.opentelemetry.io/collector/exporter v0.81.0 + go.opentelemetry.io/collector/exporter/loggingexporter v0.81.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.81.0 + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.81.0 + go.opentelemetry.io/collector/extension v0.81.0 + go.opentelemetry.io/collector/extension/ballastextension v0.81.0 + go.opentelemetry.io/collector/extension/zpagesextension v0.81.0 + go.opentelemetry.io/collector/featuregate v1.0.0-rcv0013 + go.opentelemetry.io/collector/pdata v1.0.0-rcv0013 + go.opentelemetry.io/collector/processor v0.81.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.81.0 + go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.81.0 + go.opentelemetry.io/collector/receiver v0.81.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.81.0 + go.opentelemetry.io/collector/semconv v0.81.0 + go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.24.0 + golang.org/x/text v0.10.0 +) + +require ( + github.com/google/s2a-go v0.1.4 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + 
github.com/knadh/koanf/v2 v2.0.1 // indirect + github.com/observiq/ctimefmt v1.0.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.81.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.81.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.81.0 // indirect + github.com/ovh/go-ovh v1.3.0 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/tidwall/gjson v1.10.2 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tidwall/tinylru v1.1.0 // indirect + github.com/tidwall/wal v1.1.7 // indirect + go.opentelemetry.io/collector/config/configauth v0.81.0 // indirect + go.opentelemetry.io/collector/config/configcompression v0.81.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.81.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.81.0 // indirect + go.opentelemetry.io/collector/config/confignet v0.81.0 // indirect + go.opentelemetry.io/collector/config/configopaque v0.81.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.81.0 // indirect + go.opentelemetry.io/collector/config/configtls v0.81.0 // indirect + go.opentelemetry.io/collector/config/internal v0.81.0 // indirect + go.opentelemetry.io/collector/connector v0.81.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.81.0 // indirect + go.opentelemetry.io/otel/bridge/opencensus v0.39.0 // indirect + golang.org/x/sys v0.9.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect + gotest.tools/v3 v3.4.0 // indirect +) + +require ( + cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect + cloud.google.com/go/longrunning v0.4.1 // indirect + 
github.com/iancoleman/strcase v0.2.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.81.0 // indirect; indir6.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/klog/v2 v2.90.1 // indirect +) + +require ( + cloud.google.com/go v0.110.2 // indirect + cloud.google.com/go/compute v1.20.0 // indirect + cloud.google.com/go/logging v1.7.0 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect + cloud.google.com/go/trace v1.9.0 // indirect + contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect + github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.28 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.15.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.18.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.42.0 // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/Showmax/go-fqdn v1.0.0 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/alecthomas/participle/v2 v2.0.0 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/antonmedv/expr v1.12.5 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/aws-sdk-go v1.44.295 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cncf/xds/go 
v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dennwc/varint v1.0.0 // indirect + github.com/digitalocean/godo v1.97.0 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.2+incompatible // indirect + github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect + github.com/fatih/color v1.14.1 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-kit/log v0.2.1 + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-resty/resty/v2 v2.7.0 // indirect + github.com/go-zookeeper/zk v1.0.3 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/gophercloud/gophercloud 
v1.2.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect + github.com/hashicorp/consul/api v1.22.0 // indirect + github.com/hashicorp/cronexpr v1.1.1 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.2 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/hetznercloud/hcloud-go v1.41.0 // indirect + github.com/imdario/mergo v0.3.15 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ionos-cloud/sdk-go/v6 v6.1.4 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/knadh/koanf v1.5.0 // indirect + github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect + github.com/linode/linodego v1.14.1 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/dns v1.1.51 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/hashstructure/v2 v2.0.2 + github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mostynb/go-grpc-compression v1.2.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.81.0 // indirect; indir6.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.81.0 // indirect; indir6.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.81.0 // indirect; indir6.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.81.0 // indirect; indir6.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.81.0 // indirect; indir6.0 + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 + github.com/prometheus/common/sigv4 v0.1.0 // indirect + github.com/prometheus/procfs v0.11.0 // indirect + github.com/prometheus/prometheus v0.43.1 + github.com/prometheus/statsd_exporter v0.22.7 // indirect + github.com/rs/cors v1.9.0 // indirect + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 // indirect + github.com/shirou/gopsutil/v3 v3.23.6 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect + 
github.com/vultr/govultr/v2 v2.17.2 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.1-0.20230612162650-64be7e574a17 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.17.0 // indirect + go.opentelemetry.io/contrib/zpages v0.42.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.39.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/sdk v1.16.0 // indirect + go.opentelemetry.io/otel/sdk/metric v0.39.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/goleak v1.2.1 // indirect + golang.org/x/crypto v0.10.0 // indirect + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect + golang.org/x/mod v0.9.0 // indirect + golang.org/x/net v0.11.0 // indirect + golang.org/x/oauth2 v0.9.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/term v0.9.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.7.0 // indirect + gonum.org/v1/gonum v0.13.0 // indirect + google.golang.org/api v0.129.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/grpc v1.56.1 // indirect + google.golang.org/protobuf v1.31.0 + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.27.3 // indirect + k8s.io/apimachinery v0.27.3 // indirect + k8s.io/client-go v0.27.3 // indirect; indirect4 k8s.io/klog/v2 v2.70.1 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/utils 
v0.0.0-20230308161112-d77c459e9343 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) + +// Currently causes build issues on windows. Downgrading to previous version. +replace github.com/mattn/go-ieproxy v0.0.9 => github.com/mattn/go-ieproxy v0.0.1 diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..4a2a6d0 --- /dev/null +++ b/go.sum @@ -0,0 +1,1265 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.20.0 h1:cUOcywWuowO9It2i1KX1lIb0HH7gLv6nENKuZGnlcSo= +cloud.google.com/go/compute v1.20.0/go.mod h1:kn5BhC++qUWR/AM3Dn21myV7QbgqejW04cAOrtppaQI= +cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4= +cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= +contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.22 
h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.15.2 h1:9VwVugD2NuPr6/IjrNJLpzaX7j+P6EJIup7cpNwcYhw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp 
v1.15.2/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector v0.42.0 h1:1iUFJlwEsO3jbEitj0i9+XIjaY1QsjnUy7J3J81HuH4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector v0.42.0/go.mod h1:otJdxGjog6MfV/93oprfeXgyFrGrqsVvDhdM6P779Rk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector/googlemanagedprometheus v0.42.0 h1:Fa7XySHYw2HdumRuo2TYiYK5LkGJuwzTiqAPFoZ7t1s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector/googlemanagedprometheus v0.42.0/go.mod h1:7BFtqicd8QzUKtbxBHq8/by6aMYL3mmnEEK0vLtVins= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.18.0 h1:82lUmcpHzBEpGP4qURZvMSU1rJV0AAfXtOCh7Qz6oDw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.18.0/go.mod h1:6abS6wU43wU97qP+JseSJq2+C8/XL50co74AhoMJwwU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.42.0 h1:thAXdOpdEJPWW7kZmD8wU/yhQjd7PA6L01TxFcR5OOY= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.42.0 h1:4gL61NwEDGAFvLJeEMjTYJm6r1T26k3QYuDZK9YEaAk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.42.0/go.mod h1:lz6DEePTxmjvYMtusOoS3qDAErC0STi/wmvqJucKY28= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Mottl/ctimefmt v0.0.0-20190803144728-fd2ac23a585a/go.mod h1:eyj2WSIdoPMPs2eNTLpSmM6Nzqo4V80/d6jHpnJ1SAI= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM= +github.com/Showmax/go-fqdn v1.0.0/go.mod 
h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZvOrJAko= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= +github.com/alecthomas/participle/v2 v2.0.0 h1:Fgrq+MbuSsJwIkw3fEj9h75vDP0Er5JzepJ0/HNHv0g= +github.com/alecthomas/participle/v2 v2.0.0/go.mod h1:rAKZdJldHu8084ojcWevWAL8KmEU+AT+Olodb+WoN2Y= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.12.5 h1:Fq4okale9swwL3OeLLs9WD9H6GbgBLJyN/NUHRv+n0E= +github.com/antonmedv/expr v1.12.5/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 
h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.295 h1:SGjU1+MqttXfRiWHD6WU0DRhaanJgAFY+xIhEaugV8Y= +github.com/aws/aws-sdk-go v1.44.295/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod 
h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4= +github.com/digitalocean/godo v1.97.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/docker/distribution v2.8.2+incompatible 
h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= +github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= +github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= +github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod 
h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= 
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= 
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= 
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod 
h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.11.0 
h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= +github.com/gophercloud/gophercloud v1.2.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE= +github.com/hashicorp/consul/api v1.22.0/go.mod h1:zHpYgZ7TeYqS6zaszjwSt128OwESRpnhU9aGa6ue3Eg= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU= +github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= +github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod 
h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI= +github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b/go.mod h1:bKUb1ytds5KwUioHdvdq9jmrDqCThv95si0Ub7iNeBg= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= +github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= +github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY= +github.com/ionos-cloud/sdk-go/v6 v6.1.4/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= +github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= +github.com/knadh/koanf/v2 v2.0.1 h1:1dYGITt1I23x8cfx8ZnldtezdyaZtfAuRtIFOiRzK7g= +github.com/knadh/koanf/v2 v2.0.1/go.mod h1:ZeiIlIDXTE7w1lMT6UVcNiRAS2/rCeLn/GdLNvY1Dus= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= +github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod 
h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo= +github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod 
h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/mostynb/go-grpc-compression v1.2.0 h1:KJzRFSYPXlcoYjG5/xLZB8tpuOyWF2UnlW4tAuaWnfI= +github.com/mostynb/go-grpc-compression v1.2.0/go.mod h1:oidYvYyefMmhcuvU8fLJ8FfZyTyVzJ6SkmD5fIKgRe8= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/observiq/ctimefmt v1.0.0 h1:r7vTJ+Slkrt9fZ67mkf+mA6zAdR5nGIJRMTzkUyvilk= +github.com/observiq/ctimefmt v1.0.0/go.mod h1:mxi62//WbSpG/roCO1c6MqZ7zQTvjVtYheqHN3eOjvc= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.81.0 h1:Uej91kyfvfXnclwY0fDfWutM7MhFYpSHhr2/WsDEEpc= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.81.0/go.mod h1:GmTJKqMN9B2h7JonMwjeKM4U1dAlefuBHDo4DPrK19k= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter v0.81.0 
h1:aowv9rF4p6zdHA/7mtqFOcbvDTWDJ7Kem7dl/wfq1vE= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter v0.81.0/go.mod h1:n2bqq+zTwP5Ws1vFen/E9vbKA1odemH9wxwAGNMLHzM= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.81.0 h1:ts0afKsnRpVQrX9w9dphbmrWI4hTbOt0P3p6Un9oDIk= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.81.0/go.mod h1:70CoC6TEwS0atyXyxASP8V5KWEnuCM+zs6ZxMObT2nI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.81.0 h1:WN6hNUDTQcmODuHeK9XUh5LavKr0JFrpR6+gIMtbHlw= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.81.0/go.mod h1:4KJ5vtmaPwZECM2MEseFkz0y02f2aipnYWRtrHpAwJ8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.81.0 h1:EIbmD7EzonXaKDyq2MrCpfpar1VEKjVFJ50rzfTsWpc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.81.0/go.mod h1:pJHh21NYSJNpnkzaDSy0xCZ/Jg4ETd24l54/XHVkr3s= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.81.0 h1:sPjCHuqjn5UYDJOai4FulMCfLP+7AbspjHfv0jAtmD0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.81.0/go.mod h1:moQ6krtZ8dyziij2P+9eao5+gBfCJjiNDwN7n2MZZs4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.81.0 h1:WddoIEOfszVY9fN1/MocdB3E/4VhpR0XCvzM4rI/Zo4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.81.0/go.mod h1:gkymmEWoAYS3IAJizCVWHsnLlO2srV6jTlauy3ew8Vg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.81.0 h1:60rGHxmwTtBYfwhhi+Renb3BOYwmrH08ZrvkWImXqVk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.81.0/go.mod h1:1zC+Q7SfDi5RrVgR/FXnug0kw8sz3k0OBJrn5u7wYzc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.81.0 
h1:SMUb1r6uBrXO7R7ylZMHnlPN2VrdS4GzXgYWuJghPJg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.81.0/go.mod h1:Gfcl9pg+6klCvdemOCmuYv+3EHZJt8gqyJ5S7rkr1Ks= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.81.0 h1:Rb8e1O31dgjTEn6823RsPs2RaOwl7fVuFWz2qK9DRpY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.81.0/go.mod h1:tnyFHqiWxeNUqAAaGuKDD7XDL0KwBMSqvRB9PsKCzng= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.81.0 h1:mPkMu2Dx9QrGmZxnfwcSSvAyUZzBtaeYIdvmFSrC0KA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.81.0 h1:qUNZEYelezsSH6KrbE4u1TrzXCggSFPZqFI1m29gJFQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.81.0/go.mod h1:OUF0FMVFQQnlqc+QUMyQScszuBnqO9pO7xL7/kK1PM0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.81.0 h1:Ic7Jg3q2TU1t7cPRcG6wNncQS4r85TvYU8N2iPVv0j0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.81.0/go.mod h1:LVTxkVTuCAvpmbJu6TADF57OHAHV2nsQU9pGaJOx1Pg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.81.0 h1:syUuhc/z2yIsTmL9jt2gmXkYUM/jgsQZoH6NAE6i54Q= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.81.0/go.mod h1:LRzHoUpI8xZN5KXulEglZnbkYBHyiqHgQXGudIqpTtk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.81.0 h1:0Wzd9nh8WFfcY769RKfJU1qnEVSwy7g6+nfyyuFNCeE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.81.0/go.mod h1:4R9zh1OOvN7JGaN6qLK3uvH0ZO6vZdUeFx4TK+yK258= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.81.0 h1:AoZYnk7AOzxDuLcZek4sRQgv6fsMp9ufxplVCcPKY/U= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.81.0/go.mod 
h1:SCXzjDN/HNBffKyoJmAUkoAcGScmRkSFkiVY2PVlOUY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.81.0 h1:MBF10bAMzWddNiivgiWwNl0NqFtwIwWhQTpsNhhgIwo= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.81.0/go.mod h1:wCZn4P2NaOQn9xzVLRRZBCED2zcxilJvrXJFeFiAMLk= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.81.0 h1:fuMc2Hq7o66IjP6gNjRB4kLLwPPSqFG8KhE38VaGFYA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.81.0/go.mod h1:Pxs1UpCisdoCzlSQoS8tLDqxaU3DPsQu9bTXWdBmwQ0= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.81.0 h1:1ct0JB0jCi4cyBvl4Ektq3G47dg7pAbiXsHlOqSK/aI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.81.0/go.mod h1:tXgkAkD5/uIOvsooOrNmd5FayKcvfasVoTV89SSihME= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.81.0 h1:sVmU1X/9txOvdQX0VbnqwetrKThnsPXlJO52dmaFng8= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.81.0/go.mod h1:x71DNfrdvoQSN3AiovAsoSi4aG74Rary8oJATXzzkMw= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= +github.com/ovh/go-ovh v1.3.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 
h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod 
h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.44.0 
h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= +github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/prometheus v0.43.1 h1:Z/Z0S0CoPUVtUnHGokFksWMssSw2Y1Ir9NnWS1pPWU0= +github.com/prometheus/prometheus v0.43.1/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM= +github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= +github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rs/cors v1.9.0 
h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE= +github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil v3.21.10+incompatible h1:AL2kpVykjkqeN+MFe1WcwSBVUjGjvdU8/ubvCuXAjrU= +github.com/shirou/gopsutil v3.21.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.23.6 h1:5y46WPI9QBKBbK7EEccUPNXpJpNrvPuTD0O2zHEHT08= +github.com/shirou/gopsutil/v3 v3.23.6/go.mod h1:j7QX50DrXYggrpN30W0Mo+I4/8U2UUIQrnrhqUeWrAU= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus 
v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= +github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= +github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= +github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= +github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= +github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= 
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector v0.81.0 
h1:pF+sB8xNXlg/W0a0QTLz4mUWyool1a9toVj8LmLoFqg= +go.opentelemetry.io/collector v0.81.0/go.mod h1:thuOTBMusXwcTPTwLbs3zwwCOLaaQX2g+Hjf8OObc/w= +go.opentelemetry.io/collector/component v0.81.0 h1:AKsl6bss/SRrW248GFpmGiiI/4kdemW92Ai/X82CCqY= +go.opentelemetry.io/collector/component v0.81.0/go.mod h1:+m6/yPiJ7O7Oc/OLfmgUB2mrY1xoUqRj4BsoOtIVpGs= +go.opentelemetry.io/collector/config/configauth v0.81.0 h1:NIiJuIGOdblN0EIJv64R2mvGhthcYfWuvyCnjk8HRN4= +go.opentelemetry.io/collector/config/configauth v0.81.0/go.mod h1:2KscbmU+8fIzwiSU9Kku0Tf4b4A1plqFIJXR1DWSaTw= +go.opentelemetry.io/collector/config/configcompression v0.81.0 h1:Q725pvVH7tR6BP3WK7Ro3pbqMeQdZEV3KeFVHchBxCc= +go.opentelemetry.io/collector/config/configcompression v0.81.0/go.mod h1:xhHm1sEH7BTECAJo1xn64NMxeIvZGKdVGdSKUUc+YuM= +go.opentelemetry.io/collector/config/configgrpc v0.81.0 h1:Q2xEE2SGbg79j3TdHT+781eUu/2uUIyrHVJAG9bLpVk= +go.opentelemetry.io/collector/config/configgrpc v0.81.0/go.mod h1:Frq/l2Ttbvm7cFH3hkxLdhl5TCNHcH6rnkpmi8U2kLY= +go.opentelemetry.io/collector/config/confighttp v0.81.0 h1:vIdiepUT7P/WtJRdfh8mjzvSqJRVF8/vl9GWtUNQlHQ= +go.opentelemetry.io/collector/config/confighttp v0.81.0/go.mod h1:I54THsffkpv//O7bUHw+0bXxjYdvyL6IHg5ksgYez8I= +go.opentelemetry.io/collector/config/confignet v0.81.0 h1:Eu8m3eX8GaGhOUc//YXvV4i3cEivxUSxkLnV1U9ydhg= +go.opentelemetry.io/collector/config/confignet v0.81.0/go.mod h1:unOg7BZvpt6T5xsf+LyeOQvUhD8ld/2AbfOsmUZ/bPM= +go.opentelemetry.io/collector/config/configopaque v0.81.0 h1:MkCAGh0WydRWydETB9FLnuCj9hDPDiz2g4Wxnl53I0w= +go.opentelemetry.io/collector/config/configopaque v0.81.0/go.mod h1:pM1oy6gasukw3H6jAvc9Q9OtFaaY2IbfeuwCPAjOgXc= +go.opentelemetry.io/collector/config/configtelemetry v0.81.0 h1:j3dhWbAcrfL1n0RmShRJf99X/xIMoPfEShN/5Z8bY0k= +go.opentelemetry.io/collector/config/configtelemetry v0.81.0/go.mod h1:KEYQRiYJdx38iZkvcLKBZWH9fK4NeafxBwGRrRKMgyA= +go.opentelemetry.io/collector/config/configtls v0.81.0 h1:2vt+yOZUvGq5ADqFAxL5ONm1ACuGXDSs87AWT54Ez4M= 
+go.opentelemetry.io/collector/config/configtls v0.81.0/go.mod h1:HMHTYBMMgqBpTvnNAhQYmjO7XuoBMe2T4qRHcKluB4Q= +go.opentelemetry.io/collector/config/internal v0.81.0 h1:wRV2PBnJygdmKpIdt/xfG7zdQvXvHz9L+z8MhGsOji4= +go.opentelemetry.io/collector/config/internal v0.81.0/go.mod h1:RKcLV1gQxhgwx+6rlPYsvGMq1RZNne3UeOUZkHxJnIg= +go.opentelemetry.io/collector/confmap v0.81.0 h1:AqweoBGdF3jGM2/KgP5GS6bmN+1aVrEiCy4nPf7IBE4= +go.opentelemetry.io/collector/confmap v0.81.0/go.mod h1:iCTnTqGgZZJumhJxpY7rrJz9UQ/0zjPmsJz2Z7Tp4RY= +go.opentelemetry.io/collector/connector v0.81.0 h1:5jYYjQwxxgJKFtVvvbFLd0+2QHsvS0z+lVDxzmRv8uk= +go.opentelemetry.io/collector/connector v0.81.0/go.mod h1:rQsgBsEfxcBj0Wdp6a9z8E9NBxybolOfKheXBcosC2c= +go.opentelemetry.io/collector/consumer v0.81.0 h1:8R2iCrSzD7T0RtC2Wh4GXxDiqla2vNhDokGW6Bcrfas= +go.opentelemetry.io/collector/consumer v0.81.0/go.mod h1:jS7+gAKdOx3lD3SnaBztBjUVpUYL3ee7fpoqI4p/gT8= +go.opentelemetry.io/collector/exporter v0.81.0 h1:GLhB8WGrBx+zZSB1HIOx2ivFUMahGtAVO2CC5xbCUHQ= +go.opentelemetry.io/collector/exporter v0.81.0/go.mod h1:Di4RTzI8uRooVNATIeApNUgmGdNt8XiikUTQLabmZaA= +go.opentelemetry.io/collector/exporter/loggingexporter v0.81.0 h1:6cHx9fK58m3h/5IrkfgYarHQunBQkGQaUw150oHL1G0= +go.opentelemetry.io/collector/exporter/loggingexporter v0.81.0/go.mod h1:uVVPQ8OkOrXkchTOS9cA4Yu8aB1DJnyC2+Y+IZY8Sys= +go.opentelemetry.io/collector/exporter/otlpexporter v0.81.0 h1:Ri5pj0slm+FUbbG81UIhQaQ992z2+PcT2++4JI32XGI= +go.opentelemetry.io/collector/exporter/otlpexporter v0.81.0/go.mod h1:u19TJEy/n35jjU/ie2YOlAL4K1s9rvRKSNaq9JDlBF8= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.81.0 h1:KSE7wjy1J0I0izLTodTW4axRmJplpQgCRqYFbAzufZo= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.81.0/go.mod h1:x/G0eEHSDvHPSoOzaqY8v6uSfwnTuVmKudAEYqAXJJ4= +go.opentelemetry.io/collector/extension v0.81.0 h1:Ak7AzZzxTFJxGyVbEklsGzqHyOHW5USiifJilCcRyTU= +go.opentelemetry.io/collector/extension v0.81.0/go.mod 
h1:DU2bX8qulS5+OCJZGfvqIwIT/q3sFnEjI2HjJ2LDI/s= +go.opentelemetry.io/collector/extension/auth v0.81.0 h1:UzVQSG9naJh1hX7hh+HVcvB3n+rpCJXX2BBdUoL/Ybo= +go.opentelemetry.io/collector/extension/auth v0.81.0/go.mod h1:PaBFcFrzXV+UgM4VZKp6Kn1IiRC/MbEYWxTfIalcIwk= +go.opentelemetry.io/collector/extension/ballastextension v0.81.0 h1:4zA1pd8aSkvIk03HKCDz2z9fCkiDFQUZeJ6b5V7HF8o= +go.opentelemetry.io/collector/extension/ballastextension v0.81.0/go.mod h1:hdvJ7ecQObgYg7SurOttViBwta4bBb5a1hYz1+HddFk= +go.opentelemetry.io/collector/extension/zpagesextension v0.81.0 h1:ov3h5re95uJcF6N+vR/rLpjsEkGs6easxXSphH9UrPg= +go.opentelemetry.io/collector/extension/zpagesextension v0.81.0/go.mod h1:oN9HkYCae/b2ftIJVzY/ATDEqcxS61DuTaC6aCxeJMo= +go.opentelemetry.io/collector/featuregate v1.0.0-rcv0013 h1:tiTUG9X/gEDN1oDYQOBVUFYQfhUG2CvgW9VhBc2uk1U= +go.opentelemetry.io/collector/featuregate v1.0.0-rcv0013/go.mod h1:0mE3mDLmUrOXVoNsuvj+7dV14h/9HFl/Fy9YTLoLObo= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0013 h1:4sONXE9hAX+4Di8m0bQ/KaoH3Mi+OPt04cXkZ7A8W3k= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0013/go.mod h1:x09G/4KjEcDKNuWCjC5ZtnuDE0XEqiRwI+yrHSVjIy8= +go.opentelemetry.io/collector/processor v0.81.0 h1:ypyNV5R0bnN3XGMAsH/q5eNARF5vXtFgSOK9rBWzsLc= +go.opentelemetry.io/collector/processor v0.81.0/go.mod h1:ZDwO3DVg1VUSA92g0r/o0jYk+T7r9uxgZZ3LABJbC34= +go.opentelemetry.io/collector/processor/batchprocessor v0.81.0 h1:qmFuxKR12Sq209MkXSelvuZDG4otMwxZW0yetHKIsj0= +go.opentelemetry.io/collector/processor/batchprocessor v0.81.0/go.mod h1:Rb5jv7bbMxw72RCvZclh4QD64aoC91qQdFwyaa5HHV4= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.81.0 h1:TsfznxCfgnM/SRNXaTRf0b8ealUmnF4fY2rHce3uj40= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.81.0/go.mod h1:DYsauLyWsW8WqgSUz4xt5H5uDh2Q7IL9d9VRoTvg1G4= +go.opentelemetry.io/collector/receiver v0.81.0 h1:0c+YtIV7fmd9ev+zmwS9qjx5ASi8cw+gSypu4I7Gugc= +go.opentelemetry.io/collector/receiver v0.81.0/go.mod 
h1:q80JkMxVLnk0vWxoTRY2J7F4Qx9069Yy5yxDbZ4JVwk= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.81.0 h1:ewVbfATnAeQkwFK3r0dpFKCXcTb8HJKX4AixUioRt+c= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.81.0/go.mod h1:LGuSMVdOq5Zq+CEHF9YBHMaOIUZrzqW7DQGqo9g0dJA= +go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw= +go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.1-0.20230612162650-64be7e574a17 h1:mdcNStUIXngF/mH3xxAo4nbR4g65IXqLL1SvYMjz7JQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.1-0.20230612162650-64be7e574a17/go.mod h1:N2Nw/UmmvQn0yCnaUzvsWzTWIeffYIdFteg6mxqCWII= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= +go.opentelemetry.io/contrib/propagators/b3 v1.17.0 h1:ImOVvHnku8jijXqkwCSyYKRDt2YrnGXD4BbhcpfbfJo= +go.opentelemetry.io/contrib/propagators/b3 v1.17.0/go.mod h1:IkfUfMpKWmynvvE0264trz0sf32NRTZL4nuAN9AbWRc= +go.opentelemetry.io/contrib/zpages v0.42.0 h1:hFscXKQ9PTjyIVmAr6zIV8cMoiEeR9lPIwPVqHi8+5Q= +go.opentelemetry.io/contrib/zpages v0.42.0/go.mod h1:qRJBEfB0iwRKrYImq5qfwTolmY8HXvZBRucvhuTVQZw= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/bridge/opencensus v0.39.0 h1:YHivttTaDhbZIHuPlg1sWsy2P5gj57vzqPfkHItgbwQ= +go.opentelemetry.io/otel/bridge/opencensus v0.39.0/go.mod h1:vZ4537pNjFDXEx//WldAR6Ro2LC8wwmFC76njAXwNPE= +go.opentelemetry.io/otel/exporters/prometheus v0.39.0 h1:whAaiHxOatgtKd+w0dOi//1KUxj3KoPINZdtDaDj3IA= +go.opentelemetry.io/otel/exporters/prometheus v0.39.0/go.mod 
h1:4jo5Q4CROlCpSPsXLhymi+LYrDXd2ObU5wbKayfZs7Y= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= +go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod 
h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= +golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools 
v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.129.0 h1:2XbdjjNfFPXQyufzQVwPf1RRnHH8Den2pfNE2jw7L8w= +google.golang.org/api v0.129.0/go.mod h1:dFjiXlanKwWE3612X97llhsoI36FAoIiRj3aTl5b/zE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc 
v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= 
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= +k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= +k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= +k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= +k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod 
h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/run-service.yaml b/run-service.yaml index 9991ee6..bbcd88d 100644 --- a/run-service.yaml +++ b/run-service.yaml @@ -17,7 +17,7 @@ kind: Service metadata: annotations: run.googleapis.com/launch-stage: ALPHA - name: opentelemetry-cloud-run-sample + name: run-gmp-sidecar-service spec: template: metadata: