From 3b31fbd306021726f8d10528449d790bfff1ac21 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20Krupa=20=28paulfantom=29?=
Date: Mon, 1 Aug 2022 17:05:58 +0200
Subject: [PATCH] {chart,scripts}: move datasource test to helm chart
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Paweł Krupa (paulfantom)
---
 .github/workflows/tests.yml                 |  14 +-
 .gitignore                                  |   2 +-
 Makefile                                    |  10 +-
 chart/ci/default-values.yaml                |  39 +++
 chart/ci/e2e-values.yaml                    | 347 --------------------
 chart/templates/tests/test-datasources.yaml | 158 +++++++++
 chart/values.yaml                           |  11 +-
 scripts/check-datasources.sh                |  63 ----
 8 files changed, 213 insertions(+), 431 deletions(-)
 create mode 100644 chart/ci/default-values.yaml
 delete mode 100644 chart/ci/e2e-values.yaml
 create mode 100644 chart/templates/tests/test-datasources.yaml
 delete mode 100755 scripts/check-datasources.sh

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 50e28104..e50f9bdf 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -19,7 +19,7 @@ jobs:
         uses: helm/chart-testing-action@v2.2.1
 
       - name: Run chart-testing (lint)
-        run: ct lint --config ct.yaml
+        run: make lint
 
   test:
     runs-on: ubuntu-latest
@@ -46,16 +46,8 @@ jobs:
       - name: Wait for cluster to finish bootstraping
        run: kubectl wait --for=condition=Ready pods --all --all-namespaces --timeout=300s
 
-      - name: Create namespace for installing the chart
-        run: kubectl create namespace tobs-test
-
-      - name: Run chart-testing (install)
-        run: ct install --config ct.yaml --namespace tobs-test
-
-      # TODO(onprem): Figure out a workaround to test datasources as ct just removes
-      # the chart release after a successful install.
-      # - name: Check datasources
-      #   run: make check-datasources
+      - name: Run e2e chart-testing
+        run: make e2e
 
   test-result:
     name: End-to-End Test Results
diff --git a/.gitignore b/.gitignore
index ae0ec69b..09622dab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
 .vscode/
 .idea/
 chart/Chart.lock
-chart/charts/*
+chart/charts
 tmp/
 manifests.yaml
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 17715a05..3fe39672 100644
--- a/Makefile
+++ b/Makefile
@@ -46,8 +46,14 @@ helm-install: cert-manager load-images ## This is a phony target that is used t
 	helm dep up chart/
 	helm upgrade --install --wait --timeout 15m test chart/
 
-.PHONY: check-datasources
-	./scripts/check-datasources.sh
+.PHONY: lint
+lint: ## Lint helm chart using ct (chart-testing).
+	ct lint --config ct.yaml
+
+.PHONY: e2e
+e2e: start-kind cert-manager ## Run e2e installation tests using ct (chart-testing).
+	kubectl create ns tobs-test
+	ct install --config ct.yaml --namespace tobs-test
 
 manifests.yaml:
 	helm template --namespace test test chart/ > $@
diff --git a/chart/ci/default-values.yaml b/chart/ci/default-values.yaml
new file mode 100644
index 00000000..c00a5efa
--- /dev/null
+++ b/chart/ci/default-values.yaml
@@ -0,0 +1,39 @@
+# These are the default values for running the chart in the CI system; they can be
+# used as a base template for other *-values.yaml files. Only resource requests
+# are nullified, to allow starting the stack on a GitHub Actions runner and to
+# prevent pod-scheduling issues caused by insufficient host resources.
+
+timescaledb-single:
+  resources: null
+
+promscale:
+  resources: null
+
+kube-prometheus-stack:
+  alertmanager:
+    alertmanagerSpec:
+      resources: null
+  prometheusOperator:
+    prometheusConfigReloader:
+      resources: null
+    resources: null
+  prometheus:
+    prometheusSpec:
+      resources: null
+  grafana:
+    resources: null
+    prometheus:
+      datasource:
+        enabled: true
+        # By default url of data source is set to ts-prom connector instance
+        # deployed with this chart. If a connector isn't used this should be
+        # set to the prometheus-server.
+        url: "http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201"
+  kube-state-metrics:
+    resources: null
+  prometheus-node-exporter:
+    resources: null
+
+opentelemetry-operator:
+  manager:
+    resources: null
diff --git a/chart/ci/e2e-values.yaml b/chart/ci/e2e-values.yaml
deleted file mode 100644
index be6fe05a..00000000
--- a/chart/ci/e2e-values.yaml
+++ /dev/null
@@ -1,347 +0,0 @@
-# Values for configuring the deployment of TimescaleDB
-# The charts README is at:
-# https://github.com/timescale/timescaledb-kubernetes/tree/master/charts/timescaledb-single
-# Check out the various configuration options (administration guide) at:
-# https://github.com/timescale/timescaledb-kubernetes/blob/master/charts/timescaledb-single/admin-guide.md
-
-# Indicates if tobs helm chart is installed using the tobs CLI
-cli: false
-
-# Override the deployment namespace
-namespaceOverride: ""
-
-# TimescaleDB single helm chart configuration
-timescaledb-single:
-  # disable the chart if an existing TimescaleDB instance is used
-  enabled: &dbEnabled true
-
-  # override default helm chart image to use one with newer promscale_extension
-  image:
-    repository: timescale/timescaledb-ha
-    tag: pg14.4-ts2.7.2-p0
-    pullPolicy: IfNotPresent
-
-  # create only a ClusterIP service
-  loadBalancer:
-    enabled: false
-  # number or TimescaleDB pods to spawn (default is 3, 1 for no HA)
-  replicaCount: 1
-  # backup is disabled by default, enable it
-  # if you want to backup timescaleDB to s3
-  # you can provide the s3 details on tobs install
-  # in the user prompt or you can set s3 details in the
-  # env variables for the following keys:
-  # PGBACKREST_REPO1_S3_BUCKET
-  # PGBACKREST_REPO1_S3_ENDPOINT
-  # PGBACKREST_REPO1_S3_REGION
-  # PGBACKREST_REPO1_S3_KEY
-  # PGBACKREST_REPO1_S3_KEY_SECRET
-  backup:
-    enabled: false
-  # TimescaleDB PVC sizes
-  persistentVolumes:
-    data:
-      size: 150Gi
-    wal:
-      size: 20Gi
-  ## TimescaleDB resource requests
-  resources: null
-
-# Values for configuring the deployment of the Promscale
-# The charts README is at:
-# https://github.com/timescale/promscale/tree/master/helm-chart
-promscale:
-  enabled: true
-  image: timescale/promscale:0.13.0
-  # to pass extra args
-  extraArgs:
-    - "--metrics.high-availability=true"
-
-  extraEnv:
-    - name: "TOBS_TELEMETRY_INSTALLED_BY"
-      value: "helm"
-    - name: "TOBS_TELEMETRY_VERSION"
-      value: "{{ .Chart.Version }}"
-    - name: "TOBS_TELEMETRY_TRACING_ENABLED"
-      value: "true"
-    - name: "TOBS_TELEMETRY_TIMESCALEDB_ENABLED"
-      value: *dbEnabled
-
-  serviceMonitor:
-    enabled: true
-
-  ## Note:
-
-  # If you are providing your own secret name, do
-  # not forget to configure at below connectionSecretName
-
-  # selector used to provision your own Secret containing connection details
-  # Use this option with caution
-
-  # if you are adding a conn string here do not forget
-  # to add the same for kube-prometheus.grafana.timescale.adminPassSecret
-  connectionSecretName: ""
-
-  ## Note:
-
-  # If you using tobs deploy TimescaleDB do not configure below
-  # any connection details below as tobs will take care of it.
-
-  # connection details to connect to a target db
-  connection:
-    # Database connection settings. If `uri` is not
-    # set then the specific user, pass, host, port and
-    # sslMode properties are used.
-    uri: ""
-    # the db name in which the metrics will be stored
-    dbName: &metricDB postgres
-    # user to connect to TimescaleDB with
-    user: postgres
-    # empty password string will be populated automatically with a database password
-    password: ""
-    # Host name (templated) of the database instance, default
-    # to service created in timescaledb-single
-    host: &dbHost "{{ .Release.Name }}.{{ .Release.Namespace }}.svc"
-    port: 5432
-    sslMode: require
-
-  # Promscale deployment resource requests
-  resources: null
-
-# Enabling Kube-Prometheus will install
-# Grafana & Prometheus into tobs as they
-# are part of Kube-Prometheus already
-kube-prometheus-stack:
-  enabled: true
-  fullnameOverride: "tobs-kube-prometheus"
-  alertmanager:
-    alertmanagerSpec:
-      image:
-        repository: quay.io/prometheus/alertmanager
-        tag: v0.24.0
-      replicas: 3
-      ## AlertManager resource requests
-      resources: null
-  prometheusOperator:
-    image:
-      repository: quay.io/prometheus-operator/prometheus-operator
-      tag: v0.58.0
-      pullPolicy: IfNotPresent
-    ## Prometheus config reloader configuration
-    prometheusConfigReloader:
-      # image to use for config and rule reloading
-      image:
-        repository: quay.io/prometheus-operator/prometheus-config-reloader
-        tag: v0.58.0
-      # resource config for prometheusConfigReloader
-      resources: null
-    ## Prometheus Operator resource requests
-    resources: null
-  prometheus:
-    prometheusSpec:
-      image:
-        repository: quay.io/prometheus/prometheus
-        tag: v2.37.0
-      scrapeInterval: "1m"
-      scrapeTimeout: "10s"
-      evaluationInterval: "1m"
-      # Prometheus metric retention
-      retention: 1d
-      # Number of replicas of each shard to deploy for a Prometheus deployment.
-      replicas: 2
-      ## Prometheus container retention
-      resources: null
-
-      replicaExternalLabelName: "__replica__"
-      # Promscale requires a cluster label to be present for high availability mode.
-      prometheusExternalLabelName: "cluster"
-      # The remote_read spec configuration for Prometheus.
-      # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
-      remoteRead:
-        # - {protocol}://{host}:{port}/{endpoint}
-        - url: "http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201/read"
-          readRecent: true
-
-      # The remote_write spec configuration for Prometheus.
-      # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
-      remoteWrite:
-        - url: "http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201/write"
-
-      # Prometheus pod storage spec
-      storageSpec:
-        # Using PersistentVolumeClaim
-        # disable mount sub path, use the root directory of pvc
-        disableMountSubPath: true
-        volumeClaimTemplate:
-          spec:
-            accessModes:
-              - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 8Gi
-
-      # We've enabled annotation-based scraping by default for backward-compatibility
-      # and to support the largest number of use-cases out-of-the-box.
-      # We encourage people to use ServiceMonitors and PodMonitors for new components.
-      # See discussion in: https://github.com/prometheus-operator/prometheus-operator/issues/1547
-      # and more info: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
-
-    # If additional scrape configurations are already deployed in a single secret file you can use this section.
-    # Expected values are the secret name and key
-    # Cannot be used with additionalScrapeConfigs
-    additionalScrapeConfigsSecret:
-      enabled: true
-      name: tobs-scrape-config
-      key: additional-scrape-config.yaml
-
-  # Values for configuring the deployment of Grafana
-  # The Grafana Community chart is used and the guide for it
-  # can be found at:
-  # https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md
-  grafana:
-    enabled: true
-    # TODO(paulfantom): remove with kube-prometheus bump
-    image:
-      repository: grafana/grafana
-      tag: 9.0.5
-      pullPolicy: IfNotPresent
-    resources: null
-    envValueFrom:
-      GRAFANA_PASSWORD:
-        secretKeyRef:
-          name: custom-secret-scripts
-          key: GRAFANA_PASSWORD
-    sidecar:
-      datasources:
-        enabled: true
-        label: tobs_datasource
-        labelValue: "true"
-        # Disable Prometheus datasource by default as
-        # Promscale is the default datasource
-        defaultDatasourceEnabled: false
-      dashboards:
-        # option to enable multi-cluster support
-        # in Grafana dashboards by default disabled
-        multicluster:
-          global:
-            enabled: false
-        enabled: true
-        files:
-          - dashboards/k8s-cluster.json
-          - dashboards/k8s-hardware.json
-          - dashboards/apm-dependencies.json
-          - dashboards/apm-home.json
-          - dashboards/apm-service-dependencies-downstream.json
-          - dashboards/apm-service-dependencies-upstream.json
-          - dashboards/apm-service-overview.json
-          - dashboards/promscale.json
-    adminUser: admin
-    # To configure password externally refer to https://github.com/grafana/helm-charts/blob/6578497320d3c4672bab3a3c7fd38dffba1c9aba/charts/grafana/values.yaml#L340-L345
-    adminPassword: ""
-    persistence:
-      type: pvc
-      enabled: true
-      accessModes:
-        - ReadWriteOnce
-    prometheus:
-      datasource:
-        enabled: true
-        # By default url of data source is set to ts-prom connector instance
-        # deployed with this chart. If a connector isn't used this should be
-        # set to the prometheus-server.
-        url: "http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201"
-    timescale:
-      datasource:
-        enabled: true
-        user: grafana
-        # leaving password empty will cause helm to generate a random password
-        pass: ""
-        dbName: *metricDB
-        sslMode: require
-        # By default the url/host is set to the db instance deployed
-        # with this chart
-        host: *dbHost
-        port: 5432
-    jaeger:
-      # Endpoint for integrating jaeger datasource in grafana. This should point to HTTP endpoint, not gRPC.
-      promscaleTracesQueryEndPoint: "{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201"
-
-  kube-state-metrics:
-    image:
-      repository: registry.k8s.io/kube-state-metrics/kube-state-metrics
-      tag: v2.5.0
-      pullPolicy: IfNotPresent
-    # By default kube-state-metrics are scraped using
-    # serviceMonitor disable annotation based scraping
-    prometheusScrape: false
-    resources: null
-
-  prometheus-node-exporter:
-    image:
-      repository: quay.io/prometheus/node-exporter
-      tag: v1.3.1
-      pullPolicy: IfNotPresent
-    # By default node-exporter are scraped using
-    # serviceMonitor disable annotation based scraping
-    service:
-      annotations:
-        prometheus.io/scrape: "false"
-    resources: null
-
-# Enable OpenTelemetry Operator
-# If using tobs CLI you can enable otel with --enable-opentelemetry flag
-opentelemetry-operator:
-  enabled: true
-  manager:
-    image:
-      repository: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator
-      tag: v0.56.0
-    resources: null
-    serviceMonitor:
-      enabled: true
-    prometheusRule:
-      enabled: true
-  instrumentation:
-    pythonImage: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.32b0
-    javaImage: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.16.0
-    nodejsImage: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.27.0
-  collector:
-    # The default otel collector that will be deployed by helm once
-    # the otel operator is in running state
-    config: |
-      receivers:
-        jaeger:
-          protocols:
-            grpc:
-            thrift_http:
-
-        otlp:
-          protocols:
-            grpc:
-            http:
-
-      exporters:
-        logging:
-        otlp:
-          endpoint: "{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9202"
-          compression: none
-          tls:
-            insecure: true
-        prometheusremotewrite:
-          endpoint: "http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201/write"
-          tls:
-            insecure: true
-
-      processors:
-        batch:
-
-      service:
-        pipelines:
-          traces:
-            receivers: [jaeger, otlp]
-            exporters: [logging, otlp]
-            processors: [batch]
-          metrics:
-            receivers: [otlp]
-            processors: [batch]
-            exporters: [prometheusremotewrite]
diff --git a/chart/templates/tests/test-datasources.yaml b/chart/templates/tests/test-datasources.yaml
new file mode 100644
index 00000000..80db957f
--- /dev/null
+++ b/chart/templates/tests/test-datasources.yaml
@@ -0,0 +1,158 @@
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: "{{ .Release.Name }}-test-datasource"
+  namespace: "{{ template "tobs.namespace" . }}"
+  labels:
+    {{- include "tobs.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: "{{ .Release.Name }}-test-datasource"
+  namespace: "{{ template "tobs.namespace" . }}"
+  labels:
+    {{- include "tobs.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: "{{ .Release.Name }}-test-datasource"
+subjects:
+  - kind: ServiceAccount
+    name: "{{ .Release.Name }}-test-datasource"
+    namespace: "{{ template "tobs.namespace" . }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: "{{ .Release.Name }}-test-datasource"
+  namespace: "{{ template "tobs.namespace" . }}"
+  labels:
+    {{- include "tobs.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  resourceNames:
+  - "{{ .Release.Name }}-grafana"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "{{ .Release.Name }}-test-datasource"
+  namespace: "{{ template "tobs.namespace" . }}"
+  labels:
+    {{- include "tobs.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+data:
+  datasources.sh: |-
+    #!/bin/bash
+
+    # set -euo pipefail
+    set -o pipefail
+
+    # Assemble grafana query URL
+    RELEASE="{{ .Release.Name }}"
+    NAMESPACE="{{ template "tobs.namespace" . }}"
+    {{- $kubePrometheus := index .Values "kube-prometheus-stack" }}
+    GRAFANA_USER="{{ $kubePrometheus.grafana.adminUser }}"
+
+    # use curl instead of kubectl to access k8s api. This way we don't need to use container image with kubectl in it.
+    TOKEN="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
+    K8S_API_URI="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT_HTTPS}/api/v1/namespaces/${NAMESPACE}/secrets/${RELEASE}-grafana"
+    GRAFANA_PASS="$(
+      curl -s \
+        --header "Authorization: Bearer ${TOKEN}" \
+        --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \
+        "$K8S_API_URI" \
+        | jq -r '.data["admin-password"]' \
+        | base64 -d
+    )"
+
+    GRAFANA_QUERY_URL="http://${GRAFANA_USER}:${GRAFANA_PASS}@${RELEASE}-grafana.${NAMESPACE}.svc:80/api/ds/query"
+
+    function query() {
+      local uid="$1"
+      local query="$2"
+      local format="$3"
+
+      local body=$(cat <<-EOM
+    {
+      "queries":[
+        {
+          "datasource":{
+            "uid":"$uid"
+          },
+          "refId":"A",
+          "format":"$format",
+          "expr":"$query"
+        }
+      ],
+      "from":"now-5m",
+      "to":"now"
+    }
+    EOM
+    )
+
+      curl -H "Content-Type: application/json" -X POST -d "$body" "${GRAFANA_QUERY_URL}" 2>/dev/null | jq '.results.A'
+    }
+
+    SQL_QUERY="SELECT * FROM pg_extension WHERE extname = 'timescaledb_toolkit';"
+    RESULT_SQL=$(query "c4729dfb8ceeaa0372ef27403a3932695eee995d" "$SQL_QUERY" "table")
+    if [ "$(jq 'has("error")' <<< ${RESULT_SQL})" == "true" ]; then
+      echo "GRAFANA SQL DATASOURCE CANNOT QUERY DATA DUE TO:"
+      jq '.error' <<< ${RESULT_SQL}
+      exit 1
+    fi
+
+    RESULT_PRM=$(query "dc08d25c8f267b054f12002f334e6d3d32a853e4" "ALERTS" "time_series")
+    if [ "$(jq 'has("error")' <<< ${RESULT_PRM})" == "true" ]; then
+      echo "GRAFANA PROMQL DATASOURCE CANNOT QUERY DATA DUE TO:"
+      jq '.error' <<< ${RESULT_PRM}
+      exit 1
+    fi
+
+    echo "All queries passed"
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ .Release.Name }}-test-datasource"
+  labels:
+    {{- include "tobs.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: checker
+      # TODO(paulfantom): move image build to tobs repo
+      # Current multi-arch image is built from https://github.com/paulfantom/dockerfiles/blob/master/curl-jq/Dockerfile
+      image: quay.io/paulfantom/curl-jq
+      command:
+        - /bin/bash
+        - -c
+        - /usr/local/bin/datasources.sh
+      volumeMounts:
+        - mountPath: /usr/local/bin/datasources.sh
+          name: datasources-bin
+          readOnly: true
+          subPath: datasources.sh
+  serviceAccountName: "{{ .Release.Name }}-test-datasource"
+  restartPolicy: Never
+  volumes:
+    - name: datasources-bin
+      configMap:
+        name: "{{ .Release.Name }}-test-datasource"
+        defaultMode: 0755
\ No newline at end of file
diff --git a/chart/values.yaml b/chart/values.yaml
index 5a81b670..a321cc87 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -4,9 +4,6 @@
 # Check out the various configuration options (administration guide) at:
 # https://github.com/timescale/timescaledb-kubernetes/blob/master/charts/timescaledb-single/admin-guide.md
 
-# Indicates if tobs helm chart is installed using the tobs CLI
-cli: false
-
 # Override the deployment namespace
 namespaceOverride: ""
 
@@ -188,13 +185,13 @@ kube-prometheus-stack:
       # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
       remoteRead:
         # - {protocol}://{host}:{port}/{endpoint}
-        - url: "http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201/read"
+        - url: "http://{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201/read"
           readRecent: true
 
       # The remote_write spec configuration for Prometheus.
       # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
       remoteWrite:
-        - url: "http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201/write"
+        - url: "http://{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201/write"
 
       # Prometheus pod storage spec
       storageSpec:
@@ -284,7 +281,7 @@ kube-prometheus-stack:
         # By default url of data source is set to ts-prom connector instance
         # deployed with this chart. If a connector isn't used this should be
         # set to the prometheus-server.
-        url: "http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201"
+        url: "http://{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201"
     timescale:
       datasource:
         enabled: true
@@ -299,7 +296,7 @@ kube-prometheus-stack:
       port: 5432
     jaeger:
       # Endpoint for integrating jaeger datasource in grafana. This should point to HTTP endpoint, not gRPC.
- promscaleTracesQueryEndPoint: "{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201" + promscaleTracesQueryEndPoint: "{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201" kube-state-metrics: image: diff --git a/scripts/check-datasources.sh b/scripts/check-datasources.sh deleted file mode 100755 index 22b679ef..00000000 --- a/scripts/check-datasources.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -# TODO(paulfantom): consider using jsonnet for modifications from this script - -set -euo pipefail - -function query() { - local uid="$1" - local query="$2" - local format="$3" - - local body=$(cat <<-EOM -{ - "queries":[ - { - "datasource":{ - "uid":"$uid" - }, - "refId":"A", - "format":"$format", - "expr":"$query" - } - ], - "from":"now-5m", - "to":"now" -} -EOM -) - curl -H "Content-Type: application/json" -X POST -d "$body" "http://${GRAFANA_USER}:${GRAFANA_PASS}@localhost:3000/api/ds/query" 2>/dev/null | jq '.results.A' -} - - -# Get helm release name -RELEASE="$(helm list -o json | jq -r '.[0].name')" -NAMESPACE="$(helm list -o json | jq -r '.[0].namespace')" - -# Get grafana credentials -GRAFANA_USER="admin" -GRAFANA_PASS="$(kubectl get secret -n "${NAMESPACE}" "${RELEASE}-grafana" -o json | jq -r '.data["admin-password"]' | base64 -d)" - -# Cleanup port-forward on exit -trap 'kill $(jobs -p)' EXIT - -# Port-forward to grafana SVC -kubectl -n "${NAMESPACE}" port-forward svc/test-grafana 3000:80 & -sleep 5 - -SQL_QUERY="SELECT * FROM pg_extension WHERE extname = 'timescaledb_toolkit';" -RESULT_SQL=$(query "c4729dfb8ceeaa0372ef27403a3932695eee995d" "$SQL_QUERY" "table") -if [ "$(jq 'has("error")' <<< ${RESULT_SQL})" == "true" ]; then - echo "GRAFANA SQL DATASOURCE CANNOT QUERY DATA DUE TO:" - jq '.error' <<< ${RESULT_SQL} - exit 1 -fi - -RESULT_PRM=$(query "dc08d25c8f267b054f12002f334e6d3d32a853e4" "ALERTS" "time_series") -if [ "$(jq 'has("error")' <<< ${RESULT_PRM})" == "true" ]; then - echo "GRAFANA PROMQL DATASOURCE CANNOT QUERY DATA DUE TO:" - jq '.error' <<< ${RESULT_PRM} - exit 1 -fi - -echo "All queries passed" \ No newline at end of file
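
For local verification, the relocated datasource test can be exercised without
going through CI. A minimal sketch, assuming a running cluster and a release
named "test" installed into the "tobs-test" namespace (the same names the
Makefile targets above use):

    # Render only the new test manifest to inspect what the hook will run.
    helm template test chart/ --namespace tobs-test \
      --show-only templates/tests/test-datasources.yaml

    # Install the chart, then run all pods annotated with "helm.sh/hook": test,
    # including <release>-test-datasource, and print their logs.
    helm dep up chart/
    helm upgrade --install --wait --timeout 15m test chart/ \
      --namespace tobs-test --create-namespace
    helm test test --namespace tobs-test --logs

ct (chart-testing) runs the chart's test hooks as part of its install flow, so
`make e2e` exercises the same pod automatically; this is what lets the hook
replace the deleted, port-forward based scripts/check-datasources.sh.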
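The datasource UIDs hardcoded in datasources.sh ("c4729dfb..." and
"dc08d25c...") have to match the UIDs of the provisioned TimescaleDB and
Promscale datasources in Grafana. If either query starts failing because a
datasource is not found, the current UIDs can be listed through Grafana's HTTP
API; a sketch, assuming the same release/namespace as above and GRAFANA_PASS
already extracted from the <release>-grafana secret:

    # Port-forward the Grafana service and list datasource names, UIDs and types.
    kubectl -n tobs-test port-forward svc/test-grafana 3000:80 &
    sleep 5
    curl -s "http://admin:${GRAFANA_PASS}@localhost:3000/api/datasources" \
      | jq '.[] | {name, uid, type}'
    kill %1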