diff --git a/dashboards/Business_Logic_Monitoring-1510165692840.json b/dashboards/Business_Logic_Monitoring-1510165692840.json index ee69282..ca26208 100644 --- a/dashboards/Business_Logic_Monitoring-1510165692840.json +++ b/dashboards/Business_Logic_Monitoring-1510165692840.json @@ -1,8 +1,8 @@ { "__inputs": [ { - "name": "DS_PROMETHEUS_SERVER", - "label": "Prometheus Server", + "name": "DS_HTTP://35.198.163.2", + "label": "http://35.198.163.2", "description": "", "type": "datasource", "pluginId": "prometheus", @@ -14,7 +14,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "4.6.1" + "version": "4.6.3" }, { "type": "panel", @@ -58,7 +58,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS_SERVER}", + "datasource": "${DS_HTTP://35.198.163.2}", "fill": 1, "id": 1, "legend": { @@ -85,7 +85,7 @@ "steppedLine": false, "targets": [ { - "expr": "rate(post_count[1h])", + "expr": "rate(post_count{namespace=~\"$namespace\"}[1h])", "format": "time_series", "intervalFactor": 2, "refId": "A" @@ -144,7 +144,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS_SERVER}", + "datasource": "${DS_HTTP://35.198.163.2}", "fill": 1, "id": 2, "legend": { @@ -171,7 +171,7 @@ "steppedLine": false, "targets": [ { - "expr": "rate(comment_count[1h])", + "expr": "rate(comment_count{namespace=~\"$namespace\"}[1h])", "format": "time_series", "intervalFactor": 2, "refId": "A" @@ -259,5 +259,5 @@ }, "timezone": "", "title": "Business_Logic_Monitoring", - "version": 2 + "version": 1 } \ No newline at end of file diff --git a/dashboards/UI_Service_Monitoring-1510164048862.json b/dashboards/UI_Service_Monitoring-1510164048862.json index 374b285..3c03dea 100644 --- a/dashboards/UI_Service_Monitoring-1510164048862.json +++ b/dashboards/UI_Service_Monitoring-1510164048862.json @@ -1,8 +1,8 @@ { "__inputs": [ { - "name": "DS_PROMETHEUS_SERVER", - "label": "Prometheus Server", + "name": "DS_HTTP://35.198.163.2", + 
"label": "http://35.198.163.2", "description": "", "type": "datasource", "pluginId": "prometheus", @@ -14,7 +14,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "4.6.1" + "version": "4.6.3" }, { "type": "panel", @@ -48,6 +48,7 @@ "hideControls": false, "id": null, "links": [], + "refresh": false, "rows": [ { "collapse": false, @@ -58,7 +59,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS_SERVER}", + "datasource": "${DS_HTTP://35.198.163.2}", "fill": 1, "id": 1, "legend": { @@ -85,7 +86,7 @@ "steppedLine": false, "targets": [ { - "expr": "ui_request_count", + "expr": "ui_request_count{namespace=~\"$namespace\"}", "format": "time_series", "intervalFactor": 2, "refId": "A" @@ -144,7 +145,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS_SERVER}", + "datasource": "${DS_HTTP://35.198.163.2}", "fill": 1, "id": 3, "legend": { @@ -171,7 +172,7 @@ "steppedLine": false, "targets": [ { - "expr": "rate(ui_request_count{http_status=~\"^[45].*\"}[5m])", + "expr": "rate(ui_request_count{http_status=~\"^[45].*\",namespace=~\"$namespace\"}[5m])", "format": "time_series", "intervalFactor": 2, "refId": "A" @@ -230,7 +231,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS_SERVER}", + "datasource": "${DS_HTTP://35.198.163.2}", "fill": 1, "id": 4, "legend": { @@ -312,11 +313,32 @@ "style": "dark", "tags": [], "templating": { - "list": [] + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "${DS_HTTP://35.198.163.2}", + "hide": 0, + "includeAll": true, + "label": "Env", + "multi": true, + "name": "namespace", + "options": [], + "query": "label_values(namespace)", + "refresh": 1, + "regex": "/.*/", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] }, "time": { - "from": "now-15m", - "to": "now" + "from": "2018-01-17T15:05:16.631Z", + "to": "2018-01-17T15:35:16.631Z" }, 
"timepicker": { "refresh_intervals": [ diff --git a/kubernetes/Charts/prometheus/.helmignore b/kubernetes/Charts/prometheus/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/kubernetes/Charts/prometheus/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/kubernetes/Charts/prometheus/Chart.yaml b/kubernetes/Charts/prometheus/Chart.yaml new file mode 100755 index 0000000..6d3311e --- /dev/null +++ b/kubernetes/Charts/prometheus/Chart.yaml @@ -0,0 +1,14 @@ +name: prometheus +version: 5.0.0 +description: Prometheus is a monitoring system and time series database. +home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +sources: + - https://github.com/prometheus/alertmanager + - https://github.com/prometheus/prometheus + - https://github.com/prometheus/pushgateway + - https://github.com/kubernetes/kube-state-metrics +maintainers: + - name: mgoodness + email: mgoodness@gmail.com +engine: gotpl diff --git a/kubernetes/Charts/prometheus/README.md b/kubernetes/Charts/prometheus/README.md new file mode 100644 index 0000000..e6e7c28 --- /dev/null +++ b/kubernetes/Charts/prometheus/README.md @@ -0,0 +1,298 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. 
+ +## TL;DR; + +```console +$ helm install stable/prometheus +``` + +## Introduction + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.3+ with Beta APIs enabled + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/prometheus +``` + +The command deploys Prometheus on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Prometheus 2.0 + +Prometheus version 2.0 has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/) + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +## Upgrading from previous chart versions. + +As of version 5.0, this chart uses Prometheus 2.0. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +### Example migration + +Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.0 while keeping your old data do the following: + +1. Update the `prometheus-old` release. 
Disable scraping and every component besides the prometheus server, similar to the configuration below: + + ``` + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. Deploy a new release of the chart with version 5.0+ using prometheus 2.0. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. + + ``` + prometheus.yml: | + ... + remote_read: + - url: http://prometheus-old/api/v1/read + ... + ``` + + Old data will be available when you query the new prometheus instance. + +## Configuration + +The following table lists the configurable parameters of the Prometheus chart and their default values. + +Parameter | Description | Default +--------- | ----------- | ------- +`alertmanager.enabled` | If true, create alertmanager | `true` +`alertmanager.name` | alertmanager container name | `alertmanager` +`alertmanager.image.repository` | alertmanager container image repository | `prom/alertmanager` +`alertmanager.image.tag` | alertmanager container image tag | `v0.5.1` +`alertmanager.image.pullPolicy` | alertmanager container image pull policy | `IfNotPresent` +`alertmanager.prefixURL` | The prefix slug at which the server can be accessed | `` +`alertmanager.baseURL` | The external url at which the server can be accessed | `` +`alertmanager.extraArgs` | Additional alertmanager container arguments | `{}` +`alertmanager.configMapOverrideName` | Prometheus alertmanager ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}` and setting this value will prevent the default alertmanager ConfigMap from being generated | `""` +`alertmanager.ingress.enabled` | If true, alertmanager Ingress will be created | `false`
+`alertmanager.ingress.annotations` | alertmanager Ingress annotations | `{}` +`alertmanager.ingress.hosts` | alertmanager Ingress hostnames | `[]` +`alertmanager.ingress.tls` | alertmanager Ingress TLS configuration (YAML) | `[]` +`alertmanager.nodeSelector` | node labels for alertmanager pod assignment | `{}` +`alertmanager.persistentVolume.enabled` | If true, alertmanager will create a Persistent Volume Claim | `true` +`alertmanager.persistentVolume.accessModes` | alertmanager data Persistent Volume access modes | `[ReadWriteOnce]` +`alertmanager.persistentVolume.annotations` | Annotations for alertmanager Persistent Volume Claim | `{}` +`alertmanager.persistentVolume.existingClaim` | alertmanager data Persistent Volume existing claim name | `""` +`alertmanager.persistentVolume.mountPath` | alertmanager data Persistent Volume mount root path | `/data` +`alertmanager.persistentVolume.size` | alertmanager data Persistent Volume size | `2Gi` +`alertmanager.persistentVolume.storageClass` | alertmanager data Persistent Volume Storage Class | `unset` +`alertmanager.persistentVolume.subPath` | Subdirectory of alertmanager data Persistent Volume to mount | `""` +`alertmanager.podAnnotations` | annotations to be added to alertmanager pods | `{}` +`alertmanager.replicaCount` | desired number of alertmanager pods | `1` +`alertmanager.resources` | alertmanager pod resource requests & limits | `{}` +`alertmanager.serviceAccountName` | service account name for alertmanager to use (ignored if rbac.create=true) | `default` +`alertmanager.service.annotations` | annotations for alertmanager service | `{}` +`alertmanager.service.clusterIP` | internal alertmanager cluster service IP | `""` +`alertmanager.service.externalIPs` | alertmanager service external IP addresses | `[]` +`alertmanager.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`alertmanager.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if
supported) | `[]` +`alertmanager.service.servicePort` | alertmanager service port | `80` +`alertmanager.service.type` | type of alertmanager service to create | `ClusterIP` +`alertmanagerFiles` | alertmanager ConfigMap entries | `alertmanager.yml` +`configmapReload.name` | configmap-reload container name | `configmap-reload` +`configmapReload.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload` +`configmapReload.image.tag` | configmap-reload container image tag | `v0.1` +`configmapReload.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent` +`configmapReload.resources` | configmap-reload pod resource requests & limits | `{}` +`kubeStateMetrics.enabled` | If true, create kube-state-metrics | `true` +`kubeStateMetrics.name` | kube-state-metrics container name | `kube-state-metrics` +`kubeStateMetrics.image.repository` | kube-state-metrics container image repository| `k8s.gcr.io/kube-state-metrics` +`kubeStateMetrics.image.tag` | kube-state-metrics container image tag | `v0.4.1` +`kubeStateMetrics.image.pullPolicy` | kube-state-metrics container image pull policy | `IfNotPresent` +`kubeStateMetrics.args` | kube-state-metrics container arguments | `{}` +`kubeStateMetrics.nodeSelector` | node labels for kube-state-metrics pod assignment | `{}` +`kubeStateMetrics.podAnnotations` | annotations to be added to kube-state-metrics pods | `{}` +`kubeStateMetrics.replicaCount` | desired number of kube-state-metrics pods | `1` +`kubeStateMetrics.resources` | kube-state-metrics resource requests and limits (YAML) | `{}` +`kubeStateMetrics.serviceAccountName` | service account name for kube-state-metrics to use (ignored if rbac.create=true) | `default` +`kubeStateMetrics.service.annotations` | annotations for kube-state-metrics service | `{prometheus.io/scrape: "true"}` +`kubeStateMetrics.service.clusterIP` | internal kube-state-metrics cluster service IP | `None` +`kubeStateMetrics.service.externalIPs` | 
kube-state-metrics service external IP addresses | `[]` +`kubeStateMetrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`kubeStateMetrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`kubeStateMetrics.service.servicePort` | kube-state-metrics service port | `80` +`kubeStateMetrics.service.type` | type of kube-state-metrics service to create | `ClusterIP` +`nodeExporter.enabled` | If true, create node-exporter | `true` +`nodeExporter.name` | node-exporter container name | `node-exporter` +`nodeExporter.image.repository` | node-exporter container image repository| `prom/node-exporter` +`nodeExporter.image.tag` | node-exporter container image tag | `v0.13.0` +`nodeExporter.image.pullPolicy` | node-exporter container image pull policy | `IfNotPresent` +`nodeExporter.extraArgs` | Additional node-exporter container arguments | `{}` +`nodeExporter.extraHostPathMounts` | Additional node-exporter hostPath mounts | `[]` +`nodeExporter.nodeSelector` | node labels for node-exporter pod assignment | `{}` +`nodeExporter.podAnnotations` | annotations to be added to node-exporter pods | `{}` +`nodeExporter.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`nodeExporter.resources` | node-exporter resource requests and limits (YAML) | `{}` +`nodeExporter.serviceAccountName` | service account name for node-exporter to use (ignored if rbac.create=true) | `default` +`nodeExporter.service.annotations` | annotations for node-exporter service | `{prometheus.io/scrape: "true"}` +`nodeExporter.service.clusterIP` | internal node-exporter cluster service IP | `None` +`nodeExporter.service.externalIPs` | node-exporter service external IP addresses | `[]` +`nodeExporter.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`nodeExporter.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) 
| `[]` +`nodeExporter.service.servicePort` | node-exporter service port | `9100` +`nodeExporter.service.type` | type of node-exporter service to create | `ClusterIP` +`pushgateway.enabled` | If true, create pushgateway | `true` +`pushgateway.name` | pushgateway container name | `pushgateway` +`pushgateway.image.repository` | pushgateway container image repository | `prom/pushgateway` +`pushgateway.image.tag` | pushgateway container image tag | `v0.4.0` +`pushgateway.image.pullPolicy` | pushgateway container image pull policy | `IfNotPresent` +`pushgateway.extraArgs` | Additional pushgateway container arguments | `{}` +`pushgateway.ingress.enabled` | If true, pushgateway Ingress will be created | `false` +`pushgateway.ingress.annotations` | pushgateway Ingress annotations | `{}` +`pushgateway.ingress.hosts` | pushgateway Ingress hostnames | `[]` +`pushgateway.ingress.tls` | pushgateway Ingress TLS configuration (YAML) | `[]` +`pushgateway.nodeSelector` | node labels for pushgateway pod assignment | `{}` +`pushgateway.podAnnotations` | annotations to be added to pushgateway pods | `{}` +`pushgateway.replicaCount` | desired number of pushgateway pods | `1` +`pushgateway.resources` | pushgateway pod resource requests & limits | `{}` +`pushgateway.service.annotations` | annotations for pushgateway service | `{}` +`pushgateway.service.clusterIP` | internal pushgateway cluster service IP | `""` +`pushgateway.service.externalIPs` | pushgateway service external IP addresses | `[]` +`pushgateway.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`pushgateway.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`pushgateway.service.servicePort` | pushgateway service port | `9091` +`pushgateway.service.type` | type of pushgateway service to create | `ClusterIP` +`rbac.create` | If true, create & use RBAC resources | `false` +`server.name` | Prometheus server container name | `server` 
+`server.image.repository` | Prometheus server container image repository | `prom/prometheus` +`server.image.tag` | Prometheus server container image tag | `v1.5.1` +`server.image.pullPolicy` | Prometheus server container image pull policy | `IfNotPresent` +`server.extraArgs` | Additional Prometheus server container arguments | `{}` +`server.prefixURL` | The prefix slug at which the server can be accessed | `` +`server.baseURL` | The external url at which the server can be accessed | `` +`server.extraHostPathMounts` | Additional Prometheus server hostPath mounts | `[]` +`server.configMapOverrideName` | Prometheus server ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.server.configMapOverrideName}}` and setting this value will prevent the default server ConfigMap from being generated | `""` +`server.ingress.enabled` | If true, Prometheus server Ingress will be created | `false` +`server.ingress.annotations` | Prometheus server Ingress annotations | `[]` +`server.ingress.hosts` | Prometheus server Ingress hostnames | `[]` +`server.ingress.tls` | Prometheus server Ingress TLS configuration (YAML) | `[]` +`server.nodeSelector` | node labels for Prometheus server pod assignment | `{}` +`server.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim | `true` +`server.persistentVolume.accessModes` | Prometheus server data Persistent Volume access modes | `[ReadWriteOnce]` +`server.persistentVolume.annotations` | Prometheus server data Persistent Volume annotations | `{}` +`server.persistentVolume.existingClaim` | Prometheus server data Persistent Volume existing claim name | `""` +`server.persistentVolume.mountPath` | Prometheus server data Persistent Volume mount root path | `/data` +`server.persistentVolume.size` | Prometheus server data Persistent Volume size | `8Gi` +`server.persistentVolume.storageClass` | Prometheus server data 
Persistent Volume Storage Class | `unset` +`server.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""` +`server.podAnnotations` | annotations to be added to Prometheus server pods | `{}` +`server.replicaCount` | desired number of Prometheus server pods | `1` +`server.resources` | Prometheus server resource requests and limits | `{}` +`server.serviceAccountName` | service account name for server to use (ignored if rbac.create=true) | `default` +`server.service.annotations` | annotations for Prometheus server service | `{}` +`server.service.clusterIP` | internal Prometheus server cluster service IP | `""` +`server.service.externalIPs` | Prometheus server service external IP addresses | `[]` +`server.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`server.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`server.service.nodePort` | Port to be used as the service NodePort (ignored if `server.service.type` is not `NodePort`) | `0` +`server.service.servicePort` | Prometheus server service port | `80` +`server.service.type` | type of Prometheus server service to create | `ClusterIP` +`server.terminationGracePeriodSeconds` | Prometheus server Pod termination grace period | `300` +`server.retention` | (optional) Prometheus data retention | `""` +`serverFiles.alerts` | Prometheus server alerts configuration | `{}` +`serverFiles.rules` | Prometheus server rules configuration | `{}` +`serverFiles.prometheus.yml` | Prometheus server scrape configuration | example configuration +`networkPolicy.enabled` | Enable NetworkPolicy | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + +```console +$ helm install stable/prometheus --name my-release \ + --set server.terminationGracePeriodSeconds=360 +``` + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install stable/prometheus --name my-release -f values.yaml +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +### ConfigMap Files +AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. + +Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. + +### Ingress TLS +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. 
Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +``` +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager +and Kube State Metrics by only accepting connections from Prometheus Server. +All inbound connections to Prometheus Server are still allowed. + +To enable network policy for Prometheus, install a networking plugin that +implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need +to manually create a networkpolicy which allows it. 
diff --git a/kubernetes/Charts/prometheus/custom_values.yml b/kubernetes/Charts/prometheus/custom_values.yml new file mode 100644 index 0000000..633a29f --- /dev/null +++ b/kubernetes/Charts/prometheus/custom_values.yml @@ -0,0 +1,869 @@ +rbac: + create: false + +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: false + + # Defines the serviceAccountName to use when `rbac.create=false` + serviceAccountName: default + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: prom/alertmanager + tag: v0.10.0 + pullPolicy: IfNotPresent + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + baseURL: "/" + + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node labels for 
alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + + replicaCount: 1 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + type: ClusterIP + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.1 + pullPolicy: IfNotPresent + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics will not be installed + ## + enabled: true + + # Defines the serviceAccountName to use when `rbac.create=false` + serviceAccountName: default + + ## kube-state-metrics container name + ## + name: kube-state-metrics + + ## kube-state-metrics container image + ## + image: + repository: gcr.io/google_containers/kube-state-metrics + tag: v1.1.0 + pullPolicy: IfNotPresent + + ## Node labels for kube-state-metrics pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to kube-state-metrics pods + ## + 
podAnnotations: {} + + replicaCount: 1 + + ## kube-state-metrics resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 16Mi + # requests: + # cpu: 10m + # memory: 16Mi + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + clusterIP: None + + ## List of IP addresses at which the kube-state-metrics service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + # Defines the serviceAccountName to use when `rbac.create=false` + serviceAccountName: default + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: prom/node-exporter + tag: v0.15.1 + pullPolicy: IfNotPresent + + ## Custom Update Strategy + ## + updateStrategy: + type: OnDelete + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # 
limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + name: server + + # Defines the serviceAccountName to use when `rbac.create=false` + serviceAccountName: default + + ## Prometheus server container image + ## + image: + repository: prom/prometheus + tag: v2.0.0 + pullPolicy: IfNotPresent + + ## (optional) alertmanager hostname + ## only used if alertmanager.enabled = false + alertmanagerHostname: "" + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. 
+ ## (Optional) + baseURL: "" + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # hostPath: /etc/kubernetes/certs + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - reddit-prometheus + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## 
Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + replicaCount: 1 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: LoadBalancer + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (i.e 360h) + ## + retention: "" + +pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: false + + ## pushgateway container name + ## + name: pushgateway + + ## 
pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v0.4.0 + pullPolicy: IfNotPresent + + ## Additional pushgateway container arguments + ## + extraArgs: {} + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + ## pushgateway Ingress annotations + ## + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to pushgateway pods + ## + podAnnotations: {} + + replicaCount: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: |- + global: + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus server ConfigMap entries +## +serverFiles: + alerts: {} + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/rules + 
- /etc/config/alerts + + global: + scrape_interval: 30s + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor + + # Scrape config for service endpoints. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+)(?::\d+);(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. 
+ - job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: (.+):(?:\d+);(\d+) + replacement: ${1}:${2} + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + + - job_name: 'reddit-ui' + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_service_label_component] + action: keep + regex: ui + + - job_name: 'reddit-comment' + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_service_label_component] + action: keep + regex: comment + + - job_name: 'reddit-post' + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + - source_labels: 
[__meta_kubernetes_service_label_component] + action: keep + regex: post + + - job_name: 'reddit-production' + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_service_label_app, __meta_kubernetes_namespace] + action: keep + regex: reddit;(production|staging)+ + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false \ No newline at end of file diff --git a/kubernetes/Charts/prometheus/templates/NOTES.txt b/kubernetes/Charts/prometheus/templates/NOTES.txt new file mode 100644 index 0000000..b4fcf54 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/NOTES.txt @@ -0,0 +1,100 @@ +The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.server.ingress.enabled -}} +From outside the cluster, the server URL(s) are: +{{- range .Values.server.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Prometheus server URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.server.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.server.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} +{{- else if contains "ClusterIP" .Values.server.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 +{{- end }} +{{- end }} + +{{- if .Values.server.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Server pod is terminated. ##### +################################################################################# +{{- end }} + +{{ if .Values.alertmanager.enabled }} +The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.alertmanager.ingress.enabled -}} +From outside the cluster, the alertmanager URL(s) are: +{{- range .Values.alertmanager.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Alertmanager URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.alertmanager.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} +{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 +{{- end }} +{{- end }} + +{{- if .Values.alertmanager.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the AlertManager pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{ if .Values.pushgateway.enabled }} +The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.pushgateway.ingress.enabled -}} +From outside the cluster, the pushgateway URL(s) are: +{{- range .Values.pushgateway.ingress.hosts }} +http://{{ . 
}} +{{- end }} +{{- else }} +Get the PushGateway URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.pushgateway.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }} +{{- else if contains "ClusterIP" .Values.pushgateway.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 +{{- end }} +{{- end }} +{{- end }} + +For more information on running Prometheus, visit: +https://prometheus.io/ diff --git a/kubernetes/Charts/prometheus/templates/_helpers.tpl b/kubernetes/Charts/prometheus/templates/_helpers.tpl new file mode 100644 index 0000000..baa0a94 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/_helpers.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified alertmanager name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.alertmanager.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified kube-state-metrics name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.kubeStateMetrics.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified node-exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.nodeExporter.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.server.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified pushgateway name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.pushgateway.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- if and (ge .Capabilities.KubeVersion.Minor "4") (le .Capabilities.KubeVersion.Minor "6") -}} +{{- print "extensions/v1beta1" -}} +{{- else if ge .Capabilities.KubeVersion.Minor "7" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/alertmanager-clusterrolebinding.yaml b/kubernetes/Charts/prometheus/templates/alertmanager-clusterrolebinding.yaml new file mode 100644 index 0000000..e6d9b4c --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/alertmanager-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.alertmanager.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.alertmanager.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.alertmanager.fullname" . 
}} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/alertmanager-configmap.yaml b/kubernetes/Charts/prometheus/templates/alertmanager-configmap.yaml new file mode 100644 index 0000000..c529365 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/alertmanager-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.alertmanager.enabled (empty .Values.alertmanager.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.alertmanager.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.alertmanager.fullname" . }} +data: +{{ toYaml .Values.alertmanagerFiles | indent 2 }} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/alertmanager-deployment.yaml b/kubernetes/Charts/prometheus/templates/alertmanager-deployment.yaml new file mode 100644 index 0000000..be3f1c8 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/alertmanager-deployment.yaml @@ -0,0 +1,97 @@ +{{- if .Values.alertmanager.enabled -}} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.alertmanager.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + replicas: {{ .Values.alertmanager.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: +{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }} + {{- end }} + labels: + app: {{ template "prometheus.name" . 
}} + component: "{{ .Values.alertmanager.name }}" + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ if .Values.rbac.create }}{{ template "prometheus.alertmanager.fullname" . }}{{ else }}"{{ .Values.alertmanager.serviceAccountName }}"{{ end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + args: + - --config.file=/etc/config/alertmanager.yml + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/#/status + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} + {{- end }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + - name: storage-volume + {{- if .Values.alertmanager.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/alertmanager-ingress.yaml b/kubernetes/Charts/prometheus/templates/alertmanager-ingress.yaml new file mode 100644 index 0000000..42e2216 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/alertmanager-ingress.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.alertmanager.fullname" . 
}} +{{- $servicePort := .Values.alertmanager.service.servicePort -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: +{{- if .Values.alertmanager.ingress.annotations }} + annotations: +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.alertmanager.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + rules: + {{- range .Values.alertmanager.ingress.hosts }} + - host: {{ . }} + http: + paths: + - backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.alertmanager.ingress.tls }} + tls: +{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/alertmanager-networkpolicy.yaml b/kubernetes/Charts/prometheus/templates/alertmanager-networkpolicy.yaml new file mode 100644 index 0000000..9f5a7ca --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/alertmanager-networkpolicy.yaml @@ -0,0 +1,26 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.alertmanager.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + podSelector: + matchLabels: + app: {{ template "prometheus.name" . 
}} + component: "{{ .Values.alertmanager.name }}" + release: {{ .Release.Name }} + ingress: + - from: + - podSelector: + matchLabels: + release: {{ .Release.Name }} + component: "{{ .Values.server.name }}" + - ports: + - port: 9093 +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/alertmanager-pvc.yaml b/kubernetes/Charts/prometheus/templates/alertmanager-pvc.yaml new file mode 100644 index 0000000..c93db4c --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/alertmanager-pvc.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} +{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.alertmanager.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.alertmanager.fullname" . 
}} +spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} +{{- if .Values.alertmanager.persistentVolume.storageClass }} +{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" +{{- end }} +{{- end }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" +{{- end -}} +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/alertmanager-service.yaml b/kubernetes/Charts/prometheus/templates/alertmanager-service.yaml new file mode 100644 index 0000000..2521e45 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/alertmanager-service.yaml @@ -0,0 +1,49 @@ +{{- if .Values.alertmanager.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.service.annotations }} + annotations: +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.alertmanager.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.alertmanager.service.labels }} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . 
}} +spec: +{{- if .Values.alertmanager.service.clusterIP }} + clusterIP: {{ .Values.alertmanager.service.clusterIP }} +{{- end }} +{{- if .Values.alertmanager.service.externalIPs }} + externalIPs: +{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.alertmanager.service.servicePort }} + protocol: TCP + targetPort: 9093 + {{- if .Values.alertmanager.service.nodePort }} + nodePort: {{ .Values.alertmanager.service.nodePort }} + {{- end }} + selector: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.alertmanager.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.alertmanager.service.type }}" +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/alertmanager-serviceaccount.yaml b/kubernetes/Charts/prometheus/templates/alertmanager-serviceaccount.yaml new file mode 100644 index 0000000..e778983 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/alertmanager-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.rbac.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.alertmanager.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.alertmanager.fullname" . 
}} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/kube-state-metrics-clusterrole.yaml b/kubernetes/Charts/prometheus/templates/kube-state-metrics-clusterrole.yaml new file mode 100644 index 0000000..2b25cc2 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/kube-state-metrics-clusterrole.yaml @@ -0,0 +1,53 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.kubeStateMetrics.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - namespaces + - nodes + - persistentvolumeclaims + - pods + - services + - resourcequotas + - replicationcontrollers + - limitranges + - persistentvolumeclaims + verbs: + - list + - watch + - apiGroups: + - extensions + resources: + - daemonsets + - deployments + - replicasets + verbs: + - list + - watch + - apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - list + - watch +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml b/kubernetes/Charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml new file mode 100644 index 0000000..cd3ad02 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "prometheus.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.kubeStateMetrics.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/kube-state-metrics-deployment.yaml b/kubernetes/Charts/prometheus/templates/kube-state-metrics-deployment.yaml new file mode 100644 index 0000000..7ffdf15 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/kube-state-metrics-deployment.yaml @@ -0,0 +1,49 @@ +{{- if .Values.kubeStateMetrics.enabled -}} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.kubeStateMetrics.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +spec: + replicas: {{ .Values.kubeStateMetrics.replicaCount }} + template: + metadata: + {{- if .Values.kubeStateMetrics.podAnnotations }} + annotations: +{{ toYaml .Values.kubeStateMetrics.podAnnotations | indent 8 }} + {{- end }} + labels: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.kubeStateMetrics.name }}" + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ if .Values.rbac.create }}{{ template "prometheus.kubeStateMetrics.fullname" . }}{{ else }}"{{ .Values.kubeStateMetrics.serviceAccountName }}"{{ end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.kubeStateMetrics.name }} + image: "{{ .Values.kubeStateMetrics.image.repository }}:{{ .Values.kubeStateMetrics.image.tag }}" + imagePullPolicy: "{{ .Values.kubeStateMetrics.image.pullPolicy }}" + {{- if .Values.kubeStateMetrics.args }} + args: + {{- range $key, $value := .Values.kubeStateMetrics.args }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 8080 + resources: +{{ toYaml .Values.kubeStateMetrics.resources | indent 12 }} + {{- if .Values.kubeStateMetrics.nodeSelector }} + nodeSelector: +{{ toYaml .Values.kubeStateMetrics.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.kubeStateMetrics.affinity }} + affinity: +{{ toYaml .Values.kubeStateMetrics.affinity | indent 8 }} + {{- end }} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml b/kubernetes/Charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml new file mode 100644 index 0000000..3fc62e2 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/kube-state-metrics-networkpolicy.yaml @@ -0,0 +1,26 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.kubeStateMetrics.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + podSelector: + matchLabels: + app: {{ template "prometheus.name" . 
}} + component: "{{ .Values.kubeStateMetrics.name }}" + release: {{ .Release.Name }} + ingress: + - from: + - podSelector: + matchLabels: + release: {{ .Release.Name }} + component: "{{ .Values.server.name }}" + - ports: + - port: 8080 +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml b/kubernetes/Charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml new file mode 100644 index 0000000..7306688 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.rbac.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.kubeStateMetrics.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . }} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/kube-state-metrics-svc.yaml b/kubernetes/Charts/prometheus/templates/kube-state-metrics-svc.yaml new file mode 100644 index 0000000..e11fa06 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/kube-state-metrics-svc.yaml @@ -0,0 +1,46 @@ +{{- if .Values.kubeStateMetrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.kubeStateMetrics.service.annotations }} + annotations: +{{ toYaml .Values.kubeStateMetrics.service.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.kubeStateMetrics.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.kubeStateMetrics.service.labels }} +{{ toYaml .Values.kubeStateMetrics.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.kubeStateMetrics.fullname" . 
}} +spec: +{{- if .Values.kubeStateMetrics.service.clusterIP }} + clusterIP: {{ .Values.kubeStateMetrics.service.clusterIP }} +{{- end }} +{{- if .Values.kubeStateMetrics.service.externalIPs }} + externalIPs: +{{ toYaml .Values.kubeStateMetrics.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.kubeStateMetrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.kubeStateMetrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.kubeStateMetrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.kubeStateMetrics.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.kubeStateMetrics.service.servicePort }} + protocol: TCP + targetPort: 8080 + selector: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.kubeStateMetrics.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.kubeStateMetrics.service.type }}" +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/node-exporter-clusterrolebinding.yaml b/kubernetes/Charts/prometheus/templates/node-exporter-clusterrolebinding.yaml new file mode 100644 index 0000000..ed4710e --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/node-exporter-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.nodeExporter.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.nodeExporter.fullname" . 
}} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/node-exporter-daemonset.yaml b/kubernetes/Charts/prometheus/templates/node-exporter-daemonset.yaml new file mode 100644 index 0000000..db04c07 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/node-exporter-daemonset.yaml @@ -0,0 +1,79 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.nodeExporter.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +spec: + {{- if .Values.nodeExporter.updateStrategy }} + updateStrategy: +{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.nodeExporter.podAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} + {{- end }} + labels: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.nodeExporter.name }}" + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ if .Values.rbac.create }}{{ template "prometheus.nodeExporter.fullname" . }}{{ else }}"{{ .Values.nodeExporter.serviceAccountName }}"{{ end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.nodeExporter.name }} + image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" + imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- range $key, $value := .Values.nodeExporter.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + ports: + - name: metrics + containerPort: 9100 + hostPort: {{ .Values.nodeExporter.service.hostPort }} + resources: +{{ toYaml .Values.nodeExporter.resources | indent 12 }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: true + hostPID: true + {{- if .Values.nodeExporter.tolerations }} + tolerations: +{{ toYaml .Values.nodeExporter.tolerations | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/node-exporter-service.yaml b/kubernetes/Charts/prometheus/templates/node-exporter-service.yaml new file mode 100644 index 0000000..6af14c2 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/node-exporter-service.yaml @@ -0,0 +1,46 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.nodeExporter.service.annotations }} + annotations: +{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "prometheus.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.nodeExporter.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.nodeExporter.service.labels }} +{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +spec: +{{- if .Values.nodeExporter.service.clusterIP }} + clusterIP: {{ .Values.nodeExporter.service.clusterIP }} +{{- end }} +{{- if .Values.nodeExporter.service.externalIPs }} + externalIPs: +{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: metrics + port: {{ .Values.nodeExporter.service.servicePort }} + protocol: TCP + targetPort: 9100 + selector: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.nodeExporter.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.nodeExporter.service.type }}" +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/node-exporter-serviceaccount.yaml b/kubernetes/Charts/prometheus/templates/node-exporter-serviceaccount.yaml new file mode 100644 index 0000000..1537984 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/node-exporter-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.rbac.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.nodeExporter.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.nodeExporter.fullname" . 
}} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/pushgateway-deployment.yaml b/kubernetes/Charts/prometheus/templates/pushgateway-deployment.yaml new file mode 100644 index 0000000..6458827 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/pushgateway-deployment.yaml @@ -0,0 +1,51 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.pushgateway.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: + replicas: {{ .Values.pushgateway.replicaCount }} + template: + metadata: + {{- if .Values.pushgateway.podAnnotations }} + annotations: +{{ toYaml .Values.pushgateway.podAnnotations | indent 8 }} + {{- end }} + labels: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.pushgateway.name }}" + release: {{ .Release.Name }} + spec: + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.pushgateway.name }} + image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" + imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.pushgateway.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + ports: + - containerPort: 9091 + readinessProbe: + httpGet: + path: /#/status + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + resources: +{{ toYaml .Values.pushgateway.resources | indent 12 }} + {{- if .Values.pushgateway.nodeSelector }} + nodeSelector: +{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.affinity }} + affinity: +{{ toYaml .Values.pushgateway.affinity | indent 8 }} + {{- end }} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/pushgateway-ingress.yaml b/kubernetes/Charts/prometheus/templates/pushgateway-ingress.yaml new file mode 100644 index 0000000..1581789 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/pushgateway-ingress.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.pushgateway.fullname" . }} +{{- $servicePort := .Values.pushgateway.service.servicePort -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: +{{- if .Values.pushgateway.ingress.annotations }} + annotations: +{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} +{{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.pushgateway.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: + rules: + {{- range .Values.pushgateway.ingress.hosts }} + - host: {{ . 
}} + http: + paths: + - backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.pushgateway.ingress.tls }} + tls: +{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/pushgateway-service.yaml b/kubernetes/Charts/prometheus/templates/pushgateway-service.yaml new file mode 100644 index 0000000..9686319 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/pushgateway-service.yaml @@ -0,0 +1,46 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.pushgateway.service.annotations }} + annotations: +{{ toYaml .Values.pushgateway.service.annotations | indent 4}} +{{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.pushgateway.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.pushgateway.service.labels }} +{{ toYaml .Values.pushgateway.service.labels | indent 4}} +{{- end }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: +{{- if .Values.pushgateway.service.clusterIP }} + clusterIP: {{ .Values.pushgateway.service.clusterIP }} +{{- end }} +{{- if .Values.pushgateway.service.externalIPs }} + externalIPs: +{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.pushgateway.service.servicePort }} + protocol: TCP + targetPort: 9091 + selector: + app: {{ template "prometheus.name" . 
}} + component: "{{ .Values.pushgateway.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.pushgateway.service.type }}" +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/server-clusterrole.yaml b/kubernetes/Charts/prometheus/templates/server-clusterrole.yaml new file mode 100644 index 0000000..d9c6e4d --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-clusterrole.yaml @@ -0,0 +1,35 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.server.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - services + - endpoints + - pods + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/server-clusterrolebinding.yaml b/kubernetes/Charts/prometheus/templates/server-clusterrolebinding.yaml new file mode 100644 index 0000000..3196491 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.server.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.server.fullname" . 
}} +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/server-configmap.yaml b/kubernetes/Charts/prometheus/templates/server-configmap.yaml new file mode 100644 index 0000000..fb8d5f4 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-configmap.yaml @@ -0,0 +1,42 @@ +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.server.fullname" . }} +data: +{{- $root := . -}} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{ toYaml $value | default "{}" | indent 4 }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.alertmanager.enabled -}} + alerting: + alertmanagers: + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} ## NOTE: this may not render correctly inside the ConfigMap; assign it to a template variable first and reference that. 
+ action: keep + - source_labels: [__meta_kubernetes_pod_label_app] + regex: prometheus + action: keep + - source_labels: [__meta_kubernetes_pod_label_component] + regex: alertmanager + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: + action: drop +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/server-deployment.yaml b/kubernetes/Charts/prometheus/templates/server-deployment.yaml new file mode 100644 index 0000000..d443b2f --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-deployment.yaml @@ -0,0 +1,112 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.server.fullname" . }} +spec: + replicas: {{ .Values.server.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: +{{ toYaml .Values.server.podAnnotations | indent 8 }} + {{- end }} + labels: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.server.name }}" + release: {{ .Release.Name }} + spec: + securityContext: + runAsUser: 0 + serviceAccountName: {{ if .Values.rbac.create }}{{ template "prometheus.server.fullname" . }}{{ else }}"{{ .Values.server.serviceAccountName }}"{{ end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://localhost:9090{{ .Values.server.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + args: + {{- if .Values.server.retention }} + - --storage.tsdb.retention={{ .Values.server.retention }} + {{- end }} + - --config.file=/etc/config/prometheus.yml + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + - --web.enable-lifecycle + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/status + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + 
{{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end -}} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} diff --git a/kubernetes/Charts/prometheus/templates/server-ingress.yaml b/kubernetes/Charts/prometheus/templates/server-ingress.yaml new file mode 100644 index 0000000..e6009d5 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-ingress.yaml @@ -0,0 +1,33 @@ +{{- if .Values.server.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.service.servicePort -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.server.fullname" . }} +spec: + rules: + {{- range .Values.server.ingress.hosts }} + - host: {{ . 
}} + http: + paths: + - backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/server-networkpolicy.yaml b/kubernetes/Charts/prometheus/templates/server-networkpolicy.yaml new file mode 100644 index 0000000..3f1cdcc --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-networkpolicy.yaml @@ -0,0 +1,21 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + podSelector: + matchLabels: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.server.name }}" + release: {{ .Release.Name }} + ingress: + - ports: + - port: 9090 +{{- end }} diff --git a/kubernetes/Charts/prometheus/templates/server-pvc.yaml b/kubernetes/Charts/prometheus/templates/server-pvc.yaml new file mode 100644 index 0000000..c4711e9 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-pvc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.server.fullname" . 
}} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} +{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" +{{- end }} +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- end -}} +{{- end -}} diff --git a/kubernetes/Charts/prometheus/templates/server-service.yaml b/kubernetes/Charts/prometheus/templates/server-service.yaml new file mode 100644 index 0000000..be81694 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-service.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . 
}} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + selector: + app: {{ template "prometheus.name" . }} + component: "{{ .Values.server.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.server.service.type }}" diff --git a/kubernetes/Charts/prometheus/templates/server-serviceaccount.yaml b/kubernetes/Charts/prometheus/templates/server-serviceaccount.yaml new file mode 100644 index 0000000..a539b38 --- /dev/null +++ b/kubernetes/Charts/prometheus/templates/server-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.rbac.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "prometheus.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.server.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "prometheus.server.fullname" . 
}} +{{- end }} diff --git a/kubernetes/Charts/prometheus/values.yaml b/kubernetes/Charts/prometheus/values.yaml new file mode 100644 index 0000000..2183dc0 --- /dev/null +++ b/kubernetes/Charts/prometheus/values.yaml @@ -0,0 +1,855 @@ +rbac: + create: false + +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + # Defines the serviceAccountName to use when `rbac.create=false` + serviceAccountName: default + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: prom/alertmanager + tag: v0.10.0 + pullPolicy: IfNotPresent + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + ## Maybe same with Ingress host name + baseURL: "/" + + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + 
## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + + replicaCount: 1 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + type: ClusterIP + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.1 + pullPolicy: IfNotPresent + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics will not be installed + ## + enabled: true + + # Defines the serviceAccountName to use when `rbac.create=false` + serviceAccountName: default + + ## kube-state-metrics container name + ## + name: kube-state-metrics + + ## kube-state-metrics container image + ## + image: + repository: k8s.gcr.io/kube-state-metrics + tag: v1.1.0 + pullPolicy: IfNotPresent + + ## kube-state-metrics container arguments + ## + args: {} + + ## Node labels for kube-state-metrics pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be 
added to kube-state-metrics pods + ## + podAnnotations: {} + + replicaCount: 1 + + ## kube-state-metrics resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 16Mi + # requests: + # cpu: 10m + # memory: 16Mi + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + clusterIP: None + + ## List of IP addresses at which the kube-state-metrics service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + # Defines the serviceAccountName to use when `rbac.create=false` + serviceAccountName: default + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: prom/node-exporter + tag: v0.15.2 + pullPolicy: IfNotPresent + + ## Custom Update Strategy + ## + updateStrategy: + type: OnDelete + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## node-exporter resource limits & requests + ## Ref: 
https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + name: server + + # Defines the serviceAccountName to use when `rbac.create=false` + serviceAccountName: default + + ## Prometheus server container image + ## + image: + repository: prom/prometheus + tag: v2.0.0 + pullPolicy: IfNotPresent + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. 
+ ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # hostPath: /etc/kubernetes/certs + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic 
provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + replicaCount: 1 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (i.e 360h) + ## + retention: "" + +pushgateway: + ## If false, 
pushgateway will not be installed + ## + enabled: true + + ## pushgateway container name + ## + name: pushgateway + + ## pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v0.4.0 + pullPolicy: IfNotPresent + + ## Additional pushgateway container arguments + ## + extraArgs: {} + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + ## pushgateway Ingress annotations + ## + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to pushgateway pods + ## + podAnnotations: {} + + replicaCount: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: |- + global: + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus 
server ConfigMap entries +## +serverFiles: + alerts: {} + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. 
You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}:4194/proxy/metrics + + # Scrape config for service endpoints. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+)(?::\d+);(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. 
+ - job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: (.+):(?:\d+);(\d+) + replacement: ${1}:${2} + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false