diff --git a/.aiignore b/.aiignore new file mode 100644 index 00000000..de153db3 --- /dev/null +++ b/.aiignore @@ -0,0 +1 @@ +artifacts diff --git a/.env.sample b/.env.sample index 64882c99..219cd4e1 100644 --- a/.env.sample +++ b/.env.sample @@ -1 +1,61 @@ +# devnet name, optional (default `my-devnet`) +DEVNET_NAME= + +# k8s global + +## (optional) ssh tunnel helpers (see `ssh tunnel` command) +SSH_HOST= +# ssh user (optional) +SSH_USER= +# ssh private key (optional) +SSH_PRIVATE_KEY= +SSH_TUNNEL_REMOTE_ADDRESS=127.0.0.1:16443 +SSH_TUNNEL_LOCAL_PORT=16444 + +## cluster name from kubeconfig file +K8S_KUBECTL_CLUSTER_NAME= + +## cluster context name from kubeconfig file +K8S_KUBECTL_DEFAULT_CONTEXT= +## global ingress hostname prefix for all ingresses for devnet, for keeping domains secret (example: `somesecret`) +GLOBAL_INGRESS_HOST_PREFIX= + +# nodes +ETH_NODES_INGRESS_HOSTNAME=$(DEVNET_NAME).devnet.local + +# dora ingress hostname (without http://) +DORA_INGRESS_HOSTNAME=dora.$(DEVNET_NAME).devnet.local + +# blockscout ingress hostnames (without http://) +BLOCKSCOUT_BACKEND_INGRESS_HOSTNAME=blockscout-backend.$(DEVNET_NAME).devnet.local +BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME=blockscout.$(DEVNET_NAME).devnet.local +BLOCKSCOUT_VERIFIER_INGRESS_HOSTNAME=blockscout-verifier.$(DEVNET_NAME).devnet.local + +# docker registry +## registry type (local | external) +DOCKER_REGISTRY_TYPE=external + +## registry ingress hostnames (without http://) +DOCKER_REGISTRY_LOCAL_INGRESS_HOSTNAME=container-registry-ui.devnet.local +DOCKER_REGISTRY_LOCAL_INGRESS_UI_HOSTNAME=container-registry-ui.devnet.local + +DOCKER_REGISTRY_EXTERNAL_HOSTNAME=container-registry-ui.devnet.local +DOCKER_REGISTRY_EXTERNAL_UI_HOSTNAME=container-registry-ui.devnet.local +DOCKER_REGISTRY_USERNAME= +DOCKER_REGISTRY_PASSWORD= + +# kapi ingress hostname (without http://) +KAPI_INGRESS_HOSTNAME=keys-api.$(DEVNET_NAME).devnet.local + +# ipfs kubo +KUBO_INGRESS_HOSTNAME=ipfs.$(DEVNET_NAME).devnet.local + +# no 
widget backend +NO_WIDGET_BACKEND_INGRESS_HOSTNAME=no-widget-backend.$(DEVNET_NAME).devnet.local + +# no widget frontend +NO_WIDGET_INGRESS_HOSTNAME=operators-widget.$(DEVNET_NAME).devnet.local + +# oracles (optional) PINATA_JWT= +CSM_ORACLE_PINATA_JWT= diff --git a/IMPROVEMENTS.md b/IMPROVEMENTS.md new file mode 100644 index 00000000..23c9294f --- /dev/null +++ b/IMPROVEMENTS.md @@ -0,0 +1,21 @@ + +# List of features to improve +* Make the lido-local-devnet easily extensible +* Idempotence for all commands +* Merge services, state, artifacts into a new entity called "project" + * the devnet is a root project + * kapi, lido-core, lido-cli are subprojects + * each subproject has + * commands + * templates + * services + * artifacts + * has optional git repo + * workspace + * patches + * constants +* Add the ability to patch docker images with files, having special `patches` folder +* Add the ability to patch git repos with files, having special `patches` folder +* Improve artifact store to use NFS or S3 or Minio +* Improve artifact store to upload it to the k8s storage with password-credentials +* Look at cdk8s diff --git a/README.md b/README.md index e547ff70..93e888b2 100644 --- a/README.md +++ b/README.md @@ -23,44 +23,97 @@ Lido Local DevNet is a powerful tool for deploying and testing the Lido protocol - **npm** ([Install npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)) - **Docker** 27+ ([Install Docker](https://www.docker.com/)) - **Docker Compose** V2 ([Install Docker Compose](https://docs.docker.com/compose/)) -- **Kurtosis** ([Install Kurtosis](https://www.kurtosistech.com/)) +- **Kurtosis 1.11.1+** ([Install Kurtosis](https://www.kurtosistech.com/)) - **Foundry tools** ([Install Foundry](https://book.getfoundry.sh/getting-started/installation)) - **Just** ([Install Just](https://github.com/casey/just)) +- **Make** 4+ (Install Make - Linux: `sudo apt-get install build-essential`) +- **Kubectl** v1.30+ (for k8s deployments) ([Install 
Kubectl](https://kubernetes.io/docs/tasks/tools/))` +- **Helm** 3.12+ (for k8s deployments) ([Install Helm](https://helm.sh/docs/intro/install/))` --- -## Getting Started +## Getting Started (with k8s integration) -Follow these steps to set up the DevNet: +Original docs are located in `https://docs.kurtosis.com/k8s/` -### 1. Start Kurtosis -Kurtosis is required to launch Ethereum nodes: +### 1. Install dependencies ```sh -kurtosis engine start +yarn && yarn build:all ``` -### 2. Install dependencies +### 2. Create `.env` file and fill it with the required values ```sh -yarn && yarn build:all +cp .env.sample .env +``` + +### 3. (Optional) Turn on SSH Tunnel to the machine with k8s cluster +```sh +./bin/run.js ssh tunnel +``` + +### 4. Set the current context to the k8s cluster (if you have multiple clusters) + +Contexts can be found by running: `kubectl config get-contexts` + +```sh +kubectl config use-context # or whatever your k8s context is ``` -### 3. Launch the environment and deploy Lido smart contracts -Below is an example for launching the `pectra` test stand. If you need a different setup, refer to the [test stands documentation](./docs/commands/stands.md). +### 5. Ensure that you are connected to the k8s cluster + +The cluster can be accessible via SSH Tunnel. ```sh -./bin/run.js stands pectra --full +kubectl cluster-info +``` + +### 6. Change kurtosis config to work with the k8s cluster + +Update once your kurtosis config at `echo $(kurtosis config path)` location + +```yaml +config-version: 6 +should-send-metrics: false +kurtosis-clusters: + docker: + type: "docker" + cloud: + type: "kubernetes" + config: + kubernetes-cluster-name: "" # change the cluster name if needed + storage-class: "ssd-hostpath" + enclave-size-in-megabytes: 256 +``` + +### 7. Point kurtosis to the cluster +```sh +# tell kurtosis to work with k8s cluster +kurtosis cluster set cloud # or whatever your kurtosis cluster is +``` + +### 8. 
Start Kurtosis +Kurtosis is required to launch Ethereum nodes +```sh +kurtosis engine start +``` + +### 9. Launch the environment and deploy Lido smart contracts +Below is an example for launching the `fusaka` test stand. +If you need a different setup, refer to the [test stands documentation](./docs/commands/stands.md). + +```sh +./bin/run.js stands # (fusaka) or any other test stand name ``` For contract verification, use the `--verify` flag: ```sh -./bin/run.js stands pectra --full --verify +./bin/run.js stands --verify ``` For a full DSM infrastructure deployment, add the `--dsm` flag: ```sh -./bin/run.js stands pectra --full --verify --dsm +./bin/run.js stands --verify --dsm ``` -### 4. Interaction with Voting scripts - +### 10. (Optional) Interaction with Voting scripts Since voting scripts require Python and Brownie, install the necessary dependencies: ```sh @@ -85,7 +138,7 @@ After adding an account, proceed with the voting process. See the [voting docume ./bin/run.js voting enact-after-pectra ``` -### 5. Done! +### 11. Done! The network, infrastructure, and protocol have been successfully launched. --- @@ -100,13 +153,15 @@ To stop the DevNet and remove all services, run: ## Running Multiple Environments -To run multiple environments on a single machine, use the `--network ` flag: +To run multiple devnets on a single cluster, change the `DEVNET_NAME=` variable in `.env` file +All the commands will be executed in the context of the current devnet. +--- + +## DevNet info +To get the latest information on available services, run: ```sh -./bin/run.js stands pectra --full --network test-pectra1 +./bin/run.js chain info ``` -> **Note:** The `--network test-pectra1` flag must be used with all subsequent commands to interact with the specified environment. 
- ---- ## Available Services To get the latest information on available services, run: diff --git a/bin/dev.js b/bin/dev.js index dd748048..1edff3b8 100755 --- a/bin/dev.js +++ b/bin/dev.js @@ -1,6 +1,8 @@ #!/usr/bin/env -S node --loader ts-node/esm --disable-warning=ExperimentalWarning // eslint-disable-next-line n/shebang -import {execute} from '@oclif/core' +import {execute} from '@oclif/core'; +import * as dotenv from 'dotenv'; +dotenv.config({ path: '.env' }); await execute({development: true, dir: import.meta.url}) diff --git a/bin/run.js b/bin/run.js index dd50271f..4e9d2529 100755 --- a/bin/run.js +++ b/bin/run.js @@ -1,5 +1,7 @@ #!/usr/bin/env node -import {execute} from '@oclif/core' +import {execute} from '@oclif/core'; +import * as dotenv from 'dotenv'; +dotenv.config({ path: '.env' }); await execute({dir: import.meta.url}) diff --git a/config.schema.json b/config.schema.json new file mode 100644 index 00000000..b666c47a --- /dev/null +++ b/config.schema.json @@ -0,0 +1,153 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Lido Local Devnet Configuration", + "description": "Configuration schema for Lido local development network", + "type": "object", + "properties": { + "networks": { + "type": "array", + "description": "Array of network configurations", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Network identifier name" + }, + "chain": { + "type": "object", + "description": "Chain configuration with execution and consensus layer endpoints", + "properties": { + "elPrivate": { + "type": "string", + "format": "uri", + "description": "Private execution layer endpoint URL" + }, + "clPrivate": { + "type": "string", + "format": "uri", + "description": "Private consensus layer endpoint URL" + }, + "elPublic": { + "type": "string", + "format": "uri", + "description": "Public execution layer endpoint URL" + }, + "clPublic": { + "type": "string", + "format": "uri", + "description": 
"Public consensus layer endpoint URL" + } + }, + "required": ["elPrivate", "clPrivate", "elPublic", "clPublic"], + "additionalProperties": false + }, + "lido": { + "type": "object", + "description": "Lido protocol contract addresses", + "properties": { + "agent": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Agent contract address" + }, + "voting": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Voting contract address" + }, + "tokenManager": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Token manager contract address" + }, + "sanityChecker": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Sanity checker contract address" + }, + "accountingOracle": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Accounting oracle contract address" + }, + "validatorExitBus": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Validator exit bus contract address" + }, + "locator": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Locator contract address" + } + }, + "required": ["agent", "voting", "tokenManager", "sanityChecker", "accountingOracle", "validatorExitBus", "locator"], + "additionalProperties": false + }, + "csm": { + "type": "object", + "description": "Community Staking Module configuration", + "properties": { + "accounting": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "CSM accounting contract address" + }, + "earlyAdoption": { + "type": "boolean", + "description": "Early adoption flag" + }, + "feeDistributor": { + "type": "integer", + "minimum": 0, + "description": "Fee distributor value" + }, + "feeOracle": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Fee oracle contract address" + }, + "module": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "CSM module contract address" + }, + "verifier": 
{ + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Verifier contract address" + }, + "gateSeal": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Gate seal contract address" + }, + "hashConsensus": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Hash consensus contract address" + }, + "lidoLocator": { + "type": "string", + "pattern": "^0x[a-fA-F0-9]{40}$", + "description": "Lido locator contract address" + } + }, + "required": ["accounting", "earlyAdoption", "feeDistributor", "feeOracle", "module", "verifier", "gateSeal", "hashConsensus", "lidoLocator"], + "additionalProperties": false + }, + "walletMnemonic": { + "type": "string", + "description": "Wallet mnemonic phrase for the network" + } + }, + "required": ["name", "chain", "lido", "csm", "walletMnemonic"], + "additionalProperties": false + }, + "minItems": 1 + } + }, + "required": ["networks"], + "additionalProperties": false +} diff --git a/config.yml b/config.yml index 060e03b6..ccdedc7e 100644 --- a/config.yml +++ b/config.yml @@ -1,3 +1,7 @@ +# yaml-language-server: $schema=file://config.schema.json + +currentNetwork: devnet4 + networks: - name: devnet4 chain: diff --git a/helm/lido/lido-app/Chart.yaml b/helm/lido/lido-app/Chart.yaml new file mode 100644 index 00000000..d0ee4e53 --- /dev/null +++ b/helm/lido/lido-app/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: lido-app +description: Basic Lido application Helm chart +type: application +version: 1.0.1 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' diff --git a/helm/lido/lido-app/templates/NOTES.txt b/helm/lido/lido-app/templates/NOTES.txt new file mode 100644 index 00000000..995e7aca --- /dev/null +++ b/helm/lido/lido-app/templates/NOTES.txt @@ -0,0 +1,36 @@ +1. 
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "app.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "app.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "app.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "{{ include "app.selectorLabels" . }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} + +2. 
Backend application has been deployed with the following configuration: + - Replicas: {{ .Values.replicaCount }} + - Image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} + - Service type: {{ .Values.service.type }} + {{- if .Values.ingress.enabled }} + - Ingress enabled: {{ .Values.ingress.enabled }} + {{- end }} + +3. To get the status of the deployment: + kubectl get deployment {{ include "app.fullname" . }} --namespace {{ .Release.Namespace }} + +4. To view application logs: + kubectl logs -f deployment/{{ include "app.fullname" . }} --namespace {{ .Release.Namespace }} diff --git a/helm/lido/lido-app/templates/_helpers.tpl b/helm/lido/lido-app/templates/_helpers.tpl new file mode 100644 index 00000000..6e13b3a0 --- /dev/null +++ b/helm/lido/lido-app/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "app.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "app.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "app.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "app.labels" -}} +helm.sh/chart: {{ include "app.chart" . 
}} +{{ include "app.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "app.selectorLabels" -}} +app.kubernetes.io/name: {{ include "app.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "app.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "app.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/lido/lido-app/templates/deployment.yaml b/helm/lido/lido-app/templates/deployment.yaml new file mode 100644 index 00000000..7da76240 --- /dev/null +++ b/helm/lido/lido-app/templates/deployment.yaml @@ -0,0 +1,111 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "app.fullname" . }} + labels: + {{- include "app.labels" . | nindent 4 }} + {{- with .Values.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.additionalAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "app.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "app.selectorLabels" . | nindent 8 }} + {{- with .Values.additionalLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.additionalAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.image.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "app.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ include "app.name" . 
}} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- toYaml .Values.command | nindent 12 }} + {{- end }} + {{- if .Values.args }} + args: + {{- toYaml .Values.args | nindent 12 }} + {{- end }} + ports: + {{- range .Values.service.ports }} + - name: {{ .name }} + containerPort: {{ .targetPort }} + protocol: {{ .protocol }} + {{- end }} + {{- if .Values.livenessProbe }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + env: + {{- range $key, $value := .Values.env.variables }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if .Values.pvcs }} + volumeMounts: + {{- range .Values.pvcs }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- if .subPath }} + subPath: {{ .subPath }} + {{- end }} + {{- if .readOnly }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.pvcs }} + volumes: + {{- range .Values.pvcs }} + - name: {{ .name }} + persistentVolumeClaim: + claimName: {{ include "app.fullname" $ }}-{{ .name }} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/helm/lido/lido-app/templates/ingress.yaml b/helm/lido/lido-app/templates/ingress.yaml new file mode 100644 index 00000000..4c4f0023 --- /dev/null +++ b/helm/lido/lido-app/templates/ingress.yaml @@ -0,0 +1,44 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "app.fullname" . }} + labels: + {{- include "app.labels" . | nindent 4 }} + {{- with .Values.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ include "app.fullname" $ }} + port: + name: {{ .port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/helm/lido/lido-app/templates/persistentvolumeclaim.yaml b/helm/lido/lido-app/templates/persistentvolumeclaim.yaml new file mode 100644 index 00000000..217aa662 --- /dev/null +++ b/helm/lido/lido-app/templates/persistentvolumeclaim.yaml @@ -0,0 +1,32 @@ +{{- range .Values.pvcs }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "app.fullname" $ }}-{{ .name }} + labels: + {{- include "app.labels" $ | nindent 4 }} + {{- with $.Values.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with $.Values.additionalAnnotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + accessModes: + {{- toYaml .accessModes | nindent 4 }} + {{- if .storageClassName }} + storageClassName: {{ .storageClassName }} + {{- end }} + resources: + requests: + storage: {{ .size }} + {{- if .selector }} + selector: + {{- toYaml .selector | nindent 4 }} + {{- end }} + {{- if .volumeMode }} + volumeMode: {{ .volumeMode }} + {{- end }} +{{- end }} diff --git a/helm/lido/lido-app/templates/service.yaml b/helm/lido/lido-app/templates/service.yaml new file mode 100644 index 00000000..066f8b1a --- /dev/null +++ b/helm/lido/lido-app/templates/service.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "app.fullname" . }} + labels: + {{- include "app.labels" . | nindent 4 }} + {{- with .Values.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.additionalAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + {{- if .Values.service.ports }} + {{- range .Values.service.ports }} + - name: {{ .name }} + port: {{ .port }} + targetPort: {{ .targetPort }} + {{- if .protocol }} + protocol: {{ .protocol }} + {{- else }} + protocol: TCP + {{- end }} + {{- end }} + {{- else }} + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + {{- end }} + selector: + {{- include "app.selectorLabels" . | nindent 4 }} diff --git a/helm/lido/lido-app/templates/serviceaccount.yaml b/helm/lido/lido-app/templates/serviceaccount.yaml new file mode 100644 index 00000000..903bd37b --- /dev/null +++ b/helm/lido/lido-app/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "app.serviceAccountName" . }} + labels: + {{- include "app.labels" . | nindent 4 }} + {{- with .Values.additionalLabels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/lido/lido-app/values.schema.json b/helm/lido/lido-app/values.schema.json new file mode 100644 index 00000000..cd77af6d --- /dev/null +++ b/helm/lido/lido-app/values.schema.json @@ -0,0 +1,441 @@ +{ + "$schema": "https://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "nameOverride": { + "type": "string", + "description": "Override the full name of the release" + }, + "replicaCount": { + "type": "integer", + "minimum": 1, + "description": "Number of replicas for the backend deployment" + }, + "image": { + "type": "object", + "properties": { + "registry": { + "type": "string", + "description": "Docker registry to pull the image from" + }, + "repository": { + "type": "string", + "description": "Repository/image name" + }, + "tag": { + "type": "string", + "description": "Image tag" + }, + "pullPolicy": { + "type": "string", + "enum": ["Always", "Never", "IfNotPresent"], + "description": "Image pull policy" + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": ["name"] + }, + "description": "Image pull secrets for private docker registries" + } + }, + "required": ["registry", "repository", "tag", "pullPolicy"], + "additionalProperties": false + }, + "service": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["ClusterIP", "NodePort", "LoadBalancer", "ExternalName"], + "description": "Service type" + }, + "ports": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + }, + "targetPort": { + "oneOf": [ + {"type": "integer", "minimum": 1, "maximum": 65535}, + {"type": "string"} + ] + }, + "protocol": { + "type": "string", + "enum": 
["TCP", "UDP", "SCTP"] + } + }, + "required": ["name", "port", "targetPort", "protocol"] + }, + "description": "Multiple ports configuration" + } + }, + "required": ["type", "ports"], + "additionalProperties": false + }, + "ingress": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable/disable ingress" + }, + "className": { + "type": "string", + "description": "Ingress class name" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Ingress annotations" + }, + "hosts": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "pathType": { + "type": "string", + "enum": ["Exact", "Prefix", "ImplementationSpecific"] + } + }, + "required": ["path", "pathType"] + } + } + }, + "required": ["host", "paths"] + }, + "description": "Ingress hosts configuration" + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "properties": { + "secretName": { + "type": "string" + }, + "hosts": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["secretName", "hosts"] + }, + "description": "TLS configuration" + } + }, + "required": ["enabled"], + "additionalProperties": false + }, + "command": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Container command" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Container args" + }, + "env": { + "type": "object", + "properties": { + "variables": { + "type": "object", + "additionalProperties": { + "oneOf": [ + {"type": "string"}, + {"type": "number"}, + {"type": "boolean"} + ] + }, + "description": "Static environment variables" + } + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": 
"object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + }, + "additionalProperties": false + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false, + "description": "Resource limits and requests" + }, + "nodeSelector": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Node selector" + }, + "tolerations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string", + "enum": ["Equal", "Exists"] + }, + "value": { + "type": "string" + }, + "effect": { + "type": "string", + "enum": ["NoSchedule", "PreferNoSchedule", "NoExecute"] + }, + "tolerationSeconds": { + "type": "integer", + "minimum": 0 + } + } + }, + "description": "Tolerations" + }, + "affinity": { + "type": "object", + "description": "Affinity rules" + }, + "securityContext": { + "type": "object", + "description": "Security context" + }, + "podSecurityContext": { + "type": "object", + "description": "Pod security context" + }, + "livenessProbe": { + "type": "object", + "properties": { + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "port": { + "oneOf": [ + {"type": "integer", "minimum": 1, "maximum": 65535}, + {"type": "string"} + ] + } + }, + "required": ["path", "port"] + }, + "exec": { + "type": "object", + "properties": { + "command": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["command"] + }, + "initialDelaySeconds": { + "type": "integer", + "minimum": 0 + }, + "periodSeconds": { + "type": "integer", + "minimum": 1 + }, + "timeoutSeconds": { + "type": "integer", + "minimum": 1 + }, + "failureThreshold": { + "type": "integer", + "minimum": 1 + }, + "successThreshold": { + "type": "integer", + "minimum": 1 + } 
+ }, + "description": "Liveness probe" + }, + "readinessProbe": { + "type": "object", + "properties": { + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "port": { + "oneOf": [ + {"type": "integer", "minimum": 1, "maximum": 65535}, + {"type": "string"} + ] + } + }, + "required": ["path", "port"] + }, + "exec": { + "type": "object", + "properties": { + "command": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["command"] + }, + "initialDelaySeconds": { + "type": "integer", + "minimum": 0 + }, + "periodSeconds": { + "type": "integer", + "minimum": 1 + }, + "timeoutSeconds": { + "type": "integer", + "minimum": 1 + }, + "failureThreshold": { + "type": "integer", + "minimum": 1 + }, + "successThreshold": { + "type": "integer", + "minimum": 1 + } + }, + "description": "Readiness probe" + }, + "additionalLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Additional labels" + }, + "additionalAnnotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Additional annotations" + }, + "pvcs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "storageClassName": { + "type": "string" + }, + "accessModes": { + "type": "array", + "items": { + "type": "string", + "enum": ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany", "ReadWriteOncePod"] + } + }, + "size": { + "type": "string" + }, + "mountPath": { + "type": "string" + } + }, + "required": ["name", "accessModes", "size", "mountPath"] + }, + "description": "Persistent Volume Claims configuration" + }, + "serviceAccount": { + "type": "object", + "properties": { + "create": { + "type": "boolean", + "description": "Create service account" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Service account annotations" + }, + "name": { + "type": 
"string", + "description": "Service account name (if not created, must exist)" + } + }, + "required": ["create", "name"], + "additionalProperties": false + } + } +} diff --git a/helm/lido/lido-app/values.yaml b/helm/lido/lido-app/values.yaml new file mode 100644 index 00000000..8c7fe234 --- /dev/null +++ b/helm/lido/lido-app/values.yaml @@ -0,0 +1,154 @@ +# Default values for backend. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +nameOverride: "" + +# Number of replicas for the backend deployment +replicaCount: 1 + +# Image configuration +image: + # Docker registry to pull the image from + registry: docker.io + # Repository/image name + repository: busybox + # Image tag (can be overridden) + tag: "latest" + # Image pull policy + pullPolicy: IfNotPresent + # Image pull secrets for private docker registries + imagePullSecrets: [] + # Example: + # imagePullSecrets: + # - name: myregistrykey + + + +# Service configuration +service: + # Service type + type: ClusterIP + # Multiple ports configuration + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + +# Ingress configuration +ingress: + # Enable/disable ingress + enabled: false + # Ingress class name + className: "public" + # Ingress annotations + annotations: {} + # Example annotations: + # annotations: + # nginx.ingress.kubernetes.io/rewrite-target: / + # cert-manager.io/cluster-issuer: letsencrypt-prod + + # Ingress hosts configuration + hosts: + - host: lido-app.local + paths: + - path: / + pathType: Prefix + port: http + + # TLS configuration + tls: [] + # Example TLS: + # tls: + # - secretName: backend-tls + # hosts: + # - backend.local + +# Container command +command: [] + +# Container args +args: [] + +# Environment variables +env: + # Static environment variables + variables: {} + # Example: + # variables: + # NODE_ENV: production + # API_URL: https://api.example.com + +# Resource limits and requests +resources: {} +# Example: +# resources: +# 
limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 250m +# memory: 256Mi + +# Node selector +nodeSelector: {} + +# Tolerations +tolerations: [] + +# Affinity rules +affinity: {} + +# Security context +securityContext: {} + +# Pod security context +podSecurityContext: {} + +# Liveness probe +livenessProbe: {} +# httpGet: +# path: /health +# port: http +# initialDelaySeconds: 30 +# periodSeconds: 10 + +# Readiness probe +readinessProbe: {} +# httpGet: +# path: /ready +# port: http +# initialDelaySeconds: 5 +# periodSeconds: 5 + +# Additional labels +additionalLabels: {} + +# Additional annotations +additionalAnnotations: {} + +# Persistent Volume Claims configuration +pvcs: [] +# Example PVC configuration: +# pvcs: +# - name: data-storage +# storageClassName: "ssh-hostpath" +# accessModes: +# - ReadWriteOnce +# size: 10Gi +# mountPath: /data +# - name: logs-storage +# storageClassName: "ssh-hostpath" +# accessModes: +# - ReadWriteOnce +# size: 5Gi +# mountPath: /logs + +# Service account +serviceAccount: + # Create service account + create: true + # Service account annotations + annotations: {} + # Service account name (if not created, must exist) + name: "" diff --git a/helm/lido/lido-council/Chart.lock b/helm/lido/lido-council/Chart.lock new file mode 100644 index 00000000..09425691 --- /dev/null +++ b/helm/lido/lido-council/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +digest: sha256:8a168e54af9a00f2953fbe69e5ada62110811d254bc1373771f0bd1248da4988 +generated: "2025-09-25T16:39:14.60799767+03:00" diff --git a/helm/lido/lido-council/Chart.yaml b/helm/lido/lido-council/Chart.yaml new file mode 100644 index 00000000..2b3f5a46 --- /dev/null +++ b/helm/lido/lido-council/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: lido-council +description: Lido Council Daemon Helm chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + 
version: "1.0.1" + repository: "file://../lido-app" diff --git a/helm/lido/lido-council/charts/lido-app-1.0.1.tgz b/helm/lido/lido-council/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..31b7134d Binary files /dev/null and b/helm/lido/lido-council/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-council/values.yaml b/helm/lido/lido-council/values.yaml new file mode 100644 index 00000000..6cc53e63 --- /dev/null +++ b/helm/lido/lido-council/values.yaml @@ -0,0 +1,99 @@ +lido-app: + nameOverride: lido-council + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lidofinance/lido-council-daemon + tag: "dev" + pullPolicy: Always + imagePullSecrets: + - name: registry-pull-secret + + service: + type: ClusterIP + ports: + - name: http + port: 9040 + targetPort: 9040 + protocol: TCP + + # Container command + command: [ ] + + # Container args + args: [] + + # Environment variables + env: + # Static environment variables + variables: + PORT: 3000 + LOG_LEVEL: ${LOG_LEVEL} + LOG_FORMAT: ${LOG_FORMAT} + RPC_URL: ${RPC_URL} + WALLET_PRIVATE_KEY: ${WALLET_PRIVATE_KEY_1} + KEYS_API_HOST: ${KEYS_API_HOST} + KEYS_API_PORT: ${KEYS_API_PORT} + PUBSUB_SERVICE: ${PUBSUB_SERVICE} + EVM_CHAIN_DATA_BUS_ADDRESS: ${EVM_CHAIN_DATA_BUS_ADDRESS} + EVM_CHAIN_DATA_BUS_PROVIDER_URL: ${EVM_CHAIN_DATA_BUS_PROVIDER_URL} + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + httpGet: + path: /health + 
port: http + initialDelaySeconds: 5 + periodSeconds: 5 + + # Additional labels + additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: + - name: council-cache + accessModes: + - ReadWriteOnce + size: 16Gi + mountPath: /council/cache/ diff --git a/helm/lido/lido-csm-prover-tool/Chart.lock b/helm/lido/lido-csm-prover-tool/Chart.lock new file mode 100644 index 00000000..cdd819fe --- /dev/null +++ b/helm/lido/lido-csm-prover-tool/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +digest: sha256:8a168e54af9a00f2953fbe69e5ada62110811d254bc1373771f0bd1248da4988 +generated: "2025-10-03T15:29:25.191253+04:00" diff --git a/helm/lido/lido-csm-prover-tool/Chart.yaml b/helm/lido/lido-csm-prover-tool/Chart.yaml new file mode 100644 index 00000000..2063330f --- /dev/null +++ b/helm/lido/lido-csm-prover-tool/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: lido-csm-prover-tool +description: Lido CSM Prover Tool Helm chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + version: "1.0.1" + repository: "file://../lido-app" diff --git a/helm/lido/lido-csm-prover-tool/charts/lido-app-1.0.1.tgz b/helm/lido/lido-csm-prover-tool/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..358dff63 Binary files /dev/null and b/helm/lido/lido-csm-prover-tool/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-csm-prover-tool/values.yaml b/helm/lido/lido-csm-prover-tool/values.yaml new file mode 100644 index 00000000..36dccf78 --- /dev/null +++ b/helm/lido/lido-csm-prover-tool/values.yaml @@ -0,0 +1,115 @@ +lido-app: + nameOverride: lido-csm-prover-tool + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lidofinance/csm-prover-tool + tag: "dev" + pullPolicy: Always + 
imagePullSecrets: + - name: registry-pull-secret + # Example: + # imagePullSecrets: + # - name: myregistrykey + + service: + type: ClusterIP + ports: + - name: metrics + port: 9090 + targetPort: 9090 + protocol: TCP + - name: http-health + port: 8081 + targetPort: 8081 + protocol: TCP + + # Container command + command: [ ] + + # Container args + args: [ ] + + # Environment variables + env: + # Static environment variables + variables: + PROMETHEUS_PORT: 9000 + HEALTHCHECK_SERVER_PORT: 8080 + CHAIN_ID: ${CHAIN_ID} + EL_RPC_URLS: ${EL_RPC_URLS} + CL_API_URLS: ${CL_API_URLS} + KEYSAPI_API_URLS: ${KEYSAPI_API_URLS} + CSM_ADDRESS: ${CSM_ADDRESS} + VERIFIER_ADDRESS: ${VERIFIER_ADDRESS} + TX_SIGNER_PRIVATE_KEY: ${TX_SIGNER_PRIVATE_KEY} + START_ROOT: ${START_ROOT} + + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + exec: + command: ["curl", "-f", "http://localhost:8080/health"] + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + exec: + command: ["curl", "-f", "http://localhost:8080/health"] + initialDelaySeconds: 5 + periodSeconds: 5 + + # Additional labels + additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: [ ] + # Example PVC configuration: + # pvcs: + # - name: data-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 10Gi + # mountPath: /data + # - name: logs-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 5Gi + # mountPath: /logs + diff --git a/helm/lido/lido-docker-registry/Chart.lock 
b/helm/lido/lido-docker-registry/Chart.lock new file mode 100644 index 00000000..6c04c1ab --- /dev/null +++ b/helm/lido/lido-docker-registry/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: docker-registry-ui + repository: file://../../vendor/docker-registry-ui + version: 1.1.4-patched +digest: sha256:4a61c82cd046410dce73fc2d85664cda1fdcedfa7e8c514efb025bfd52cff1f0 +generated: "2025-09-17T02:09:02.675330592+03:00" diff --git a/helm/lido/lido-docker-registry/Chart.yaml b/helm/lido/lido-docker-registry/Chart.yaml new file mode 100644 index 00000000..ea78ac1d --- /dev/null +++ b/helm/lido/lido-docker-registry/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: lido-docker-registry +description: Lido Docker Registry for Devnets +type: application +version: 0.1.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: +- name: "docker-registry-ui" + version: "1.1.4-patched" + repository: "file://../../vendor/docker-registry-ui" diff --git a/helm/lido/lido-docker-registry/charts/docker-registry-ui-1.1.4-patched.tgz b/helm/lido/lido-docker-registry/charts/docker-registry-ui-1.1.4-patched.tgz new file mode 100644 index 00000000..6e6367a5 Binary files /dev/null and b/helm/lido/lido-docker-registry/charts/docker-registry-ui-1.1.4-patched.tgz differ diff --git a/helm/lido/lido-docker-registry/values.yaml b/helm/lido/lido-docker-registry/values.yaml new file mode 100644 index 00000000..68d2ee4a --- /dev/null +++ b/helm/lido/lido-docker-registry/values.yaml @@ -0,0 +1,263 @@ +global: + # Lido specific values + lido: + registryHostname: "registry.example.com" + registryScheme: "https" + registryUiHostname: "registry-ui.example.com" + registryUiScheme: "https" + +## docker-registry-ui subchart configuration +docker-registry-ui: + ## Global + global: + # Set the prefix used for all resources in the Helm chart. If not set, + # the prefix will be ``. + name: null + # The default array of objects containing image pull secret names that will be applied. 
+ imagePullSecrets: [ ] + # The default image policy for images: `IfNotPresent`, `Always`, `Never` + imagePullPolicy: IfNotPresent + + ## User Interface + ui: + # Number of replicas for the Deployment. + replicas: 1 + # Title of the registry + title: "Docker registry UI" + # UI behave as a proxy of the registry + proxy: false + # The URL of your docker registry, may be a service (when proxy is on) or an external URL. + dockerRegistryUrl: '{{ printf "%s://%s" .Values.global.lido.registryScheme .Values.global.lido.registryHostname }}' + # Override the pull URL + pullUrl: null + # Remove the menu that show the dialogs to add, remove and change the endpoint of your docker registry. + singleRegistry: true + # By default, the UI will check on every requests if your registry is secured or not (you will see `401` responses in your console). Set to `true` if your registry uses Basic Authentication and divide by two the number of call to your registry. + registrySecured: true + + # Show number of tags per images on catalog page. This will produce + nb images requests, not recommended on large registries. + showCatalogNbTags: false + # Limit the number of elements in the catalog page. + catalogElementsLimit: 1000 + # Expand by default all repositories in catalog + catalogDefaultExpanded: false + # Set the minimum repository/namespace to expand (e.g. `joxit/docker-registry-ui` `joxit/` is the repository/namespace). Can be 0 to disable branching. + catalogMinBranches: 1 + # Set the maximum repository/namespace to expand (e.g. `joxit/docker-registry-ui` `joxit/` is the repository/namespace). Can be 0 to disable branching. + catalogMaxBranches: 1 + + # Allow delete of images + deleteImages: true + # Show content digest in docker tag list. 
+ showContentDigest: false + # Set the default order for the taglist page, could be `num-asc;alpha-asc`, `num-desc;alpha-asc`, `num-asc;alpha-desc`, `num-desc;alpha-desc`, `alpha-asc;num-asc`, `alpha-asc;num-desc`, `alpha-desc;num-asc` or `alpha-desc;num-desc`. + taglistOrder: alpha-asc;num-desc + # Set the number of tags to display in one page. + taglistPageSize: 100 + + # Expose custom labels in history page, custom labels will be processed like maintainer label. + historyCustomLabels: [ ] + + # Update the default Nginx configuration and **set custom headers** for your backend docker registry. Only when `ui.proxy` is used. + # Example: + # nginxProxyHeaders: + # [ { my-header-name: my-header-value } ] + nginxProxyHeaders: [ ] + # Update the default Nginx configuration and **forward custom headers** to your backend docker registry. Only when `ui.proxy` is used. + # Example: + # nginxProxyPassHeaders: [ my-first-header, my-second-header ] + nginxProxyPassHeaders: [ ] + # Add header Control-Cache: no-store, no-cache on requests to registry server. + # This needs to update your registry configuration with : `Access-Control-Allow-Headers: ['Authorization', 'Accept', 'Cache-Control']` + useControlCacheHeader: false + # Use root or nginx user inside the container, when this is false the target port must be greater or equal to 1024. 
+ runAsRoot: false + + # Select the default theme to apply, values can be `auto`, `dark` and `light` + defaultTheme: "auto" + + theme: + # Custom background color for the UI + background: "" + # Custom primary text color for the UI + primaryText: "" + # Custom neutral color for the UI (icons) + neutralText: "" + # Custom accent color for the UI (buttons) + accentText: "" + # Custom hover background color for the UI + hoverBackground: "" + # Custom header background color for the UI + headerBackground: "" + # Custom header text color for the UI + headerText: "" + # Custom footer background color for the UI + footerBackground: "" + # Custom footer text color for the UI + footerText: "" + # Custom footer neutral color for the UI (links) + footerNeutralText: "" + + # The name and tag of the docker image of the interface + image: joxit/docker-registry-ui:2.5.2 + # Override default image pull secrets + imagePullSecrets: "-" + # Override default pull policy + imagePullPolicy: "-" + # The resource settings for user interface pod. + resources: { } + # Optional YAML string to specify a nodeSelector config. + nodeSelector: { } + # Optional YAML string to specify tolerations. + tolerations: [ ] + # This value defines the [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) + # for server pods. + affinity: { } + # Annotations to apply to the user interface deployment. + annotations: { } + # Optional YAML string that will be appended to the deployment spec. + additionalSpec: { } + + service: + # Type of service: `LoadBalancer`, `ClusterIP` or `NodePort`. If using `NodePort` service + # type, you must set the desired `nodePorts` setting below. + type: ClusterIP + # Ports that will be exposed on the service + port: 80 + # The port to listen on the container. If under 1024, the user must be root + targetPort: 3000 + # If using a `NodePort` service type, you must specify the desired `nodePort` for each exposed port. 
+ nodePort: null + # Annotations to apply to the user interface service. + annotations: { } + # Optional YAML string that will be appended to the Service spec. + additionalSpec: { } + + ingress: + # Enable the ingress for the user interface. + enabled: true + # Fully qualified domain name of a network host. + host: '{{ .Values.global.lido.registryUiHostname }}' + # Path is matched against the path of an incoming request. + path: / + # Determines the interpretation of the Path matching, must be Prefix to serve assets. + pathType: Prefix + # The name of an IngressClass cluster resource. + ingressClassName: public + # TLS configuration + tls: + - hosts: + - '{{ .Values.global.lido.registryUiHostname }}' + secretName: container-registry-ui-tls-cert + # Annotations to apply to the user interface ingress. + annotations: + "cert-manager.io/cluster-issuer": "letsencrypt" + "nginx.ingress.kubernetes.io/enable-cors": "true" + "nginx.ingress.kubernetes.io/proxy-body-size": "100m" + "nginx.ingress.kubernetes.io/cors-allow-credentials": "true" + "nginx.ingress.kubernetes.io/cors-allow-origin": '{{ printf "%s://%s" .Values.global.lido.registryUiScheme .Values.global.lido.registryUiHostname }}' + "nginx.ingress.kubernetes.io/cors-allow-methods": "GET, POST, PUT, DELETE, OPTIONS" + "nginx.ingress.kubernetes.io/cors-allow-headers": "DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization" + # If you want a custom path, you can try this example: + # path: /ui(/|$)(.*) + # annotations: + # { nginx.ingress.kubernetes.io/rewrite-target: /$2 } + + ## Registry Server + registry: + # Enable the registry server. + enabled: true + # The name and tag of the docker registry server image + image: registry:2.8.2 + # Override default image pull secrets + imagePullSecrets: "-" + # Override default pull policy + imagePullPolicy: "-" + # Configuration for the data directory. When null it will create an emptyDir. 
+ dataVolume: null + # Persistence configuration for registry data + persistence: + # Enable persistence for registry data + enabled: true + # Storage class to use for the PVC. Use "-" to disable dynamic provisioning + storageClass: "ssd-hostpath" + # Access modes for the PVC + accessModes: + - ReadWriteOnce + # Size of the persistent volume + size: 50Gi + # Annotations for the PVC + annotations: { } + # Selector for existing PV (optional) + selector: { } + # The resource settings for registry server pod. + resources: { } + # Optional YAML string to specify a nodeSelector config. + nodeSelector: { } + # Optional YAML string to specify tolerations. + tolerations: [ ] + # This value defines the [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) + # for server pods. + affinity: { } + # Annotations to apply to the registry server deployment. + annotations: { } + # Optional YAML string that will be appended to the deployment spec. + additionalSpec: { } + # Extra Environmental Variables for Registry + extraEnv: [ ] + + auth: + basic: + # Enable basic auth for Registry. + enabled: true + # Basic auth realm. + realm: Docker registry + # Full path for htpasswd file. Note that filename should match the secret key. + htpasswdPath: /etc/docker/registry/auth/htpasswd + # htpasswd secret name volume to mount. + secretName: 'registry-auth-secret' + + service: + # Type of service: `LoadBalancer`, `ClusterIP` or `NodePort`. If using `NodePort` service + # type, you must set the desired `nodePorts` setting below. + type: ClusterIP + # Ports that will be exposed on the service + port: 5000 + # The port to listen on the container. + targetPort: 5000 + # If using a `NodePort` service type, you must specify the desired `nodePort` for each exposed port. + nodePort: null + # Annotations to apply to the registry server service. + annotations: { } + # Optional YAML string that will be appended to the Service spec. 
+ additionalSpec: { } + + ingress: + # Enable the ingress for the registry server. + enabled: true + # Fully qualified domain name of a network host. + host: '{{ .Values.global.lido.registryHostname }}' + # Path is matched against the path of an incoming request. + path: / + # Determines the interpretation of the Path matching, must be Prefix to serve assets. + pathType: Prefix + # The name of an IngressClass cluster resource. + ingressClassName: public + # TLS configuration + tls: + - hosts: + - '{{ .Values.global.lido.registryHostname }}' + secretName: container-registry-tls-cert + # Annotations to apply to the registry server ingress. + annotations: + "cert-manager.io/cluster-issuer": "letsencrypt" + "nginx.ingress.kubernetes.io/proxy-body-size": "1024m" + "nginx.ingress.kubernetes.io/enable-cors": "true" + "nginx.ingress.kubernetes.io/cors-allow-credentials": "true" + "nginx.ingress.kubernetes.io/cors-allow-origin": '{{ printf "%s://%s" .Values.global.lido.registryUiScheme .Values.global.lido.registryUiHostname }}' + "nginx.ingress.kubernetes.io/cors-allow-methods": "GET, POST, PUT, DELETE, OPTIONS" + "nginx.ingress.kubernetes.io/cors-allow-headers": "DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization" + # If you want a custom path, you can try this example: + # path: /api(/|$)(.*) + # annotations: + # { nginx.ingress.kubernetes.io/rewrite-target: /$2 } diff --git a/helm/lido/lido-dsm-bot/Chart.lock b/helm/lido/lido-dsm-bot/Chart.lock new file mode 100644 index 00000000..e54840a8 --- /dev/null +++ b/helm/lido/lido-dsm-bot/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +digest: sha256:8a168e54af9a00f2953fbe69e5ada62110811d254bc1373771f0bd1248da4988 +generated: "2025-09-25T16:38:59.449244236+03:00" diff --git a/helm/lido/lido-dsm-bot/Chart.yaml b/helm/lido/lido-dsm-bot/Chart.yaml new file mode 100644 index 00000000..33b5cf5e --- /dev/null +++ 
b/helm/lido/lido-dsm-bot/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: lido-dsm-bot +description: Lido Dsm Bot (depositor, pause, unvetter) chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + version: "1.0.1" + repository: "file://../lido-app" diff --git a/helm/lido/lido-dsm-bot/charts/lido-app-1.0.1.tgz b/helm/lido/lido-dsm-bot/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..09125e08 Binary files /dev/null and b/helm/lido/lido-dsm-bot/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-dsm-bot/values.yaml b/helm/lido/lido-dsm-bot/values.yaml new file mode 100644 index 00000000..eaafd035 --- /dev/null +++ b/helm/lido/lido-dsm-bot/values.yaml @@ -0,0 +1,102 @@ +lido-app: + nameOverride: lido-dsm-bot + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lidofinance/OVERRIDE-ME + tag: "dev" + pullPolicy: Always + imagePullSecrets: + - name: registry-pull-secret + + service: + type: ClusterIP + ports: + - name: metrics + port: 9000 + targetPort: 9000 + protocol: TCP + - name: http-health + port: 9010 + targetPort: 9010 + protocol: TCP + + # Container command + command: [ ] + + # Container args + args: ["depositor"] + + # Environment variables + env: + # Static environment variables + variables: {} + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + httpGet: + path: / + port: http-health + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + httpGet: + path: / + port: http-health + 
initialDelaySeconds: 5 + periodSeconds: 5 + + # Additional labels + additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: [ ] + # Example PVC configuration: + # pvcs: + # - name: data-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 10Gi + # mountPath: /data + # - name: logs-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 5Gi + # mountPath: /logs diff --git a/helm/lido/lido-kapi/Chart.lock b/helm/lido/lido-kapi/Chart.lock new file mode 100644 index 00000000..f1edce49 --- /dev/null +++ b/helm/lido/lido-kapi/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +- name: postgresql + repository: file://../../vendor/postgresql + version: 16.7.26 +digest: sha256:2077c2883cae6c9eb753da2d8fe46cd06721be6bb16d6eb109b7b55bcb6460d5 +generated: "2025-09-25T16:38:47.341940352+03:00" diff --git a/helm/lido/lido-kapi/Chart.yaml b/helm/lido/lido-kapi/Chart.yaml new file mode 100644 index 00000000..57599c7e --- /dev/null +++ b/helm/lido/lido-kapi/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: lido-kapi +description: Lido KAPI Helm chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + version: "1.0.1" + repository: "file://../lido-app" + - name: "postgresql" + version: "16.7.26" + repository: "file://../../vendor/postgresql" diff --git a/helm/lido/lido-kapi/charts/lido-app-1.0.1.tgz b/helm/lido/lido-kapi/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..bf1a8552 Binary files /dev/null and b/helm/lido/lido-kapi/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-kapi/charts/postgresql-16.7.26.tgz b/helm/lido/lido-kapi/charts/postgresql-16.7.26.tgz new file mode 100644 index 00000000..4477df6d Binary files /dev/null and 
b/helm/lido/lido-kapi/charts/postgresql-16.7.26.tgz differ diff --git a/helm/lido/lido-kapi/values.yaml b/helm/lido/lido-kapi/values.yaml new file mode 100644 index 00000000..07503d3b --- /dev/null +++ b/helm/lido/lido-kapi/values.yaml @@ -0,0 +1,164 @@ +lido-app: + nameOverride: lido-kapi + # Default values for backend. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + # Docker registry to pull the image from + registry: docker.io + # Repository/image name + repository: lidofinance/lido-keys-api + # Image tag (can be overridden) + tag: "dev" + # Image pull policy + pullPolicy: Always + # Image pull secrets for private registries + imagePullSecrets: + - name: registry-pull-secret + + service: + type: ClusterIP + ports: + - name: http + port: 3000 + targetPort: 3000 + protocol: TCP + + # Ingress configuration + ingress: + # Enable/disable ingress + enabled: true + # Ingress class name + className: "public" + # Ingress annotations + annotations: {} + # Example annotations: + # annotations: + # nginx.ingress.kubernetes.io/rewrite-target: / + # cert-manager.io/cluster-issuer: letsencrypt-prod + + # Ingress hosts configuration + hosts: + - host: kapi.local + paths: + - path: / + pathType: Prefix + port: http + + # TLS configuration + tls: [] + # Example TLS: + # tls: + # - secretName: backend-tls + # hosts: + # - backend.local + + # Environment variables + env: + # Static environment variables + variables: + NODE_ENV: production + PORT: 3000 + CORS_WHITELIST_REGEXP: ${CORS_WHITELIST_REGEXP} + GLOBAL_THROTTLE_TTL: ${GLOBAL_THROTTLE_TTL} + GLOBAL_THROTTLE_LIMIT: ${GLOBAL_THROTTLE_LIMIT} + GLOBAL_CACHE_TTL: ${GLOBAL_CACHE_TTL} + LOG_LEVEL: info + LOG_FORMAT: json + PROVIDERS_URLS: ${PROVIDERS_URLS} + CL_API_URLS: ${CL_API_URLS} + IS_DEVNET_MODE: ${IS_DEVNET_MODE} + CHAIN_ID: ${CHAIN_ID} + DB_NAME: kapi-db + DB_PORT: 5432 + 
DB_HOST: lido-kapi-1-postgresql + DB_USER: postgres + DB_PASSWORD: changemeasap + JOB_INTERVAL_REGISTRY: ${JOB_INTERVAL_REGISTRY} + VALIDATOR_REGISTRY_ENABLE: ${VALIDATOR_REGISTRY_ENABLE} + JOB_INTERVAL_VALIDATORS_REGISTRY: ${JOB_INTERVAL_VALIDATORS_REGISTRY} + LIDO_LOCATOR_DEVNET_ADDRESS: ${LIDO_LOCATOR_DEVNET_ADDRESS} + CURATED_MODULE_DEVNET_ADDRESS: ${CURATED_MODULE_DEVNET_ADDRESS} + CSM_MODULE_DEVNET_ADDRESS: ${CSM_MODULE_DEVNET_ADDRESS} + STAKING_ROUTER_DEVNET_ADDRESS: ${STAKING_ROUTER_DEVNET_ADDRESS} + + # Resource limits and requests + resources: {} + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: {} + + # Tolerations + tolerations: [] + + # Affinity rules + affinity: {} + + # Security context + securityContext: {} + + # Pod security context + podSecurityContext: {} + + # Liveness probe + livenessProbe: + httpGet: + path: /v1/status + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + httpGet: + path: /v1/status + port: http + initialDelaySeconds: 15 + periodSeconds: 10 + + # Additional labels + additionalLabels: {} + + # Additional annotations + additionalAnnotations: {} + + +postgresql: + global: + postgresql: + auth: + postgresPassword: "changemeasap" + username: "admin" + password: "changemeasap" + database: "kapi-db" + existingSecret: "" + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + service: + ports: + postgresql: "" + image: + registry: docker.io + repository: bitnami/postgresql + tag: "latest" + digest: "" + pullPolicy: IfNotPresent + primary: + resourcesPreset: "2xlarge" + resources: { } + diff --git a/helm/lido/lido-kubo/Chart.lock b/helm/lido/lido-kubo/Chart.lock new file mode 100644 index 00000000..ed698a4b --- /dev/null +++ b/helm/lido/lido-kubo/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 
+digest: sha256:8a168e54af9a00f2953fbe69e5ada62110811d254bc1373771f0bd1248da4988 +generated: "2025-09-25T16:38:37.747373651+03:00" diff --git a/helm/lido/lido-kubo/Chart.yaml b/helm/lido/lido-kubo/Chart.yaml new file mode 100644 index 00000000..c8411f23 --- /dev/null +++ b/helm/lido/lido-kubo/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: lido-kubo +description: Lido Kubo IPFS chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + version: "1.0.1" + repository: "file://../lido-app" diff --git a/helm/lido/lido-kubo/charts/lido-app-1.0.1.tgz b/helm/lido/lido-kubo/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..40b68873 Binary files /dev/null and b/helm/lido/lido-kubo/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-kubo/values.yaml b/helm/lido/lido-kubo/values.yaml new file mode 100644 index 00000000..b420c9c3 --- /dev/null +++ b/helm/lido/lido-kubo/values.yaml @@ -0,0 +1,115 @@ +lido-app: + nameOverride: lido-kubo + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lidofinance/kubo + tag: "dev" + pullPolicy: Always + imagePullSecrets: + - name: registry-pull-secret + + service: + type: ClusterIP + ports: + - name: http + port: 8080 + targetPort: 8080 + protocol: TCP + - name: http-api + port: 5001 + targetPort: 5001 + protocol: TCP + + # Ingress configuration + ingress: + # Enable/disable ingress + enabled: true + # Ingress class name + className: "public" + # Ingress annotations + annotations: {} + # Example annotations: + # annotations: + # nginx.ingress.kubernetes.io/rewrite-target: / + # cert-manager.io/cluster-issuer: letsencrypt-prod + + # Ingress hosts configuration + hosts: + - host: kubo-api.local + paths: + - path: / + pathType: Prefix + port: http-api + + # Container command + command: [ ] + + # Container args + args: ["daemon", "--migrate=true", 
"--agent-version-suffix=docker"] + + # Environment variables + env: + # Static environment variables + variables: + CHAIN: ${CHAIN} + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + httpGet: + path: /debug/metrics/prometheus + port: http-api + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + httpGet: + path: /debug/metrics/prometheus + port: http-api + initialDelaySeconds: 5 + periodSeconds: 5 + + # Additional labels + additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: + - name: ipfs-data + accessModes: + - ReadWriteOnce + size: 16Gi + mountPath: /data/ipfs diff --git a/helm/lido/lido-late-prover-bot/Chart.lock b/helm/lido/lido-late-prover-bot/Chart.lock new file mode 100644 index 00000000..c7d8cec0 --- /dev/null +++ b/helm/lido/lido-late-prover-bot/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +digest: sha256:8a168e54af9a00f2953fbe69e5ada62110811d254bc1373771f0bd1248da4988 +generated: "2025-10-02T18:44:00.307023+04:00" diff --git a/helm/lido/lido-late-prover-bot/Chart.yaml b/helm/lido/lido-late-prover-bot/Chart.yaml new file mode 100644 index 00000000..a32422d0 --- /dev/null +++ b/helm/lido/lido-late-prover-bot/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: lido-late-prover-bot +description: Lido Late Prover Bot Helm chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + version: "1.0.1" + repository: "file://../lido-app" diff --git 
a/helm/lido/lido-late-prover-bot/charts/lido-app-1.0.1.tgz b/helm/lido/lido-late-prover-bot/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..f0b2e422 Binary files /dev/null and b/helm/lido/lido-late-prover-bot/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-late-prover-bot/values.yaml b/helm/lido/lido-late-prover-bot/values.yaml new file mode 100644 index 00000000..0549dce7 --- /dev/null +++ b/helm/lido/lido-late-prover-bot/values.yaml @@ -0,0 +1,111 @@ +lido-app: + nameOverride: lido-late-prover-bot + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lidofinance/late-prover-bot + tag: "dev" + pullPolicy: Always + imagePullSecrets: + - name: registry-pull-secret + # Example: + # imagePullSecrets: + # - name: myregistrykey + + service: + type: ClusterIP + ports: + - name: metrics + port: 9090 + targetPort: 9090 + protocol: TCP + - name: http-health + port: 8081 + targetPort: 8081 + protocol: TCP + + # Container command + command: [ ] + + # Container args + args: [ ] + + # Environment variables + env: + # Static environment variables + variables: + PROMETHEUS_PORT: 9090 + HEALTHCHECK_SERVER_PORT: 8081 + CHAIN_ID: ${CHAIN_ID} + EL_RPC_URLS: ${EL_RPC_URLS} + CL_API_URLS: ${CL_API_URLS} + LIDO_LOCATOR_ADDRESS: ${LIDO_LOCATOR_ADDRESS} + TX_SIGNER_PRIVATE_KEY: ${TX_SIGNER_PRIVATE_KEY} + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + exec: + command: ["curl", "-f", "http://localhost:8081/health"] + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + exec: + 
command: ["curl", "-f", "http://localhost:8081/health"] + initialDelaySeconds: 5 + periodSeconds: 5 + + # Additional labels + additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: [ ] + # Example PVC configuration: + # pvcs: + # - name: data-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 10Gi + # mountPath: /data + # - name: logs-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 5Gi + # mountPath: /logs + diff --git a/helm/lido/lido-no-widget-backend/Chart.lock b/helm/lido/lido-no-widget-backend/Chart.lock new file mode 100644 index 00000000..f8306672 --- /dev/null +++ b/helm/lido/lido-no-widget-backend/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +- name: postgresql + repository: file://../../vendor/postgresql + version: 16.7.26 +digest: sha256:4b1fb883fe68913461690541345344f90303faf0c6c7970228b81e8bd0550b4f +generated: "2025-09-25T16:38:17.990681222+03:00" diff --git a/helm/lido/lido-no-widget-backend/Chart.yaml b/helm/lido/lido-no-widget-backend/Chart.yaml new file mode 100644 index 00000000..6dc5bb01 --- /dev/null +++ b/helm/lido/lido-no-widget-backend/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +name: lido-no-widget-backend +description: Lido Node Operators Widget Backend chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + alias: "api" + version: "1.0.1" + repository: "file://../lido-app" + - name: "lido-app" + alias: "worker" + version: "1.0.1" + repository: "file://../lido-app" + - name: "postgresql" + version: "16.7.26" + repository: "file://../../vendor/postgresql" diff --git a/helm/lido/lido-no-widget-backend/charts/lido-app-1.0.1.tgz 
b/helm/lido/lido-no-widget-backend/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..4ea5234d Binary files /dev/null and b/helm/lido/lido-no-widget-backend/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-no-widget-backend/charts/postgresql-16.7.26.tgz b/helm/lido/lido-no-widget-backend/charts/postgresql-16.7.26.tgz new file mode 100644 index 00000000..f203ec0d Binary files /dev/null and b/helm/lido/lido-no-widget-backend/charts/postgresql-16.7.26.tgz differ diff --git a/helm/lido/lido-no-widget-backend/values.yaml b/helm/lido/lido-no-widget-backend/values.yaml new file mode 100644 index 00000000..69e0c2da --- /dev/null +++ b/helm/lido/lido-no-widget-backend/values.yaml @@ -0,0 +1,317 @@ +api: + nameOverride: lido-no-widget-backend-api + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lido/no-widget-backend + tag: "dev" + pullPolicy: Always + imagePullSecrets: + - name: registry-pull-secret + + service: + type: ClusterIP + ports: + - name: http + port: 3000 + targetPort: 3000 + protocol: TCP + + # Container command + command: [ ] + + # Container args + args: [] + + # Ingress configuration + ingress: + # Enable/disable ingress + enabled: true + # Ingress class name + className: "public" + # Ingress annotations + annotations: {} + # Example annotations: + # annotations: + # nginx.ingress.kubernetes.io/rewrite-target: / + # cert-manager.io/cluster-issuer: letsencrypt-prod + + # Ingress hosts configuration + hosts: + - host: no-widget-backend-api.local + paths: + - path: / + pathType: Prefix + port: http + + # TLS configuration + tls: [] + # Example TLS: + # tls: + # - secretName: backend-tls + # hosts: + # - backend.local + + # Environment variables + env: + # Static environment variables + variables: + NODE_ENV: ${NODE_ENV:-development} + PORT: ${PORT:-3000} + CORS_WHITELIST_REGEXP: ${CORS_WHITELIST_REGEXP} + GLOBAL_THROTTLE_TTL: 
${GLOBAL_THROTTLE_TTL:-5} + GLOBAL_THROTTLE_LIMIT: ${GLOBAL_THROTTLE_LIMIT:-100} + GLOBAL_CACHE_TTL: ${GLOBAL_CACHE_TTL:-1} + SENTRY_DSN: ${SENTRY_DSN} + LOG_LEVEL: ${LOG_LEVEL:-info} + LOG_FORMAT: ${LOG_FORMAT:-json} + KEYS_API_HOST: ${KEYS_API_HOST} + EL_API_URLS: ${EL_API_URLS} + CHAIN_ID: ${CHAIN_ID} + PG_HOST: lido-no-widget-backend-postgresql + POSTGRES_USER: postgres + POSTGRES_PASSWORD: changemeasap + POSTGRES_DB: no-widget-backend-db + PG_PORT: 5432 + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + httpGet: + path: /v1/status + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + httpGet: + path: /v1/status + port: http + initialDelaySeconds: 15 + periodSeconds: 5 + + # Additional labels + additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: [ ] + # Example PVC configuration: + # pvcs: + # - name: data-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 10Gi + # mountPath: /data + # - name: logs-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 5Gi + # mountPath: /logs + +worker: + nameOverride: lido-no-widget-backend-worker + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lido/no-widget-backend + tag: "dev" + pullPolicy: Always + imagePullSecrets: + - name: registry-pull-secret + + service: + type: ClusterIP + ports: + - name: http + port: 3000 + targetPort: 3000 + protocol: TCP + + # Container 
command + command: ["yarn", "start:worker"] + + # Container args + args: [] + + # Ingress configuration + ingress: + # Enable/disable ingress + enabled: false + # Ingress class name + className: "public" + # Ingress annotations + annotations: {} + # Example annotations: + # annotations: + # nginx.ingress.kubernetes.io/rewrite-target: / + # cert-manager.io/cluster-issuer: letsencrypt-prod + + # Ingress hosts configuration + hosts: + - host: no-widget-backend-worker.local + paths: + - path: / + pathType: Prefix + port: http + + # TLS configuration + tls: [] + # Example TLS: + # tls: + # - secretName: backend-tls + # hosts: + # - backend.local + + # Environment variables + env: + # Static environment variables + variables: + NODE_ENV: ${NODE_ENV:-development} + PORT: ${PORT:-3000} + CORS_WHITELIST_REGEXP: ${CORS_WHITELIST_REGEXP} + GLOBAL_THROTTLE_TTL: ${GLOBAL_THROTTLE_TTL:-5} + GLOBAL_THROTTLE_LIMIT: ${GLOBAL_THROTTLE_LIMIT:-100} + GLOBAL_CACHE_TTL: ${GLOBAL_CACHE_TTL:-1} + SENTRY_DSN: ${SENTRY_DSN} + LOG_LEVEL: ${LOG_LEVEL:-info} + LOG_FORMAT: ${LOG_FORMAT:-json} + KEYS_API_HOST: ${KEYS_API_HOST} + EL_API_URLS: ${EL_API_URLS} + CHAIN_ID: ${CHAIN_ID} + DEVNET_GENESIS_FORK_VERSION: ${DEVNET_GENESIS_FORK_VERSION} + LIDO_DEVNET_ADDRESS: ${LIDO_DEVNET_ADDRESS} + PG_HOST: lido-no-widget-backend-postgresql + POSTGRES_USER: postgres + POSTGRES_PASSWORD: changemeasap + POSTGRES_DB: no-widget-backend-db + PG_PORT: 5432 + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + httpGet: + path: /v1/status + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + httpGet: + 
path: /v1/status + port: http + initialDelaySeconds: 10 + periodSeconds: 5 + + # Additional labels + additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: [ ] + # Example PVC configuration: + # pvcs: + # - name: data-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 10Gi + # mountPath: /data + # - name: logs-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 5Gi + # mountPath: /logs + +postgresql: + global: + postgresql: + auth: + postgresPassword: "changemeasap" + username: "admin" + password: "changemeasap" + database: "no-widget-backend-db" + existingSecret: "" + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + service: + ports: + postgresql: "" + image: + registry: docker.io + repository: bitnami/postgresql + tag: 17.6.0-debian-12-r0 + digest: "" + pullPolicy: IfNotPresent + primary: + resourcesPreset: "2xlarge" + resources: { } diff --git a/helm/lido/lido-no-widget/Chart.lock b/helm/lido/lido-no-widget/Chart.lock new file mode 100644 index 00000000..403fab8b --- /dev/null +++ b/helm/lido/lido-no-widget/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +digest: sha256:5bf51314dd3340ef4c7c75e5e0f9ce1805f5395ac182fe13fbbe2608820eb051 +generated: "2025-09-25T16:38:30.766118321+03:00" diff --git a/helm/lido/lido-no-widget/Chart.yaml b/helm/lido/lido-no-widget/Chart.yaml new file mode 100644 index 00000000..4c2a720b --- /dev/null +++ b/helm/lido/lido-no-widget/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v2 +name: lido-no-widget +description: Lido Node Operators Widget chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + alias: "web" + version: "1.0.1" + repository: "file://../lido-app" diff --git 
a/helm/lido/lido-no-widget/charts/lido-app-1.0.1.tgz b/helm/lido/lido-no-widget/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..66482eda Binary files /dev/null and b/helm/lido/lido-no-widget/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-no-widget/values.yaml b/helm/lido/lido-no-widget/values.yaml new file mode 100644 index 00000000..7e8a2b4b --- /dev/null +++ b/helm/lido/lido-no-widget/values.yaml @@ -0,0 +1,132 @@ +web: + nameOverride: lido-no-widget + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lido/no-widget + tag: "dev" + pullPolicy: Always + imagePullSecrets: + - name: registry-pull-secret + + service: + type: ClusterIP + ports: + - name: http + port: 3000 + targetPort: 3000 + protocol: TCP + + # Container command + command: [ ] + + # Container args + args: [] + + # Ingress configuration + ingress: + # Enable/disable ingress + enabled: true + # Ingress class name + className: "public" + # Ingress annotations + annotations: {} + # Example annotations: + # annotations: + # nginx.ingress.kubernetes.io/rewrite-target: / + # cert-manager.io/cluster-issuer: letsencrypt-prod + + # Ingress hosts configuration + hosts: + - host: no-widget.local + paths: + - path: / + pathType: Prefix + port: http + + # TLS configuration + tls: [] + # Example TLS: + # tls: + # - secretName: backend-tls + # hosts: + # - backend.local + + # Environment variables + env: + # Static environment variables + variables: + NODE_ENV: ${NODE_ENV:-development} + EL_RPC_URLS_17000: + BACKEND_URL_17000: + SUPPORTED_CHAINS: 17000 # for any devnet, ignore real devnet chain ID + DEFAULT_CHAIN: 17000 # for any devnet + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + 
affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 15 + periodSeconds: 5 + + # Additional labels + additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: [ ] + # Example PVC configuration: + # pvcs: + # - name: data-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 10Gi + # mountPath: /data + # - name: logs-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 5Gi + # mountPath: /logs diff --git a/helm/lido/lido-oracle/Chart.lock b/helm/lido/lido-oracle/Chart.lock new file mode 100644 index 00000000..ae5c886b --- /dev/null +++ b/helm/lido/lido-oracle/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: lido-app + repository: file://../lido-app + version: 1.0.1 +digest: sha256:8a168e54af9a00f2953fbe69e5ada62110811d254bc1373771f0bd1248da4988 +generated: "2025-09-25T16:35:24.146706969+03:00" diff --git a/helm/lido/lido-oracle/Chart.yaml b/helm/lido/lido-oracle/Chart.yaml new file mode 100644 index 00000000..86863348 --- /dev/null +++ b/helm/lido/lido-oracle/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: lido-oracle +description: Lido Oracle Helm chart +type: application +version: 1.0.0 +appVersion: "2.5.2" +kubeVersion: '>=1.19.0-0' +dependencies: + - name: "lido-app" + version: "1.0.1" + repository: "file://../lido-app" diff --git a/helm/lido/lido-oracle/charts/lido-app-1.0.1.tgz b/helm/lido/lido-oracle/charts/lido-app-1.0.1.tgz new file mode 100644 index 00000000..17f086a5 Binary files /dev/null and b/helm/lido/lido-oracle/charts/lido-app-1.0.1.tgz differ diff --git a/helm/lido/lido-oracle/values.yaml b/helm/lido/lido-oracle/values.yaml 
new file mode 100644 index 00000000..85729d40 --- /dev/null +++ b/helm/lido/lido-oracle/values.yaml @@ -0,0 +1,116 @@ +lido-app: + nameOverride: lido-oracle + + # Number of replicas for the backend deployment + replicaCount: 1 + + # Image configuration + image: + registry: docker.io + repository: lidofinance/oracle + tag: "dev" + pullPolicy: Always + imagePullSecrets: + - name: registry-pull-secret + # Example: + # imagePullSecrets: + # - name: myregistrykey + + service: + type: ClusterIP + ports: + - name: metrics + port: 9000 + targetPort: 9000 + protocol: TCP + - name: http-health + port: 9010 + targetPort: 9010 + protocol: TCP + + # Container command + command: [ ] + + # Container args + args: ["accounting"] + + # Environment variables + env: + # Static environment variables + variables: + PROMETHEUS_PORT: 9000 + HEALTHCHECK_SERVER_PORT: 9010 + EXECUTION_CLIENT_URI: ${EXECUTION_CLIENT_URI} + CONSENSUS_CLIENT_URI: ${CONSENSUS_CLIENT_URI} + KEYS_API_URI: ${KEYS_API_URI} + MEMBER_PRIV_KEY: ${MEMBER_PRIV_KEY} + LIDO_LOCATOR_ADDRESS: ${LIDO_LOCATOR_ADDRESS} + CSM_MODULE_ADDRESS: ${CSM_MODULE_ADDRESS} + SUBMIT_DATA_DELAY_IN_SLOTS: ${SUBMIT_DATA_DELAY_IN_SLOTS} + ALLOW_REPORTING_IN_BUNKER_MODE: false + CSM_ORACLE_MAX_CONCURRENCY: "1" + KUBO_HOST: http://kubo:5001 + + # Resource limits and requests + resources: { } + # Example: + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Node selector + nodeSelector: { } + + # Tolerations + tolerations: [ ] + + # Affinity rules + affinity: { } + + # Security context + securityContext: { } + + # Pod security context + podSecurityContext: { } + + # Liveness probe + livenessProbe: + exec: + command: ["curl", "-f", "http://localhost:9010/healthcheck"] + initialDelaySeconds: 30 + periodSeconds: 10 + + # Readiness probe + readinessProbe: + exec: + command: ["curl", "-f", "http://localhost:9010/healthcheck"] + initialDelaySeconds: 5 + periodSeconds: 5 + + # Additional labels + 
additionalLabels: { } + + # Additional annotations + additionalAnnotations: { } + + # Persistent Volume Claims configuration + pvcs: [ ] + # Example PVC configuration: + # pvcs: + # - name: data-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 10Gi + # mountPath: /data + # - name: logs-storage + # storageClassName: "ssh-hostpath" + # accessModes: + # - ReadWriteOnce + # size: 5Gi + # mountPath: /logs + diff --git a/helm/vendor/blockscout-stack/.helmignore b/helm/vendor/blockscout-stack/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/helm/vendor/blockscout-stack/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/vendor/blockscout-stack/CHANGELOG.md b/helm/vendor/blockscout-stack/CHANGELOG.md new file mode 100644 index 00000000..d7cd8e3a --- /dev/null +++ b/helm/vendor/blockscout-stack/CHANGELOG.md @@ -0,0 +1,342 @@ +# ChangeLog + +## 3.3.0 + +### Feature + +- Redirect ingress now supports multiple domains (this is breaking change, if config.redirect is used) + +## 3.2.3 + +### Feature + +- Add ability to set deployment annotations + +## 3.2.2 + +### Fix + +- Adding missing stats pod annotations + +## 3.2.1 + +### Fix + +- Fixing nft resizer worker concurrency value in deployment + +## 3.2.0 + +### Feature + +- Adding parameter for nft resizer worker concurrency + +## 3.1.0 + +### Feature + +- Add ability to set base path for stats component + +## 3.0.0 + +### Feature + +- Change default docker registry for backend image from DockerHub to GH ContainerRegistry. 
Works only for 8.0.0 blockscout version, if you are using any previous version, do not update helm chart to 3.x +- API-only image removal, now configuration of API-only mode can be made in runtime with DISABLE_INDEXER=true +- Add prometheus rules for missing batch alerts + +## 2.2.0 + +### Feature + +- Shared certificate for frontend and backend if they run on same domain +- Adding possibility to redirect to frontend domain from any other domain (Helpfull when moving from one domain to another) + +## 2.1.1 + +### Fix + +- Adding imagePullSecret to backend migration job + +## 2.1.0 + +### Feature + +- Pointing sitemap.xml to frontend instance as it now served by frontend since 1.38.0 + +## 2.0.3 + +### Fix + +- Fixing sitemap.xml ingress pathType to pass NGINX validation + +## 2.0.2 + +### Fix + +- IPFS Gateway configuration for NFT resizer + +## 2.0.1 + +### Fix + +- Fix Monitoring when separateApi is enabled + +## 2.0.0 + +### Major Update + +- Updated app version to 7.0.0 +- Changed API healthcheck path (breaking change) +- Update supported Postgresql versions + +## 1.11.1 + +### Feature + +- Health path for blackbox exporter as parameter + +## 1.11.0 + +### Feature + +- Pinned stats version to 2.4 and made `STATS__BLOCKSCOUT_API_URL` env mandatory + +## 1.10.0 + +### Feature + +- ServiceMonitor for blackbox probing blockscout backend + +## 1.9.2 + +### Fix + +- Adding ONE missing variable (BLOCKSCOUT_HOST) to indexer pod when running separate from API. + +## 1.9.1 + +### Fix + +- Adding missing variables to indexer pod when running separate from API. 
+ +## 1.9.0 + +### Feature + +- Adding support for NFT storage +- Adding security context for migration jobs + +## 1.8.0 + +### Feature + +- Custom volume mount for blockscout backend deployment + +## 1.7.1 + +### Fix + +- Service selector labels now support fullnameOverride variable instead of Release.Name + +## 1.7.0 + +### Feature + +- Whitelist for metrics paths to avoid public access in secure environment + +## 1.6.11 + +### Fix + +- Stats: Rewrite check for conditional env, allow 'main', and 'latest' tags. + +## 1.6.10 + +### Feature + +- Support new stats (2.2.0) with required env + +## 1.6.9 + +### Fix + +- Enable stats probes by default + +## 1.6.8 + +### Fix + +- Rollback to 1.6.6 as pre-install hook deletes secret because of bug in helm + +## 1.6.7 + +### Fix + +- Adding hook annotations for blockscout secret to be created with migration job on new installation + +## 1.6.6 + +### Fix + +- Replace MODE with APPLICATION_MODE variable. + +## 1.6.5 + +### Fix + +- Fixing setting MODE env variable for indexer application. + +## 1.6.4 + +### Feature + +- Adding MODE env variable for backend to distinguish API/indexer applications. + +## 1.6.3 + +### Feature + +- Adding extraEnv for user-ops indexer and stats services deployments + +### Fixes + +- Fixed a typo in stats and user-ops indexer, where ```replicaCount``` was named ```replicasCount``` and was thus marked as undefined. + +## 1.6.2 + +### Features + +- Adding services (name service, user-ops) configuration to .config section + +## 1.6.1 + +### Features + +- Expose new backend ingress path - public metrics + +## 1.6.0 + +### Features + +- Adding possibility to run separate api and indexer deployment. More information on this can be found [here](https://docs.blockscout.com/for-developers/information-and-settings/separate-indexer-web-app-and-api). 
The minimum required backend version is 6.6.0 + +## 1.5.1 + +### Fixes + +- Do not refer to `envFromSecret` when not defined for backend/frontend + +## 1.5.0 + +### Feature + +- Add PodMonitor for frontend + +## 1.4.4 + +### Feature + +- Add `extraEnv` and `envFrom` for backend/frontend to refer to an existing Secret/ConfigMap +- Create Secrets for backend/frontend only when data is specified + +## 1.4.3 + +### Fixes + +- Making stats configmap name unique + +## 1.4.2 + +### Fixes + +- Fixing stats url condition in frontend deployment + +## 1.4.1 + +### Fixes + +- Fixing stats volume attachment + +## 1.4.0 + +### Feature + +- Adding user-ops-indexer service + +## 1.3.4 + +### Fixes + +- Fixed custom TLS secret name reference for frontend and stats ingresses + +## 1.3.3 + +### Feature + +- Custom secret name for ingress TLS + +## 1.3.2 + +### Fixes + +- Dual token condition for gnosis chain + +## 1.3.1 + +### Feature + +- Adding dualToken parameter for networks like Gnosis Chain + +## 1.3.0 + +### Feature + +- Update blockscout app to 5.3.0 +- Decrease blockscout frontend initialDelaySeconds to 30 + +## 1.2.0 + +### Feature + +- Stats currency symbol now is passed from ```config.network.currency.symbol``` (Stats version 1.4.1 and above required) + +## 1.1.4 + +### Fixes + +- Adjust default frontend resource +- Increase default servicemonitor timeout + +## 1.1.3 + +### Fixes + +- Fixing path for auth + +## 1.1.2 + +### Fixes + +- Fixing path to /sitemap.xml + +## 1.1.1 + +### Fixes + +- Fixing path to /socket + +## 1.1.0 + +### Features + +- Updating ingress paths for frontend and backend (this change requires at least v1.9.0 of frontend and 5.2.2 for backend) + +## 1.0.4 + +### Features + +- Added README, CHANGELOG + +### Fixes + +- Fixed fixing prometheus serviceMonitor always on [issue](https://github.com/blockscout/helm-charts/issues/1) diff --git a/helm/vendor/blockscout-stack/Chart.yaml b/helm/vendor/blockscout-stack/Chart.yaml new file mode 100644 index 
00000000..33b8fda7 --- /dev/null +++ b/helm/vendor/blockscout-stack/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +appVersion: 8.1.1 +description: A Helm chart to deploy Blockscout stack +name: blockscout-stack +type: application +version: 3.3.0 diff --git a/helm/vendor/blockscout-stack/README.md b/helm/vendor/blockscout-stack/README.md new file mode 100644 index 00000000..e59abe7e --- /dev/null +++ b/helm/vendor/blockscout-stack/README.md @@ -0,0 +1,51 @@ +# blockscout-stack + +![Version: 1.0.4](https://img.shields.io/badge/Version-1.0.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 5.2.2](https://img.shields.io/badge/AppVersion-5.2.2-informational?style=flat-square) + +A Helm chart to deploy Blockscout stack ([backend](https://github.com/blockscout/blockscout), [frontend](https://github.com/blockscout/frontend) and [stats](https://github.com/blockscout/blockscout-rs/tree/main/stats)) to kubernetes cluster + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3+ +- PostgreSQL version 12 to 17 +- Redis (if accounts blockscout feature is enabled) + +## Get Helm Repository Info + +```console +helm repo add blockscout https://blockscout.github.io/helm-charts +helm repo update +``` + +_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Helm Chart + +```console +helm install [RELEASE_NAME] blockscout/blockscout-stack +``` +_See [configuration](#configuration) below._ +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ +## Uninstall Helm Chart + +```console +helm uninstall [RELEASE_NAME] +``` +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] blockscout/blockscout-stack +``` + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments: + +```console +helm show values blockscout/blockscout-stack +``` +This chart does not contain default values for required ENV variables, before running it you should read carefully docs for [blockscout](https://docs.blockscout.com/setup/env-variables), [frontend](https://github.com/blockscout/frontend/blob/main/docs/ENVS.md) and [stats](https://github.com/blockscout/blockscout-rs/tree/main/stats) diff --git a/helm/vendor/blockscout-stack/templates/_helpers.tpl b/helm/vendor/blockscout-stack/templates/_helpers.tpl new file mode 100644 index 00000000..b13a5922 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "blockscout-stack.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "blockscout-stack.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "blockscout-stack.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "blockscout-stack.labels" -}} +helm.sh/chart: {{ include "blockscout-stack.chart" . }} +{{ include "blockscout-stack.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "blockscout-stack.selectorLabels" -}} +app.kubernetes.io/name: {{ include "blockscout-stack.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "blockscout-stack.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "blockscout-stack.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/blockscout-deployment.yaml b/helm/vendor/blockscout-stack/templates/blockscout-deployment.yaml new file mode 100644 index 00000000..fd74d320 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/blockscout-deployment.yaml @@ -0,0 +1,570 @@ +{{- if .Values.blockscout.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout + labels: + app: {{ include "blockscout-stack.fullname" . }}-blockscout + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.blockscout.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.blockscout.separateApi.enabled }} + replicas: {{ .Values.blockscout.separateApi.replicaCount }} + {{- else }} + replicas: {{ .Values.blockscout.replicaCount }} + {{- end }} + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . 
}}-blockscout + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/blockscout-secret.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: {{ include "blockscout-stack.fullname" . }}-blockscout + {{- include "blockscout-stack.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "blockscout-stack.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.blockscout.podSecurityContext | nindent 8 }} + {{- if and .Values.blockscout.init.enabled (not .Values.blockscout.separateApi.enabled) }} + initContainers: + - name: init-migrations + securityContext: + {{- toYaml .Values.blockscout.securityContext | nindent 12 }} + image: "{{ .Values.blockscout.image.repository }}:{{ .Values.blockscout.image.tag }}" + resources: + {{- toYaml .Values.blockscout.resources | nindent 12 }} + {{- with .Values.blockscout.init.command }} + command: {{ . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.blockscout.init.args }} + args: {{ . 
| toYaml | nindent 12 }} + {{- end }} + env: + - name: PORT + value: "4000" + - name: CHAIN_ID + value: {{ .Values.config.network.id | quote }} + {{- if and .Values.config.network.currency.symbol (not .Values.config.network.currency.dualToken) }} + - name: COIN + value: {{ .Values.config.network.currency.symbol | quote }} + - name: COIN_NAME + value: {{ .Values.config.network.currency.symbol | quote }} + {{- end }} + {{- if .Values.config.account.enabled }} + - name: ACCOUNT_ENABLED + value: "true" + {{- end }} + {{- if .Values.config.testnet }} + - name: SHOW_TESTNET_LABEL + value: "true" + {{- end }} + {{- if .Values.frontend.enabled }} + - name: API_V2_ENABLED + value: "true" + {{- end }} + {{- if .Values.blockscout.ingress.enabled }} + - name: BLOCKSCOUT_HOST + value: {{ .Values.blockscout.ingress.hostname | quote }} + {{- end }} + {{- range $key, $value := .Values.blockscout.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range .Values.blockscout.extraEnv }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- if or .Values.blockscout.envFromSecret .Values.blockscout.envFrom }} + envFrom: + {{- if .Values.blockscout.envFromSecret }} + - secretRef: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-env + {{- end }} + {{- range .Values.blockscout.envFrom }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-blockscout + securityContext: + {{- toYaml .Values.blockscout.securityContext | nindent 12 }} + image: "{{ .Values.blockscout.image.repository }}:{{ .Values.blockscout.image.tag }}" + resources: + {{- if .Values.blockscout.separateApi.enabled }} + {{- toYaml .Values.blockscout.separateApi.resources | nindent 12 }} + {{- else}} + {{- toYaml .Values.blockscout.resources | nindent 12 }} + {{- end }} + imagePullPolicy: {{ .Values.blockscout.image.pullPolicy }} + {{- with .Values.blockscout.command }} + command: {{ . 
| toYaml | nindent 12 }} + {{- end }} + {{- with .Values.blockscout.args }} + args: {{ . | toYaml | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 4000 + protocol: TCP + {{- if and .Values.blockscout.nftStorage.enabled (not .Values.blockscout.separateApi.enabled) }} + - name: epmd + containerPort: 4369 + protocol: TCP + - name: epmd-dyn-1 + containerPort: 9138 + protocol: TCP + - name: epmd-dyn-2 + containerPort: 9139 + protocol: TCP + {{- end }} + env: + {{- if .Values.blockscout.separateApi.enabled }} + - name: APPLICATION_MODE + value: "api" + {{- else }} + - name: APPLICATION_MODE + value: "all" + {{- end }} + - name: PORT + value: "4000" + {{- if and .Values.blockscout.nftStorage.enabled (not .Values.blockscout.separateApi.enabled) }} + - name: NFT_MEDIA_HANDLER_ENABLED + value: 'true' + - name: NFT_MEDIA_HANDLER_REMOTE_DISPATCHER_NODE_MODE_ENABLED + value: 'true' + - name: RELEASE_NODE + value: 'indexer@{{ .Release.Name }}-blockscout-svc.{{ .Release.Namespace }}.svc.cluster.local' + - name: RELEASE_DISTRIBUTION + value: 'name' + - name: NFT_MEDIA_HANDLER_BACKFILL_ENABLED + value: 'true' + - name: ERL_AFLAGS + value: "-kernel inet_dist_listen_min 9138 inet_dist_listen_max 9139" + {{- end }} + - name: CHAIN_ID + value: {{ .Values.config.network.id | quote }} + {{- if and .Values.config.network.currency.symbol (not .Values.config.network.currency.dualToken) }} + - name: COIN + value: {{ .Values.config.network.currency.symbol | quote }} + - name: COIN_NAME + value: {{ .Values.config.network.currency.symbol | quote }} + {{- end }} + {{- if .Values.config.account.enabled }} + - name: ACCOUNT_ENABLED + value: "true" + {{- end }} + {{- if .Values.config.testnet }} + - name: SHOW_TESTNET_LABEL + value: "true" + {{- end }} + {{- if .Values.frontend.enabled }} + - name: API_V2_ENABLED + value: "true" + {{- end }} + {{- if .Values.blockscout.ingress.enabled }} + - name: BLOCKSCOUT_HOST + value: {{ .Values.blockscout.ingress.hostname | quote }} + {{- 
end }} + {{- if .Values.config.nameService.enabled }} + - name: MICROSERVICE_BENS_ENABLED + value: 'true' + - name: MICROSERVICE_BENS_URL + value: {{ .Values.config.nameService.url }} + {{- end }} + {{- if .Values.userOpsIndexer.enabled }} + - name: MICROSERVICE_ACCOUNT_ABSTRACTION_ENABLED + value: 'true' + - name: MICROSERVICE_ACCOUNT_ABSTRACTION_URL + value: "https://{{ .Values.userOpsIndexer.ingress.hostname }}" + {{- end }} + {{- if .Values.blockscout.separateApi.enabled }} + - name: DISABLE_INDEXER + value: "true" + - name: DISABLE_WEBAPP + value: "true" + {{- end }} + {{- range $key, $value := .Values.blockscout.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range .Values.blockscout.extraEnv }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- if or .Values.blockscout.envFromSecret .Values.blockscout.envFrom }} + envFrom: + {{- if .Values.blockscout.envFromSecret }} + - secretRef: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-env + {{- end }} + {{- range .Values.blockscout.envFrom }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- end }} + {{- if .Values.blockscout.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.blockscout.readinessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.blockscout.readinessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.blockscout.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.blockscout.livenessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.blockscout.livenessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- with .Values.blockscout.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.blockscout.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.blockscout.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . | toYaml | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +--- +{{- if .Values.blockscout.separateApi.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-indexer + labels: + app: {{ include "blockscout-stack.fullname" . }}-blockscout-indexer + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.blockscout.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.blockscout.replicaCount }} + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . }}-blockscout-indexer + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/blockscout-secret.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: {{ include "blockscout-stack.fullname" . }}-blockscout-indexer + {{- include "blockscout-stack.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "blockscout-stack.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.blockscout.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-blockscout + securityContext: + {{- toYaml .Values.blockscout.securityContext | nindent 12 }} + image: "{{ .Values.blockscout.image.repository }}:{{ .Values.blockscout.image.tag }}" + resources: + {{- toYaml .Values.blockscout.resources | nindent 12 }} + imagePullPolicy: {{ .Values.blockscout.image.pullPolicy }} + {{- with .Values.blockscout.command }} + command: {{ . 
| toYaml | nindent 12 }} + {{- end }} + {{- with .Values.blockscout.args }} + args: {{ . | toYaml | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 4000 + protocol: TCP + {{- if .Values.blockscout.nftStorage.enabled }} + - name: epmd + containerPort: 4369 + protocol: TCP + - name: epmd-dyn-1 + containerPort: 9138 + protocol: TCP + - name: epmd-dyn-2 + containerPort: 9139 + protocol: TCP + {{- end }} + env: + - name: APPLICATION_MODE + value: "indexer" + - name: PORT + value: "4000" + - name: CHAIN_ID + value: {{ .Values.config.network.id | quote }} + {{- if .Values.config.account.enabled }} + - name: ACCOUNT_ENABLED + value: "true" + {{- end }} + {{- if .Values.config.testnet }} + - name: SHOW_TESTNET_LABEL + value: "true" + {{- end }} + {{- if .Values.blockscout.ingress.enabled }} + - name: BLOCKSCOUT_HOST + value: {{ .Values.blockscout.ingress.hostname | quote }} + {{- end }} + {{- if .Values.config.nameService.enabled }} + - name: MICROSERVICE_BENS_ENABLED + value: 'true' + - name: MICROSERVICE_BENS_URL + value: {{ .Values.config.nameService.url }} + {{- end }} + {{- if .Values.userOpsIndexer.enabled }} + - name: MICROSERVICE_ACCOUNT_ABSTRACTION_ENABLED + value: 'true' + - name: MICROSERVICE_ACCOUNT_ABSTRACTION_URL + value: "https://{{ .Values.userOpsIndexer.ingress.hostname }}" + {{- end }} + - name: DISABLE_WEBAPP + value: "true" + - name: API_V1_READ_METHODS_DISABLED + value: "true" + - name: API_V1_WRITE_METHODS_DISABLED + value: "true" + {{- if .Values.blockscout.nftStorage.enabled }} + - name: NFT_MEDIA_HANDLER_ENABLED + value: 'true' + - name: NFT_MEDIA_HANDLER_REMOTE_DISPATCHER_NODE_MODE_ENABLED + value: 'true' + - name: RELEASE_NODE + value: 'indexer@{{ .Release.Name }}-blockscout-indexer-svc.{{ .Release.Namespace }}.svc.cluster.local' + - name: RELEASE_DISTRIBUTION + value: 'name' + - name: NFT_MEDIA_HANDLER_BACKFILL_ENABLED + value: 'true' + - name: ERL_AFLAGS + value: "-kernel inet_dist_listen_min 9138 inet_dist_listen_max 9139" + 
{{- end }} + {{- if and .Values.config.network.currency.symbol (not .Values.config.network.currency.dualToken) }} + - name: COIN + value: {{ .Values.config.network.currency.symbol | quote }} + - name: COIN_NAME + value: {{ .Values.config.network.currency.symbol | quote }} + {{- end }} + {{- range $key, $value := .Values.blockscout.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range .Values.blockscout.extraEnv }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- if or .Values.blockscout.envFromSecret .Values.blockscout.envFrom }} + envFrom: + {{- if .Values.blockscout.envFromSecret }} + - secretRef: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-env + {{- end }} + {{- range .Values.blockscout.envFrom }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- end }} + {{- if .Values.blockscout.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.blockscout.readinessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.blockscout.readinessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.blockscout.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.blockscout.livenessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.blockscout.livenessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- with .Values.blockscout.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.blockscout.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.blockscout.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . | toYaml | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} +--- +{{- if .Values.blockscout.nftStorage.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-resizer + labels: + app: {{ .Release.Name }}-blockscout-resizer + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.blockscout.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.blockscout.replicaCount }} + selector: + matchLabels: + app: {{ .Release.Name }}-blockscout-resizer + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/blockscout-secret.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: {{ .Release.Name }}-blockscout-resizer + {{- include "blockscout-stack.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "blockscout-stack.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.blockscout.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-blockscout + securityContext: + {{- toYaml .Values.blockscout.securityContext | nindent 12 }} + image: "{{ .Values.blockscout.image.repository }}:{{ .Values.blockscout.image.tag }}" + resources: + {{- toYaml .Values.blockscout.nftStorage.resources | nindent 12 }} + imagePullPolicy: {{ .Values.blockscout.image.pullPolicy }} + {{- with .Values.blockscout.command }} + command: {{ . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.blockscout.args }} + args: {{ . 
| toYaml | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 4000 + protocol: TCP + - name: epmd + containerPort: 4369 + protocol: TCP + - name: epmd-dyn-1 + containerPort: 9140 + protocol: TCP + - name: epmd-dyn-2 + containerPort: 9141 + protocol: TCP + env: + - name: PORT + value: "4000" + - name: NFT_MEDIA_TMP_DIR + value: './images' + - name: NFT_MEDIA_HANDLER_ENABLED + value: 'true' + - name: NFT_MEDIA_HANDLER_REMOTE_DISPATCHER_NODE_MODE_ENABLED + value: 'true' + - name: NFT_MEDIA_HANDLER_IS_WORKER + value: 'true' + - name: NFT_MEDIA_HANDLER_NODES_MAP + {{- if .Values.blockscout.separateApi.enabled }} + value: '{"indexer@{{ .Release.Name }}-blockscout-indexer-svc.{{ .Release.Namespace }}.svc.cluster.local": "/{{ .Release.Name }}"}' + {{- else }} + value: '{"indexer@{{ .Release.Name }}-blockscout-svc.{{ .Release.Namespace }}.svc.cluster.local": "/{{ .Release.Name }}"}' + {{- end }} + - name: NFT_MEDIA_HANDLER_WORKER_CONCURRENCY + value: {{ .Values.blockscout.nftStorage.workerConcurrency | quote }} + - name: RELEASE_NODE + value: 'worker@{{ .Release.Name }}-blockscout-resizer-svc.{{ .Release.Namespace }}.svc.cluster.local' + - name: RELEASE_DISTRIBUTION + value: 'name' + - name: ERL_AFLAGS + value: "-kernel inet_dist_listen_min 9140 inet_dist_listen_max 9141" + - name: IPFS_GATEWAY_URL + value: {{ .Values.blockscout.nftStorage.ipfsGateway | quote }} + envFrom: + - secretRef: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-resizer + {{- if .Values.blockscout.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.blockscout.readinessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.blockscout.readinessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.blockscout.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.blockscout.livenessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.blockscout.livenessProbe.params }} + {{- . 
| toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.blockscout.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . | toYaml | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/blockscout-ingress.yaml b/helm/vendor/blockscout-stack/templates/blockscout-ingress.yaml new file mode 100644 index 00000000..5160ebea --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/blockscout-ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.blockscout.enabled }} +{{- if .Values.blockscout.ingress.enabled }} +{{- $fullName := include "blockscout-stack.fullname" . -}} +{{- $svcPort := .Values.blockscout.service.port -}} +{{- if and .Values.blockscout.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-blockscout-ingress + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.blockscout.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.blockscout.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.blockscout.ingress.className }} + {{- end }} + {{- if .Values.blockscout.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.blockscout.ingress.hostname | quote }} + secretName: {{ .Values.blockscout.ingress.tls.secretName | default (printf "%s-blockscout-tls" $fullName) }} + {{- end }} + rules: + - host: {{ .Values.blockscout.ingress.hostname | quote }} + http: + paths: + {{- range .Values.blockscout.ingress.paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-blockscout-svc + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-blockscout-svc + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/blockscout-migration-job.yaml b/helm/vendor/blockscout-stack/templates/blockscout-migration-job.yaml new file mode 100644 index 00000000..2e46667d --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/blockscout-migration-job.yaml @@ -0,0 +1,100 @@ +{{- if .Values.blockscout.separateApi.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-migrations + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: hook-succeeded + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + template: + spec: + securityContext: + {{- toYaml .Values.blockscout.podSecurityContext | nindent 8 }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - name: blockscout-migrations + image: "{{ .Values.blockscout.image.repository }}:{{ .Values.blockscout.image.tag }}" + imagePullPolicy: {{ .Values.blockscout.image.pullPolicy }} + securityContext: + {{- toYaml .Values.blockscout.securityContext | nindent 12 }} + {{- with .Values.blockscout.init.command }} + command: {{ . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.blockscout.init.args }} + args: {{ . | toYaml | nindent 12 }} + {{- end }} + env: + - name: PORT + value: "4000" + - name: CHAIN_ID + value: {{ .Values.config.network.id | quote }} + {{- if and .Values.config.network.currency.symbol (not .Values.config.network.currency.dualToken) }} + - name: COIN + value: {{ .Values.config.network.currency.symbol | quote }} + - name: COIN_NAME + value: {{ .Values.config.network.currency.symbol | quote }} + {{- end }} + {{- if .Values.config.account.enabled }} + - name: ACCOUNT_ENABLED + value: "true" + {{- end }} + {{- if .Values.config.testnet }} + - name: SHOW_TESTNET_LABEL + value: "true" + {{- end }} + {{- if .Values.frontend.enabled }} + - name: API_V2_ENABLED + value: "true" + {{- end }} + {{- if .Values.blockscout.ingress.enabled }} + - name: BLOCKSCOUT_HOST + value: {{ .Values.blockscout.ingress.hostname | quote }} + {{- end }} + {{- if .Values.blockscout.separateApi.enabled }} + - name: DISABLE_INDEXER + value: "true" + - name: DISABLE_WEBAPP + value: "true" + {{- end }} + {{- range $key, $value := .Values.blockscout.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range .Values.blockscout.extraEnv }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- if or .Values.blockscout.envFromSecret .Values.blockscout.envFrom }} + envFrom: + {{- if .Values.blockscout.envFromSecret }} + - secretRef: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-env + {{- end }} + {{- range .Values.blockscout.envFrom }} + - {{ toYaml . 
| nindent 12 | trim }} + {{- end }} + {{- end }} + restartPolicy: OnFailure + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.blockscout.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . | toYaml | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + backoffLimit: 4 +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/blockscout-prometheus-rules.yaml b/helm/vendor/blockscout-stack/templates/blockscout-prometheus-rules.yaml new file mode 100644 index 00000000..486abc85 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/blockscout-prometheus-rules.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.config.prometheus.enabled .Values.config.prometheus.rules.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "blockscout-stack.fullname" . }} + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + groups: + - name: {{ include "blockscout-stack.fullname" . }}-blockscout + rules: + - alert: "{{`{{ $labels.job }}`}} has no new batches" + expr: time() - latest_batch_timestamp{job="{{ include "blockscout-stack.fullname" . }}"{{- if .Values.blockscout.separateApi.enabled }},service="{{ include "blockscout-stack.fullname" . }}-blockscout-indexer-svc"{{- end }}} > {{ .Values.config.prometheus.rules.batchTimeMultiplier }}*batch_average_time{job="{{ include "blockscout-stack.fullname" . }}"{{- if .Values.blockscout.separateApi.enabled }},service="{{ include "blockscout-stack.fullname" . }}-blockscout-indexer-svc"{{- end }}} + for: 5m + labels: + severity: warning + service: "blockscout-stack" + {{- with .Values.config.prometheus.rules.labels }} + {{- toYaml . 
| nindent 10 }} + {{- end }} + annotations: + summary: "Instance {{`{{ $labels.job }}`}} in namespace {{`{{ $labels.namespace }}`}} has no new batches" + description: "Latest batch at {{`{{ $labels.job }}`}} was for more than {{ .Values.config.prometheus.rules.batchTimeMultiplier }} average batch time ago." +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/blockscout-secret.yaml b/helm/vendor/blockscout-stack/templates/blockscout-secret.yaml new file mode 100644 index 00000000..3f2483fb --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/blockscout-secret.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.blockscout.enabled .Values.blockscout.envFromSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-env + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +type: Opaque +data: +{{- if .Values.blockscout.nftStorage.enabled }} + RELEASE_COOKIE: {{ .Values.blockscout.nftStorage.cookie | b64enc }} + NFT_MEDIA_HANDLER_AWS_PUBLIC_BUCKET_URL: {{ .Values.blockscout.nftStorage.bucketUrl | b64enc }} +{{- end }} +{{- range $key, $value := .Values.blockscout.envFromSecret }} + {{ $key }}: {{ $value | b64enc }} +{{- end }} +--- +{{- if .Values.blockscout.nftStorage.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-resizer + labels: + {{- include "blockscout-stack.labels" . 
| nindent 4 }} +type: Opaque +data: + RELEASE_COOKIE: {{ .Values.blockscout.nftStorage.cookie | b64enc }} + NFT_MEDIA_HANDLER_AWS_BUCKET_HOST: {{ .Values.blockscout.nftStorage.bucketHost | b64enc }} + NFT_MEDIA_HANDLER_AWS_ACCESS_KEY_ID: {{ .Values.blockscout.nftStorage.accessKey | b64enc }} + NFT_MEDIA_HANDLER_AWS_SECRET_ACCESS_KEY: {{ .Values.blockscout.nftStorage.secretKey | b64enc }} + NFT_MEDIA_HANDLER_AWS_BUCKET_NAME: {{ .Values.blockscout.nftStorage.bucketName | b64enc }} + NFT_MEDIA_HANDLER_AWS_PUBLIC_BUCKET_URL: {{ .Values.blockscout.nftStorage.bucketUrl | b64enc }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/blockscout-service.yaml b/helm/vendor/blockscout-stack/templates/blockscout-service.yaml new file mode 100644 index 00000000..0417d3e6 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/blockscout-service.yaml @@ -0,0 +1,92 @@ +{{- if .Values.blockscout.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-svc + labels: + app: {{ include "blockscout-stack.fullname" . }}-blockscout-svc + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + type: {{ .Values.blockscout.service.type }} + ports: + - port: {{ .Values.blockscout.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if and .Values.blockscout.nftStorage.enabled (not .Values.blockscout.separateApi.enabled) }} + - port: 4369 + targetPort: epmd + protocol: TCP + name: epmd + - port: 9138 + targetPort: 9138 + protocol: TCP + name: epmd-dyn-1 + - port: 9139 + targetPort: 9139 + protocol: TCP + name: epmd-dyn-2 + {{- end }} + selector: + app: {{ include "blockscout-stack.fullname" . }}-blockscout +--- +{{- if .Values.blockscout.separateApi.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "blockscout-stack.fullname" . 
}}-blockscout-indexer-svc + labels: + app: {{ .Release.Name }}-blockscout-indexer-svc + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + type: {{ .Values.blockscout.service.type }} + ports: + - port: {{ .Values.blockscout.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if .Values.blockscout.nftStorage.enabled }} + - port: 4369 + targetPort: epmd + protocol: TCP + name: epmd + - port: 9138 + targetPort: 9138 + protocol: TCP + name: epmd-dyn-1 + - port: 9139 + targetPort: 9139 + protocol: TCP + name: epmd-dyn-2 + {{- end }} + selector: + app: {{ .Release.Name }}-blockscout-indexer +{{- end }} +--- +{{- if .Values.blockscout.nftStorage.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-resizer-svc + labels: + app: {{ .Release.Name }}-blockscout-resizer-svc + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + type: {{ .Values.blockscout.service.type }} + ports: + - port: 4369 + targetPort: epmd + protocol: TCP + name: epmd + - port: 9140 + targetPort: 9140 + protocol: TCP + name: epmd-dyn-1 + - port: 9141 + targetPort: 9141 + protocol: TCP + name: epmd-dyn-2 + selector: + app: {{ .Release.Name }}-blockscout-resizer +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/blockscout-servicemonitor-blackbox.yaml b/helm/vendor/blockscout-stack/templates/blockscout-servicemonitor-blackbox.yaml new file mode 100644 index 00000000..5ba4b46a --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/blockscout-servicemonitor-blackbox.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.blockscout.ingress.enabled .Values.config.prometheus.blackbox.enabled }} +{{- $url := printf "https://%s%s" .Values.blockscout.ingress.hostname .Values.config.prometheus.blackbox.path }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "blockscout-stack.fullname" . 
}}-blockscout-bb + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 5s + port: http + path: "/probe" + interval: 15s + params: + module: + - http_2xx + target: + - {{ $url }} + metricRelabelings: + - targetLabel: instance + replacement: {{ $url }} + - targetLabel: target + replacement: {{ include "blockscout-stack.fullname" . }} + namespaceSelector: + any: true + selector: + matchLabels: + app.kubernetes.io/name: prometheus-blackbox-exporter + app.kubernetes.io/instance: prometheus-blackbox-exporter +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/blockscout-servicemonitor.yaml b/helm/vendor/blockscout-stack/templates/blockscout-servicemonitor.yaml new file mode 100644 index 00000000..6e757fee --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/blockscout-servicemonitor.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.blockscout.enabled .Values.config.prometheus.enabled }} +{{- if .Values.blockscout.separateApi.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-indexer + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 30s + port: http + path: /metrics + metricRelabelings: + - targetLabel: job + replacement: {{ include "blockscout-stack.fullname" . }} + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . }}-blockscout-indexer-svc +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-api + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 30s + port: http + path: /metrics + metricRelabelings: + - targetLabel: job + replacement: {{ include "blockscout-stack.fullname" . }} + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . 
}}-blockscout-svc +{{- else }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "blockscout-stack.fullname" . }}-blockscout-svm + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 30s + port: http + path: /metrics + metricRelabelings: + - targetLabel: job + replacement: {{ include "blockscout-stack.fullname" . }} + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . }}-blockscout-svc +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/frontend-deployment.yaml b/helm/vendor/blockscout-stack/templates/frontend-deployment.yaml new file mode 100644 index 00000000..b586e802 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/frontend-deployment.yaml @@ -0,0 +1,148 @@ +{{- if .Values.frontend.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "blockscout-stack.fullname" . }}-frontend + labels: + app: {{ include "blockscout-stack.fullname" . }}-frontend + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.frontend.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.frontend.replicaCount }} + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . }}-frontend + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/frontend-secret.yaml") . | sha256sum }} + {{- if eq .Values.frontend.image.pullPolicy "Always" }} + releaseTime: {{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }} + {{- end }} + labels: + app: {{ include "blockscout-stack.fullname" . }}-frontend + {{- include "blockscout-stack.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "blockscout-stack.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.frontend.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-frontend + securityContext: + {{- toYaml .Values.frontend.securityContext | nindent 12 }} + image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}" + resources: + {{- toYaml .Values.frontend.resources | nindent 12 }} + imagePullPolicy: {{ .Values.frontend.image.pullPolicy }} + ports: + - name: http + containerPort: 3000 + protocol: TCP + env: + - name: NEXT_PUBLIC_NETWORK_ID + value: {{ .Values.config.network.id | quote }} + - name: NEXT_PUBLIC_NETWORK_NAME + value: {{ .Values.config.network.name | quote }} + - name: NEXT_PUBLIC_NETWORK_SHORT_NAME + value: {{ .Values.config.network.shortname | quote }} + - name: NEXT_PUBLIC_NETWORK_CURRENCY_NAME + value: {{ .Values.config.network.currency.name | quote }} + {{- if and .Values.config.network.currency.symbol (not .Values.config.network.currency.dualToken) }} + - name: NEXT_PUBLIC_NETWORK_CURRENCY_SYMBOL + value: {{ .Values.config.network.currency.symbol | quote }} + {{- end }} + - name: NEXT_PUBLIC_NETWORK_CURRENCY_DECIMALS + value: {{ .Values.config.network.currency.decimals | quote }} + {{- if .Values.config.account.enabled }} + - name: NEXT_PUBLIC_IS_ACCOUNT_SUPPORTED + value: "true" + {{- end }} + {{- if .Values.config.testnet }} + - name: NEXT_PUBLIC_IS_TESTNET + value: "true" + {{- end }} + {{- if .Values.blockscout.ingress.enabled }} + - name: NEXT_PUBLIC_API_HOST + value: {{ .Values.blockscout.ingress.hostname | quote }} + {{- end }} + {{- if .Values.frontend.ingress.enabled }} + - name: NEXT_PUBLIC_APP_HOST + value: {{ .Values.frontend.ingress.hostname | quote }} + {{- end }} + {{- if and .Values.stats.ingress.enabled .Values.stats.enabled }} + - name: NEXT_PUBLIC_STATS_API_HOST + value: "https://{{ .Values.stats.ingress.hostname }}" + {{- if ne .Values.stats.basePath "/" }} + - name: NEXT_PUBLIC_STATS_API_BASE_PATH + value: {{ .Values.stats.basePath | quote 
}} + {{- end }} + {{- end }} + {{- if .Values.config.prometheus.enabled }} + - name: PROMETHEUS_METRICS_ENABLED + value: "true" + {{- end }} + {{- if .Values.config.nameService.enabled }} + - name: NEXT_PUBLIC_NAME_SERVICE_API_HOST + value: {{ .Values.config.nameService.url }} + {{- end }} + {{- if .Values.userOpsIndexer.enabled }} + - name: NEXT_PUBLIC_HAS_USER_OPS + value: 'true' + {{- end }} + {{- range $key, $value := .Values.frontend.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range .Values.frontend.extraEnv }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- if or .Values.frontend.envFromSecret .Values.frontend.envFrom }} + envFrom: + {{- if .Values.frontend.envFromSecret }} + - secretRef: + name: {{ include "blockscout-stack.fullname" . }}-frontend-env + {{- end }} + {{- range .Values.frontend.envFrom }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + {{- end }} + {{- if .Values.frontend.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.frontend.readinessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.frontend.readinessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.frontend.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.frontend.livenessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.frontend.livenessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/frontend-ingress.yaml b/helm/vendor/blockscout-stack/templates/frontend-ingress.yaml new file mode 100644 index 00000000..d5491cec --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/frontend-ingress.yaml @@ -0,0 +1,115 @@ +{{- if .Values.frontend.enabled }} +{{- if .Values.frontend.ingress.enabled }} +{{- $fullName := include "blockscout-stack.fullname" . -}} +{{- $svcPort := .Values.frontend.service.port -}} +{{- if and .Values.frontend.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-frontend-ingress + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.frontend.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.frontend.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.frontend.ingress.className }} + {{- end }} + {{- if .Values.frontend.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.frontend.ingress.hostname | quote }} + {{- if eq .Values.frontend.ingress.hostname .Values.blockscout.ingress.hostname}} + secretName: {{ .Values.blockscout.ingress.tls.secretName | default (printf "%s-blockscout-tls" $fullName) }} + {{- else }} + secretName: {{ .Values.frontend.ingress.tls.secretName | default (printf "%s-frontend-tls" $fullName) }} + {{- end }} + {{- end }} + rules: + - host: {{ .Values.frontend.ingress.hostname | quote }} + http: + paths: + {{- range .Values.frontend.ingress.paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- else }} + pathType: Prefix + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-frontend-svc + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-frontend-svc + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +--- +{{- if .Values.config.prometheus.ingressWhitelist.enabled }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-frontend-metrics-ingress + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} + annotations: + {{- with .Values.config.prometheus.ingressWhitelist.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.frontend.ingress.annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.frontend.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.frontend.ingress.className }} + {{- end }} + {{- if .Values.frontend.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.frontend.ingress.hostname | quote }} + {{- if eq .Values.frontend.ingress.hostname .Values.blockscout.ingress.hostname }} + secretName: {{ .Values.blockscout.ingress.tls.secretName | default (printf "%s-blockscout-tls" $fullName) }} + {{- else }} + secretName: {{ .Values.frontend.ingress.tls.secretName | default (printf "%s-frontend-tls" $fullName) }} + {{- end }} + {{- end }} + rules: + - host: {{ .Values.frontend.ingress.hostname | quote }} + http: + paths: + - path: /node-api/metrics + pathType: Prefix + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-frontend-svc + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-frontend-svc + servicePort: {{ $svcPort }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/frontend-podmonitor.yaml b/helm/vendor/blockscout-stack/templates/frontend-podmonitor.yaml new file mode 100644 index 00000000..0348ad2c --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/frontend-podmonitor.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.frontend.enabled .Values.config.prometheus.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "blockscout-stack.fullname" . }}-frontend-podmonitor + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + podMetricsEndpoints: + - port: http + path: /node-api/metrics + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . 
}}-frontend +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/frontend-secret.yaml b/helm/vendor/blockscout-stack/templates/frontend-secret.yaml new file mode 100644 index 00000000..381ce4aa --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/frontend-secret.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.frontend.enabled .Values.frontend.envFromSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "blockscout-stack.fullname" . }}-frontend-env + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $value := .Values.frontend.envFromSecret }} + {{ $key }}: {{ $value | b64enc }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/frontend-service.yaml b/helm/vendor/blockscout-stack/templates/frontend-service.yaml new file mode 100644 index 00000000..a040251b --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/frontend-service.yaml @@ -0,0 +1,18 @@ +{{- if .Values.frontend.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "blockscout-stack.fullname" . }}-frontend-svc + labels: + app: {{ include "blockscout-stack.fullname" . }}-frontend-svc + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + type: {{ .Values.frontend.service.type }} + ports: + - port: {{ .Values.frontend.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + app: {{ include "blockscout-stack.fullname" . 
}}-frontend +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/redirect-ingress.yaml b/helm/vendor/blockscout-stack/templates/redirect-ingress.yaml new file mode 100644 index 00000000..060dd6f1 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/redirect-ingress.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.config.redirect.enabled (eq .Values.frontend.ingress.hostname .Values.blockscout.ingress.hostname) -}} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "blockscout-stack.fullname" . }}-redirect-ingress + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} + annotations: + {{- tpl (.Values.config.redirect.ingress.annotations | toYaml) . | nindent 4 }} +spec: + {{- with .Values.config.redirect.ingress.className }} + ingressClassName: {{ . }} + {{- end }} + {{- if .Values.config.redirect.ingress.tls.enabled }} + tls: + - hosts: + {{- range .Values.config.redirect.hostnames }} + - {{ . }} + {{- end }} + secretName: {{ include "blockscout-stack.fullname" . }}-redirect-ingress-tls + {{- end }} + rules: + {{- range .Values.config.redirect.hostnames }} + - host: {{ . | quote }} + {{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/serviceaccount.yaml b/helm/vendor/blockscout-stack/templates/serviceaccount.yaml new file mode 100644 index 00000000..93766aee --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "blockscout-stack.serviceAccountName" . }} + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/stats-cm.yaml b/helm/vendor/blockscout-stack/templates/stats-cm.yaml new file mode 100644 index 00000000..72c7933e --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/stats-cm.yaml @@ -0,0 +1,14 @@ +{{- if .Values.stats.enabled }} +{{- if .Values.stats.files.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "blockscout-stack.fullname" . }}-stats-cm + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +data: + {{- range $key, $value := .Values.stats.files.list }} + {{ $key }}: {{ toYaml $value | indent 2 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/stats-deployment.yaml b/helm/vendor/blockscout-stack/templates/stats-deployment.yaml new file mode 100644 index 00000000..efd1e26c --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/stats-deployment.yaml @@ -0,0 +1,130 @@ +{{- if .Values.stats.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "blockscout-stack.fullname" . }}-stats + labels: + app: {{ include "blockscout-stack.fullname" . }}-stats + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.stats.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.stats.replicaCount }} + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . }}-stats + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/stats-secret.yaml") . | sha256sum }} + {{- if eq .Values.stats.image.pullPolicy "Always" }} + releaseTime: {{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }} + {{- end }} + {{- with .Values.stats.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: {{ include "blockscout-stack.fullname" . }}-stats + {{- include "blockscout-stack.selectorLabels" . 
| nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "blockscout-stack.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.stats.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-stats + securityContext: + {{- toYaml .Values.stats.securityContext | nindent 12 }} + image: "{{ .Values.stats.image.repository }}:{{ .Values.stats.image.tag }}" + resources: + {{- toYaml .Values.stats.resources | nindent 12 }} + imagePullPolicy: {{ .Values.stats.image.pullPolicy }} + ports: + - name: http + containerPort: 8050 + protocol: TCP + - name: http-metrics + containerPort: 6060 + protocol: TCP + env: + - name: STATS__BLOCKSCOUT_API_URL + value: "https://{{ .Values.blockscout.ingress.hostname }}" + {{- if .Values.config.network.currency.symbol }} + - name: STATS_CHARTS__TEMPLATE_VALUES__NATIVE_COIN_SYMBOL + value: {{ .Values.config.network.currency.symbol | quote }} + {{- end }} + {{- if and (ne .Values.stats.basePath "/" ) (not (hasKey .Values.stats.env "STATS__SERVER__HTTP__BASE_PATH")) }} + - name: STATS__SERVER__HTTP__BASE_PATH + value: "{{ .Values.stats.basePath }}" + {{- end }} + {{- range $key, $value := .Values.stats.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range .Values.stats.extraEnv }} + - {{ toYaml . | nindent 12 | trim }} + {{- end }} + envFrom: + - secretRef: + name: {{ include "blockscout-stack.fullname" . 
}}-stats-env + {{- if .Values.stats.files.enabled }} + volumeMounts: + {{- range $key, $value := .Values.stats.files.list }} + - mountPath: {{ $.Values.stats.files.mountPath }}/{{ $key }} + name: stats-cm + subPath: {{ $key }} + {{- end }} + {{- end }} + {{- if .Values.stats.readinessProbe.enabled }} + readinessProbe: + httpGet: + {{- if eq .Values.stats.basePath "/" }} + path: {{ .Values.stats.readinessProbe.path }} + {{- else }} + path: "{{ .Values.stats.basePath }}{{ .Values.stats.livenessProbe.path }}" + {{- end }} + port: http + scheme: HTTP + {{- with .Values.stats.readinessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.stats.livenessProbe.enabled }} + livenessProbe: + httpGet: + {{- if eq .Values.stats.basePath "/" }} + path: {{ .Values.stats.livenessProbe.path }} + {{- else }} + path: "{{ .Values.stats.basePath }}{{ .Values.stats.livenessProbe.path }}" + {{- end }} + port: http + scheme: HTTP + {{- with .Values.stats.livenessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.stats.files.enabled }} + volumes: + - configMap: + name: {{ include "blockscout-stack.fullname" . }}-stats-cm + defaultMode: 0777 + name: stats-cm + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/stats-ingress.yaml b/helm/vendor/blockscout-stack/templates/stats-ingress.yaml new file mode 100644 index 00000000..68b1e65a --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/stats-ingress.yaml @@ -0,0 +1,59 @@ +{{- if .Values.stats.enabled }} +{{- if .Values.stats.ingress.enabled }} +{{- $fullName := include "blockscout-stack.fullname" . 
-}} +{{- $svcPort := .Values.stats.service.port -}} +{{- if and .Values.stats.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-stats-ingress + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.stats.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.stats.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.stats.ingress.className }} + {{- end }} + {{- if .Values.stats.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.stats.ingress.hostname | quote }} + secretName: {{ .Values.stats.ingress.tls.secretName | default (printf "%s-stats-tls" $fullName) }} + {{- end }} + rules: + - host: {{ .Values.stats.ingress.hostname | quote }} + http: + paths: + {{- range .Values.stats.ingress.paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- else }} + pathType: Prefix + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-stats-svc + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-stats-svc + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git 
a/helm/vendor/blockscout-stack/templates/stats-secret.yaml b/helm/vendor/blockscout-stack/templates/stats-secret.yaml new file mode 100644 index 00000000..2e1c6a1b --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/stats-secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.stats.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "blockscout-stack.fullname" . }}-stats-env + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $value := .Values.stats.envFromSecret }} + {{ $key }}: {{ $value | b64enc }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/stats-service.yaml b/helm/vendor/blockscout-stack/templates/stats-service.yaml new file mode 100644 index 00000000..318e9be5 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/stats-service.yaml @@ -0,0 +1,22 @@ +{{- if .Values.stats.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "blockscout-stack.fullname" . }}-stats-svc + labels: + app: {{ include "blockscout-stack.fullname" . }}-stats-svc + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + type: {{ .Values.stats.service.type }} + ports: + - port: {{ .Values.stats.service.port }} + targetPort: http + protocol: TCP + name: http + - port: {{ .Values.stats.service.metricsPort }} + targetPort: http-metrics + protocol: TCP + name: http-metrics + selector: + app: {{ include "blockscout-stack.fullname" . 
}}-stats +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/stats-servicemonitor.yaml b/helm/vendor/blockscout-stack/templates/stats-servicemonitor.yaml new file mode 100644 index 00000000..1b7a095b --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/stats-servicemonitor.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.stats.enabled .Values.config.prometheus.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "blockscout-stack.fullname" . }}-stats + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 10s + port: http-metrics + path: /metrics + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . }}-stats-svc +{{- end }} diff --git a/helm/vendor/blockscout-stack/templates/user-ops-indexer-deployment.yaml b/helm/vendor/blockscout-stack/templates/user-ops-indexer-deployment.yaml new file mode 100644 index 00000000..be5ca366 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/user-ops-indexer-deployment.yaml @@ -0,0 +1,113 @@ +{{- if .Values.userOpsIndexer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer + labels: + app: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.userOpsIndexer.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.userOpsIndexer.replicaCount }} + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/user-ops-indexer-secret.yaml") . 
| sha256sum }} + {{- if eq .Values.userOpsIndexer.image.pullPolicy "Always" }} + releaseTime: {{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }} + {{- end }} + labels: + app: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer + {{- include "blockscout-stack.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "blockscout-stack.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.userOpsIndexer.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-user-ops-indexer + securityContext: + {{- toYaml .Values.userOpsIndexer.securityContext | nindent 12 }} + image: "{{ .Values.userOpsIndexer.image.repository }}:{{ .Values.userOpsIndexer.image.tag }}" + resources: + {{- toYaml .Values.userOpsIndexer.resources | nindent 12 }} + imagePullPolicy: {{ .Values.userOpsIndexer.image.pullPolicy }} + ports: + - name: http + containerPort: 8050 + protocol: TCP + {{- if .Values.userOpsIndexer.service.grpc.enabled }} + - name: grpc + containerPort: 8051 + protocol: TCP + {{- end }} + - name: http-metrics + containerPort: 6060 + protocol: TCP + env: + - name: USER_OPS_INDEXER__SERVER__HTTP__ENABLED + value: 'true' + - name: USER_OPS_INDEXER__SERVER__HTTP__ADDR + value: '0.0.0.0:8050' + - name: USER_OPS_INDEXER__SERVER__GRPC__ENABLED + value: {{ .Values.userOpsIndexer.service.grpc.enabled | quote }} + - name: USER_OPS_INDEXER__SERVER__GRPC__ADDR + value: '0.0.0.0:8051' + - name: USER_OPS_INDEXER__METRICS__ENABLED + value: 'true' + - name: USER_OPS_INDEXER__METRICS__ADDR + value: '0.0.0.0:6060' + - name: USER_OPS_INDEXER__METRICS__ROUTE + value: '/metrics' + {{- range $key, $value := .Values.userOpsIndexer.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range .Values.userOpsIndexer.extraEnv }} + - {{ toYaml . 
| nindent 12 | trim }} + {{- end }} + envFrom: + - secretRef: + name: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer-env + {{- if .Values.userOpsIndexer.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.userOpsIndexer.readinessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.userOpsIndexer.readinessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.userOpsIndexer.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.userOpsIndexer.livenessProbe.path }} + port: http + scheme: HTTP + {{- with .Values.userOpsIndexer.livenessProbe.params }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/user-ops-indexer-ingress.yaml b/helm/vendor/blockscout-stack/templates/user-ops-indexer-ingress.yaml new file mode 100644 index 00000000..844ad4d3 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/user-ops-indexer-ingress.yaml @@ -0,0 +1,59 @@ +{{- if .Values.userOpsIndexer.enabled }} +{{- if .Values.userOpsIndexer.ingress.enabled }} +{{- $fullName := include "blockscout-stack.fullname" . 
-}} +{{- $svcPort := .Values.userOpsIndexer.service.port -}} +{{- if and .Values.userOpsIndexer.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-user-ops-indexer-ingress + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} + {{- with .Values.userOpsIndexer.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.userOpsIndexer.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.userOpsIndexer.ingress.className }} + {{- end }} + {{- if .Values.userOpsIndexer.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.userOpsIndexer.ingress.hostname | quote }} + secretName: {{ .Values.userOpsIndexer.ingress.tls.secretName | default (printf "%s-user-ops-indexer-tls" $fullName) }} + {{- end }} + rules: + - host: {{ .Values.userOpsIndexer.ingress.hostname | quote }} + http: + paths: + {{- range .Values.userOpsIndexer.ingress.paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- else }} + pathType: Prefix + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-user-ops-indexer-svc + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-user-ops-indexer-svc + servicePort: {{ 
$svcPort }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/user-ops-indexer-secret.yaml b/helm/vendor/blockscout-stack/templates/user-ops-indexer-secret.yaml new file mode 100644 index 00000000..e54593a4 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/user-ops-indexer-secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.userOpsIndexer.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer-env + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $value := .Values.userOpsIndexer.envFromSecret }} + {{ $key }}: {{ $value | b64enc }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/user-ops-indexer-service.yaml b/helm/vendor/blockscout-stack/templates/user-ops-indexer-service.yaml new file mode 100644 index 00000000..ee1e28b6 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/user-ops-indexer-service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.userOpsIndexer.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer-svc + labels: + app: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer-svc + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + type: {{ .Values.userOpsIndexer.service.type }} + ports: + - port: {{ .Values.userOpsIndexer.service.port }} + targetPort: http + protocol: TCP + name: http + - port: {{ .Values.userOpsIndexer.service.metricsPort }} + targetPort: http-metrics + protocol: TCP + name: http-metrics + selector: + app: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer +--- +{{- if .Values.userOpsIndexer.service.grpc.enabled }} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer-grpc-svc + labels: + app: {{ include "blockscout-stack.fullname" . 
}}-user-ops-indexer-svc + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + type: {{ .Values.userOpsIndexer.service.type }} + ports: + - port: {{ .Values.userOpsIndexer.service.grpc.port }} + targetPort: grpc + protocol: TCP + name: grpc + selector: + app: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-stack/templates/user-ops-indexer-servicemonitor.yaml b/helm/vendor/blockscout-stack/templates/user-ops-indexer-servicemonitor.yaml new file mode 100644 index 00000000..e7ab2f17 --- /dev/null +++ b/helm/vendor/blockscout-stack/templates/user-ops-indexer-servicemonitor.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.userOpsIndexer.enabled .Values.config.prometheus.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer + labels: + {{- include "blockscout-stack.labels" . | nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 10s + port: http-metrics + path: /metrics + selector: + matchLabels: + app: {{ include "blockscout-stack.fullname" . }}-user-ops-indexer-svc +{{- end }} diff --git a/helm/vendor/blockscout-stack/values.yaml b/helm/vendor/blockscout-stack/values.yaml new file mode 100644 index 00000000..5488f84b --- /dev/null +++ b/helm/vendor/blockscout-stack/values.yaml @@ -0,0 +1,500 @@ +# Default values for blockscout-stack. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +## Provide a name in place of blockscout-stack for `app:` labels +## +nameOverride: "" +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +## Blockscout configuration options +## +config: + network: + id: 1 + name: Ether + shortname: Ether + currency: + name: Ether + symbol: ETH + decimals: 18 + # if network uses dual token model like gnosis (in most case it should be set to false) + dualToken: false + account: + enabled: false + testnet: false + nameService: + enabled: false + url: "" + ## Creates redirect from additional domain to frontend domain + ## Works only if backend and frontend runs on single domain + redirect: + enabled: false + hostnames: ["extra-chart-example.local"] + ingress: + className: nginx + annotations: + nginx.ingress.kubernetes.io/server-snippet: | + if ($request_uri !~ ^/.well-known/(.*)$) { + return 301 $scheme://{{ .Values.frontend.ingress.hostname }}$request_uri; + } + tls: + enabled: true + ## If set to true will create service monitors for blockscout and stats + ## + prometheus: + enabled: true + ## Whitelist metrics path on ingress to make metrics non-public + ingressWhitelist: + enabled: true + annotations: + nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + blackbox: + enabled: true + path: /api/health + rules: + enabled: true + ## If latest block timestamp is older than healthyBlockPeriod instance is considered unhealthy and alert is created + healthyBlockPeriod: 300 + ## Alert is created if there is no new batches for more than batchTimeMultiplier x average_batch_time + batchTimeMultiplier: 2 + labels: {} +## Configuration options for backend +## +blockscout: + enabled: true + ## Replica count for indexer (if separate api is not used this replica count for 
deployment containing both indexer and api). Currently only one replica is supported + ## + replicaCount: 1 + ## Image parametes + ## + image: + repository: ghcr.io/blockscout/blockscout + pullPolicy: IfNotPresent + tag: "latest" + ## Init container configuration (used to run DB migrations) + ## + init: + enabled: true + command: + - /bin/sh + args: + - -c + - bin/blockscout eval "Elixir.Explorer.ReleaseTasks.create_and_migrate()" + ## Run API service as separate deployment + ## + separateApi: + enabled: false + replicaCount: 2 + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + nftStorage: + enabled: false + cookie: secret + bucketHost: xxx.r2.cloudflarestorage.com + accessKey: "" + secretKey: "" + bucketName: nft + bucketUrl: https://pub-xxx.r2.dev + ipfsGateway: https://ipfs.io/ipfs + workerConcurrency: 10 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + ## Blockscout ENV vars + ## ref: https://docs.blockscout.com/setup/env-variables + ## + env: [] + # NAME: VALUE + # Refer to an existing Secret/ConfigMap + extraEnv: [] + # - name: DATABASE_URL + # valueFrom: + # secretKeyRef: + # name: blockscout-secret + # key: DATABASE_URL + ## Set ENV vars via secret, this can be useful for DB connection params, api keys, etc. + ## + # This will create a Secret with the specified data + envFromSecret: [] + # NAME: VALUE + # Refer to an existing Secret/ConfigMap + envFrom: [] + # - secretRef: + # name: blockscout-secret + # - configMapRef: + # name: blockscout-config + ## Command to start blockscout instance + ## + command: + - /bin/sh + args: + - -c + - bin/blockscout start + ## Annotations to add to blockscout pod + podAnnotations: {} + + ## Annotations to add to blockscout deployment + annotations: {} + + podSecurityContext: {} + ## SecurityContext holds pod-level security attributes and common container settings. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: {} + terminationGracePeriodSeconds: 300 + ## Liveness probe + ## + livenessProbe: + enabled: true + path: /api/health/liveness + params: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + ## Readiness probe + ## + readinessProbe: + enabled: true + path: /api/health/readiness + params: + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 60 + + service: + type: ClusterIP + port: 80 + ## Configure ingress resource that allows you to access the blockscout installation. + ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + enabled: false + className: "" + annotations: {} + hostname: chart-example.local + tls: + enabled: false + #secretName: + paths: + - path: /api + pathType: Prefix + - path: /socket + pathType: Prefix + - path: /public-metrics + pathType: Prefix + - path: /auth/auth0 + pathType: Exact + - path: /auth/auth0/callback + pathType: Exact + - path: /auth/logout + pathType: Exact + + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + # Additional volumes on the output Blockscout Deployment definition. + volumes: [] + # - name: foo + # secret: + # secretName: mysecret + # optional: false + + # Additional volumeMounts on the output Blockscout Deployment definition. + volumeMounts: [] + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true +## Configuration options for frontend +## +frontend: + enabled: true + ## Image parameters + image: + repository: ghcr.io/blockscout/frontend + tag: latest + pullPolicy: IfNotPresent + + replicaCount: 2 + ## Annotations to add to frontend pod + podAnnotations: {} + + ## Annotations to add to frontend deployment + annotations: {} + + podSecurityContext: {} + ## SecurityContext holds pod-level security attributes and common container settings.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: {} + + service: + type: ClusterIP + port: 80 + ## Configure ingress resource that allows you to access the frontend installation. + ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + enabled: false + className: "" + annotations: {} + hostname: chart-example.local + tls: + enabled: false + #secretName: + paths: + - path: / + + resources: + limits: + memory: "1Gi" + cpu: "500m" + requests: + memory: "256Mi" + cpu: "250m" + ## Liveness probe + ## + livenessProbe: + enabled: true + path: /api/healthz + params: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + ## Readiness probe + ## + readinessProbe: + enabled: true + path: /api/healthz + params: + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 30 + ## Frontend ENV vars + ## ref: https://github.com/blockscout/frontend/blob/main/docs/ENVS.md + ## + env: [] + # NAME: VALUE + # Refer to an existing Secret/ConfigMap + extraEnv: [] + # - name: FAVICON_GENERATOR_API_KEY + # valueFrom: + # secretKeyRef: + # name: blockscout-frontend-secret + # key: FAVICON_GENERATOR_API_KEY + # This will create a Secret with the specified data + envFromSecret: [] + # NAME: VALUE + # Refer to an existing Secret/ConfigMap + envFrom: [] + # - secretRef: + # name: blockscout-frontend-secret + # - configMapRef: + # name: blockscout-frontend-config + +stats: + enabled: false + ## Image parameters + ## + image: + repository: ghcr.io/blockscout/stats + tag: v2.4.0 + pullPolicy: IfNotPresent + + replicaCount: 1 + service: + type: ClusterIP + port: 80 + metricsPort: 6060 + + podAnnotations: {} + + ## Annotations to add to stats deployment + annotations: {} + + podSecurityContext: {} + + securityContext: {} + + basePath: "/" + + ## Configure ingress resource that allows you to access the stats installation.
+ ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + enabled: false + className: "" + annotations: {} + hostname: chart-example-stats.local + tls: + enabled: false + #secretName: + paths: + - path: / + pathType: Prefix + + resources: + limits: + memory: "512Mi" + cpu: 250m + requests: + memory: 512Mi + cpu: 250m + ## Files to mount to stats pod + ## + files: + enabled: false + list: {} + # file.txt: | + # test + mountPath: /tmp/path + + ## Liveness probe + ## + livenessProbe: + enabled: true + path: /health + params: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + ## Readiness probe + ## + readinessProbe: + enabled: true + path: /health + params: + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 60 + ## Stats ENV vars + ## ref: https://github.com/blockscout/blockscout-rs/tree/main/stats#env + env: [] + # NAME: VALUE + envFromSecret: [] + # NAME: VALUE + extraEnv: [] + # - name: STATS__DB_URL + # valueFrom: + # secretKeyRef: + # name: blockscout-stats-secret + # key: STATS__DB_URL + +userOpsIndexer: + enabled: false + ## Image parameters + ## + image: + repository: ghcr.io/blockscout/user-ops-indexer + tag: latest + pullPolicy: IfNotPresent + + replicaCount: 1 + service: + type: ClusterIP + port: 80 + grpc: + enabled: true + port: 8051 + metricsPort: 6060 + + podAnnotations: {} + + ## Annotations to add to user-ops-indexer deployment + annotations: {} + + podSecurityContext: {} + + securityContext: {} + + ## Configure ingress resource that allows you to access the user-ops-indexer installation.
+ ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + enabled: false + className: "" + annotations: {} + hostname: chart-example-stats.local + tls: + enabled: false + #secretName: + paths: + - path: / + pathType: Prefix + + resources: + limits: + memory: "512Mi" + cpu: 250m + requests: + memory: 512Mi + cpu: 250m + + ## Liveness probe + ## + livenessProbe: + enabled: false + path: /health + params: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + ## Readiness probe + ## + readinessProbe: + enabled: false + path: /health + params: + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 60 + ## User ops indexer ENV vars + ## ref: https://github.com/blockscout/blockscout-rs/tree/main/user-ops-indexer#env + env: [] + # NAME: VALUE + envFromSecret: [] + # NAME: VALUE + extraEnv: [] + # - name: USER_OPS_INDEXER__DATABASE__CONNECT__URL + # valueFrom: + # secretKeyRef: + # name: blockscout-userops-indexer-secret + # key: USER_OPS_INDEXER__DATABASE__CONNECT__URL + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" +## Node labels for blockscout-stack pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +tolerations: [] + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} diff --git a/helm/vendor/blockscout-verification/.helmignore b/helm/vendor/blockscout-verification/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/helm/vendor/blockscout-verification/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !).
Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/vendor/blockscout-verification/Chart.yaml b/helm/vendor/blockscout-verification/Chart.yaml new file mode 100644 index 00000000..eec2e3ef --- /dev/null +++ b/helm/vendor/blockscout-verification/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: blockscout-verification +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.0.0" diff --git a/helm/vendor/blockscout-verification/templates/_helpers.tpl b/helm/vendor/blockscout-verification/templates/_helpers.tpl new file mode 100644 index 00000000..7abde75e --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "smart-contract-verification.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "smart-contract-verification.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "smart-contract-verification.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "smart-contract-verification.labels" -}} +helm.sh/chart: {{ include "smart-contract-verification.chart" . }} +{{ include "smart-contract-verification.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "smart-contract-verification.selectorLabels" -}} +app.kubernetes.io/name: {{ include "smart-contract-verification.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "smart-contract-verification.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "smart-contract-verification.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-verification/templates/eth-bytecode-db-deployment.yaml b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-deployment.yaml new file mode 100644 index 00000000..719b7a32 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-deployment.yaml @@ -0,0 +1,99 @@ +{{- if .Values.ethBytecodeDb.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db + labels: + app: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db + {{- include "smart-contract-verification.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.ethBytecodeDb.replicaCount }} + selector: + matchLabels: + app: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db + {{- include "smart-contract-verification.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- if eq .Values.ethBytecodeDb.image.pullPolicy "Always" }} + rollme: {{ randAlphaNum 5 | quote }} + {{- end }} + {{- with .Values.ethBytecodeDb.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db + {{- include "smart-contract-verification.labels" . | nindent 8 }} + {{- with .Values.ethBytecodeDb.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "smart-contract-verification.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.ethBytecodeDb.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-eth-bytecode-db + securityContext: + {{- toYaml .Values.ethBytecodeDb.securityContext | nindent 12 }} + image: "{{ .Values.ethBytecodeDb.image.repository }}:{{ .Values.ethBytecodeDb.image.tag }}" + imagePullPolicy: {{ .Values.ethBytecodeDb.image.pullPolicy }} + env: + - name: ETH_BYTECODE_DB__SERVER__HTTP__ADDR + value: "0.0.0.0:{{ .Values.ethBytecodeDb.service.port }}" + - name: ETH_BYTECODE_DB__VERIFIER__HTTP_URL + value: http://{{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier:{{ .Values.smartContractVerifier.service.port }} + {{- if .Values.metrics.enabled }} + - name: ETH_BYTECODE_DB__METRICS__ENABLED + value: 'true' + - name: ETH_BYTECODE_DB__METRICS__ADDR + value: 0.0.0.0:{{ .Values.metrics.port }} + {{- end }} + {{- range $key, $value := .Values.ethBytecodeDb.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if or .Values.ethBytecodeDb.envFromSecret }} + envFrom: + - secretRef: + name: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db-env + {{- end }} + ports: + - name: http + containerPort: {{ .Values.ethBytecodeDb.service.port }} + protocol: TCP + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.port }} + protocol: TCP + {{- end }} + livenessProbe: + {{- toYaml .Values.ethBytecodeDb.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.ethBytecodeDb.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ethBytecodeDb.resources | nindent 12 }} + {{- with .Values.ethBytecodeDb.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ethBytecodeDb.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-verification/templates/eth-bytecode-db-ingress.yaml b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-ingress.yaml new file mode 100644 index 00000000..81166414 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-ingress.yaml @@ -0,0 +1,51 @@ +{{- if .Values.ethBytecodeDb.ingress.enabled -}} +{{- $fullName := include "smart-contract-verification.fullname" . -}} +{{- $svcPort := .Values.ethBytecodeDb.service.port -}} +{{- if and .Values.ethBytecodeDb.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ethBytecodeDb.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ethBytecodeDb.ingress.annotations "kubernetes.io/ingress.class" .Values.ethBytecodeDb.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-eth-bytecode-db + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} + {{- with .Values.ethBytecodeDb.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ethBytecodeDb.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ethBytecodeDb.ingress.className }} + {{- end }} + {{- if .Values.ethBytecodeDb.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.ethBytecodeDb.ingress.hostname | quote }} + secretName: {{ $fullName }}-eth-bytecode-db-ingress-tls + {{- end }} + rules: + - host: {{ .Values.ethBytecodeDb.ingress.hostname | quote }} + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-eth-bytecode-db + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-eth-bytecode-db + servicePort: {{ $svcPort }} + {{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-verification/templates/eth-bytecode-db-secret.yaml b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-secret.yaml new file mode 100644 index 00000000..09792836 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.ethBytecodeDb.envFromSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db-env + labels: + {{- include "smart-contract-verification.labels" . 
| nindent 4 }} +type: Opaque +data: +{{- range $key, $value := .Values.ethBytecodeDb.envFromSecret }} + {{ $key }}: {{ $value | b64enc }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-verification/templates/eth-bytecode-db-service.yaml b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-service.yaml new file mode 100644 index 00000000..514df333 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-service.yaml @@ -0,0 +1,24 @@ +{{- if .Values.ethBytecodeDb.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} +spec: + type: {{ .Values.ethBytecodeDb.service.type }} + ports: + - port: {{ .Values.ethBytecodeDb.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.port }} + targetPort: metrics + protocol: TCP + name: metrics + {{- end }} + selector: + app: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db + {{- include "smart-contract-verification.selectorLabels" . | nindent 4 }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-verification/templates/eth-bytecode-db-servicemonitor.yaml b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-servicemonitor.yaml new file mode 100644 index 00000000..ad7a1774 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/eth-bytecode-db-servicemonitor.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheus.enabled .Values.ethBytecodeDb.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db + labels: + {{- include "smart-contract-verification.labels" . 
| nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 30s + port: metrics + path: /metrics + selector: + matchLabels: + app: {{ include "smart-contract-verification.fullname" . }}-eth-bytecode-db +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-verification/templates/serviceaccount.yaml b/helm/vendor/blockscout-verification/templates/serviceaccount.yaml new file mode 100644 index 00000000..f3f00f62 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "smart-contract-verification.serviceAccountName" . }} + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/helm/vendor/blockscout-verification/templates/sig-provider-deployment.yaml b/helm/vendor/blockscout-verification/templates/sig-provider-deployment.yaml new file mode 100644 index 00000000..2e71d697 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/sig-provider-deployment.yaml @@ -0,0 +1,94 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-sig-provider + labels: + app: {{ include "smart-contract-verification.fullname" . }}-sig-provider + {{- include "smart-contract-verification.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.sigProvider.replicaCount }} + selector: + matchLabels: + app: {{ include "smart-contract-verification.fullname" . }}-sig-provider + {{- include "smart-contract-verification.selectorLabels" . 
| nindent 6 }} + template: + metadata: + annotations: + {{- if eq .Values.sigProvider.image.pullPolicy "Always" }} + rollme: {{ randAlphaNum 5 | quote }} + {{- end }} + {{- with .Values.sigProvider.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: {{ include "smart-contract-verification.fullname" . }}-sig-provider + {{- include "smart-contract-verification.labels" . | nindent 8 }} + {{- with .Values.sigProvider.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "smart-contract-verification.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.sigProvider.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-sig-provider + securityContext: + {{- toYaml .Values.sigProvider.securityContext | nindent 12 }} + image: "{{ .Values.sigProvider.image.repository }}:{{ .Values.sigProvider.image.tag }}" + imagePullPolicy: {{ .Values.sigProvider.image.pullPolicy }} + env: + - name: SIG_PROVIDER__SERVER__HTTP__ADDR + value: "0.0.0.0:{{ .Values.sigProvider.service.port }}" + {{- if .Values.metrics.enabled }} + - name: SIG_PROVIDER__METRICS__ENABLED + value: 'true' + {{- end }} + {{- if .Values.ethBytecodeDb.enabled }} + - name: SIG_PROVIDER__SOURCES__ETH_BYTECODE_DB__ENABLED + value: 'true' + - name: SIG_PROVIDER__SOURCES__ETH_BYTECODE_DB__URL + value: http://{{ include "smart-contract-verification.fullname" . 
}}-eth-bytecode-db:{{ .Values.ethBytecodeDb.service.port }} + {{- end }} + {{- range $key, $value := .Values.sigProvider.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.sigProvider.service.port }} + protocol: TCP + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.port }} + protocol: TCP + {{- end }} + livenessProbe: + {{- toYaml .Values.sigProvider.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.sigProvider.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.sigProvider.resources | nindent 12 }} + {{- with .Values.sigProvider.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.sigProvider.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/helm/vendor/blockscout-verification/templates/sig-provider-ingress.yaml b/helm/vendor/blockscout-verification/templates/sig-provider-ingress.yaml new file mode 100644 index 00000000..c05e5c7e --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/sig-provider-ingress.yaml @@ -0,0 +1,51 @@ +{{- if .Values.sigProvider.ingress.enabled -}} +{{- $fullName := include "smart-contract-verification.fullname" . 
-}} +{{- $svcPort := .Values.sigProvider.service.port -}} +{{- if and .Values.sigProvider.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.sigProvider.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.sigProvider.ingress.annotations "kubernetes.io/ingress.class" .Values.sigProvider.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-sig-provider + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} + {{- with .Values.sigProvider.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.sigProvider.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.sigProvider.ingress.className }} + {{- end }} + {{- if .Values.sigProvider.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.sigProvider.ingress.hostname | quote }} + secretName: {{ $fullName }}-sig-provider-ingress-tls + {{- end }} + rules: + - host: {{ .Values.sigProvider.ingress.hostname | quote }} + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-sig-provider + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-sig-provider + servicePort: {{ $svcPort }} + {{- end }} +{{- end }} diff --git a/helm/vendor/blockscout-verification/templates/sig-provider-service.yaml b/helm/vendor/blockscout-verification/templates/sig-provider-service.yaml new file mode 100644 index 00000000..951492de --- /dev/null +++ 
b/helm/vendor/blockscout-verification/templates/sig-provider-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-sig-provider + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} +spec: + type: {{ .Values.sigProvider.service.type }} + ports: + - port: {{ .Values.sigProvider.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.port }} + targetPort: metrics + protocol: TCP + name: metrics + {{- end }} + selector: + app: {{ include "smart-contract-verification.fullname" . }}-sig-provider + {{- include "smart-contract-verification.selectorLabels" . | nindent 4 }} diff --git a/helm/vendor/blockscout-verification/templates/sig-provider-servicemonitor.yaml b/helm/vendor/blockscout-verification/templates/sig-provider-servicemonitor.yaml new file mode 100644 index 00000000..ec90a176 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/sig-provider-servicemonitor.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheus.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-sig-provider + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 30s + port: metrics + path: /metrics + selector: + matchLabels: + app: {{ include "smart-contract-verification.fullname" . 
}}-sig-provider +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-verification/templates/smart-contract-verifier-deployment.yaml b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-deployment.yaml new file mode 100644 index 00000000..079c0077 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-deployment.yaml @@ -0,0 +1,108 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier + labels: + app: {{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier + {{- include "smart-contract-verification.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.smartContractVerifier.replicaCount }} + selector: + matchLabels: + app: {{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier + {{- include "smart-contract-verification.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- if eq .Values.smartContractVerifier.image.pullPolicy "Always" }} + rollme: {{ randAlphaNum 5 | quote }} + {{- end }} + {{- with .Values.smartContractVerifier.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: {{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier + {{- include "smart-contract-verification.labels" . | nindent 8 }} + {{- with .Values.smartContractVerifier.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "smart-contract-verification.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.smartContractVerifier.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-smart-contract-verifier + securityContext: + {{- toYaml .Values.smartContractVerifier.securityContext | nindent 12 }} + image: "{{ .Values.smartContractVerifier.image.repository }}:{{ .Values.smartContractVerifier.image.tag }}" + imagePullPolicy: {{ .Values.smartContractVerifier.image.pullPolicy }} + env: + - name: SMART_CONTRACT_VERIFIER__SERVER__HTTP__ADDR + value: "0.0.0.0:{{ .Values.smartContractVerifier.service.port }}" + {{- if .Values.smartContractVerifier.grpc.enabled }} + - name: SMART_CONTRACT_VERIFIER__SERVER__GRPC__ENABLED + value: 'true' + - name: SMART_CONTRACT_VERIFIER__SERVER__GRPC__ADDR + value: 0.0.0.0:{{ .Values.smartContractVerifier.grpc.port }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: SMART_CONTRACT_VERIFIER__METRICS__ENABLED + value: 'true' + - name: SMART_CONTRACT_VERIFIER__METRICS__ADDR + value: 0.0.0.0:{{ .Values.metrics.port }} + - name: SMART_CONTRACT_VERIFIER__METRICS__ROUTE + value: /metrics + {{- end }} + {{- range $key, $value := .Values.smartContractVerifier.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if or .Values.smartContractVerifier.envFromSecret }} + envFrom: + - secretRef: + name: {{ include "smart-contract-verification.fullname" . 
}}-smart-contract-verifier-env + {{- end }} + ports: + - name: http + containerPort: {{ .Values.smartContractVerifier.service.port }} + protocol: TCP + {{- if .Values.smartContractVerifier.grpc.enabled }} + - name: grpc + containerPort: {{ .Values.smartContractVerifier.grpc.port }} + protocol: TCP + {{- end}} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.port }} + protocol: TCP + {{- end }} + livenessProbe: + {{- toYaml .Values.smartContractVerifier.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.smartContractVerifier.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.smartContractVerifier.resources | nindent 12 }} + {{- with .Values.smartContractVerifier.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.smartContractVerifier.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/helm/vendor/blockscout-verification/templates/smart-contract-verifier-ingress.yaml b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-ingress.yaml new file mode 100644 index 00000000..bcad7d1a --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-ingress.yaml @@ -0,0 +1,103 @@ +{{- if .Values.smartContractVerifier.ingress.enabled -}} +{{- $fullName := include "smart-contract-verification.fullname" . 
-}} +{{- $svcPort := .Values.smartContractVerifier.service.port -}} +{{- if and .Values.smartContractVerifier.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.smartContractVerifier.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.smartContractVerifier.ingress.annotations "kubernetes.io/ingress.class" .Values.smartContractVerifier.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-smart-contract-verifier + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} + {{- with .Values.smartContractVerifier.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.smartContractVerifier.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.smartContractVerifier.ingress.className }} + {{- end }} + {{- if .Values.smartContractVerifier.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.smartContractVerifier.ingress.hostname | quote }} + secretName: {{ $fullName }}-smart-contract-verifier-ingress-tls + {{- end }} + rules: + - host: {{ .Values.smartContractVerifier.ingress.hostname | quote }} + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-smart-contract-verifier + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-smart-contract-verifier + servicePort: {{ $svcPort }} + {{- end }} +{{- end }} +--- +{{- if .Values.smartContractVerifier.grpc.ingress.enabled -}} +{{- $fullName := include "smart-contract-verification.fullname" . -}} +{{- $svcPort := .Values.smartContractVerifier.grpc.port -}} +{{- if and .Values.smartContractVerifier.grpc.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.smartContractVerifier.grpc.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.smartContractVerifier.grpc.ingress.annotations "kubernetes.io/ingress.class" .Values.smartContractVerifier.grpc.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-smart-contract-verifier-grpc + labels: + {{- include "smart-contract-verification.labels" . 
| nindent 4 }} + {{- with .Values.smartContractVerifier.grpc.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.smartContractVerifier.grpc.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.smartContractVerifier.grpc.ingress.className }} + {{- end }} + {{- if .Values.smartContractVerifier.grpc.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.smartContractVerifier.grpc.ingress.hostname | quote }} + secretName: {{ $fullName }}-grpc-smart-contract-verifier-ingress-tls + {{- end }} + rules: + - host: {{ .Values.smartContractVerifier.grpc.ingress.hostname | quote }} + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }}-smart-contract-verifier + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }}-smart-contract-verifier + servicePort: {{ $svcPort }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-verification/templates/smart-contract-verifier-secret.yaml b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-secret.yaml new file mode 100644 index 00000000..5abb56aa --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.smartContractVerifier.envFromSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier-env + labels: + {{- include "smart-contract-verification.labels" . 
| nindent 4 }} +type: Opaque +data: +{{- range $key, $value := .Values.smartContractVerifier.envFromSecret }} + {{ $key }}: {{ $value | b64enc }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-verification/templates/smart-contract-verifier-service.yaml b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-service.yaml new file mode 100644 index 00000000..959a983f --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-service.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} +spec: + type: {{ .Values.smartContractVerifier.service.type }} + ports: + - port: {{ .Values.smartContractVerifier.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if .Values.smartContractVerifier.grpc.enabled }} + - port: {{ .Values.smartContractVerifier.grpc.port }} + targetPort: grpc + protocol: TCP + name: grpc + {{- end}} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.port }} + targetPort: metrics + protocol: TCP + name: metrics + {{- end }} + selector: + app: {{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier + {{- include "smart-contract-verification.selectorLabels" . | nindent 4 }} diff --git a/helm/vendor/blockscout-verification/templates/smart-contract-verifier-servicemonitor.yaml b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-servicemonitor.yaml new file mode 100644 index 00000000..91250052 --- /dev/null +++ b/helm/vendor/blockscout-verification/templates/smart-contract-verifier-servicemonitor.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheus.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "smart-contract-verification.fullname" . 
}}-smart-contract-verifier + labels: + {{- include "smart-contract-verification.labels" . | nindent 4 }} +spec: + endpoints: + - scrapeTimeout: 30s + port: metrics + path: /metrics + selector: + matchLabels: + app: {{ include "smart-contract-verification.fullname" . }}-smart-contract-verifier +{{- end }} \ No newline at end of file diff --git a/helm/vendor/blockscout-verification/values.yaml b/helm/vendor/blockscout-verification/values.yaml new file mode 100644 index 00000000..5b6cdd87 --- /dev/null +++ b/helm/vendor/blockscout-verification/values.yaml @@ -0,0 +1,262 @@ +# Default values for sig-provider. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +sigProvider: + replicaCount: 1 + + image: + repository: ghcr.io/blockscout/sig-provider + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "latest" + + podAnnotations: {} + podLabels: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 8043 + + ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hostname: sig-provider.local + tls: + enabled: false + + env: [] + # NAME: VALUE + + resources: + limits: + memory: 256Mi + cpu: 250m + requests: + memory: 128Mi + cpu: 100m + + livenessProbe: + httpGet: + path: /health?service= + port: http + readinessProbe: + httpGet: + path: /health?service= + port: http + + # Additional volumes on the output Deployment definition. + volumes: [] + # - name: foo + # secret: + # secretName: mysecret + # optional: false + + # Additional volumeMounts on the output Deployment definition. + volumeMounts: [] + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true + +smartContractVerifier: + replicaCount: 1 + + image: + repository: ghcr.io/blockscout/smart-contract-verifier + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "latest" + + podAnnotations: {} + podLabels: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 8050 + grpc: + enabled: false + port: 8051 + #GRPC ingress configuration + ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hostname: grpc-smart-contract-verifier.local + tls: + enabled: false + ingress: + enabled: true + className: "public" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hostname: smart-contract-verifier.local + tls: + enabled: false + + env: [] + # NAME: VALUE + envFromSecret: [] + # NAME: VALUE + + resources: + limits: + memory: "8Gi" + cpu: "1" + requests: + memory: "1Gi" + cpu: "250m" + + livenessProbe: + httpGet: + path: /health?service= + port: http + readinessProbe: + httpGet: + path: /health?service= + port: http + + # Additional volumes on the output Deployment definition. + volumes: [] + # - name: foo + # secret: + # secretName: mysecret + # optional: false + + # Additional volumeMounts on the output Deployment definition. + volumeMounts: [] + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true +ethBytecodeDb: + enabled: false + replicaCount: 1 + + image: + repository: ghcr.io/blockscout/eth-bytecode-db + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "latest" + + podAnnotations: {} + podLabels: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 8050 + + ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hostname: eth-bytecode-db.local + tls: + enabled: false + + env: [] + # NAME: VALUE + envFromSecret: [] + # NAME: VALUE + resources: + limits: + memory: 1Gi + cpu: 500m + requests: + memory: 512Mi + cpu: 250m + + livenessProbe: + httpGet: + path: /health?service= + port: http + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: /health?service= + port: http + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + + # Additional volumes on the output Deployment definition. + volumes: [] + # - name: foo + # secret: + # secretName: mysecret + # optional: false + + # Additional volumeMounts on the output Deployment definition. + volumeMounts: [] + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true + + +metrics: + enabled: false + port: 6060 + prometheus: + enabled: false + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/helm/vendor/docker-registry-ui/.helmignore b/helm/vendor/docker-registry-ui/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/helm/vendor/docker-registry-ui/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/vendor/docker-registry-ui/Chart.yaml b/helm/vendor/docker-registry-ui/Chart.yaml new file mode 100644 index 00000000..9d365fb0 --- /dev/null +++ b/helm/vendor/docker-registry-ui/Chart.yaml @@ -0,0 +1,30 @@ +annotations: + artifacthub.io/images: | + - name: docker-registry-ui + image: joxit/docker-registry-ui:2.5.2 + - name: registry + image: registry:2.8.2 + artifacthub.io/license: MIT + artifacthub.io/links: | + - name: Documentation + url: https://joxit.dev/docker-registry-ui + - name: Joxit/docker-registry-ui + url: https://github.com/Joxit/docker-registry-ui + - name: Joxit/helm-charts + url: https://github.com/Joxit/helm-charts + artifacthub.io/prerelease: "false" +apiVersion: v2 +appVersion: 2.5.2 +description: The simplest and most complete UI for your private registry +home: https://github.com/Joxit/docker-registry-ui +keywords: +- docker +- registry +- user-interface +- interface +kubeVersion: '>=1.19.0-0' +name: docker-registry-ui +sources: +- https://github.com/Joxit/docker-registry-ui +- https://github.com/Joxit/helm-charts +version: 1.1.4-patched diff --git a/helm/vendor/docker-registry-ui/README.md b/helm/vendor/docker-registry-ui/README.md new file mode 100644 index 00000000..a67cf52d --- /dev/null +++ b/helm/vendor/docker-registry-ui/README.md @@ -0,0 +1,140 @@ +# Docker Registry UI Chart + +[![Stars](https://img.shields.io/github/stars/joxit/docker-registry-ui.svg?logo=github&maxAge=86400)](https://github.com/Joxit/docker-registry-ui/stargazers) +[![Pulls](https://img.shields.io/docker/pulls/joxit/docker-registry-ui.svg?maxAge=86400)](https://hub.docker.com/r/joxit/docker-registry-ui) +[![Sponsor](https://joxit.dev/images/sponsor.svg)](https://github.com/sponsors/Joxit) +[![Artifact 
Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/joxit)](https://artifacthub.io/packages/helm/joxit/docker-registry-ui) + +## Overview + +This project aims to provide a simple and complete user interface for your private docker registry. You can customize the interface with various options. The major option is `ui.singleRegistry` which allows you to disable the dynamic selection of docker registries. + +If you like my work and want to support it, don't hesitate to [sponsor me](https://github.com/sponsors/Joxit). + +## [Project Page](https://joxit.dev/docker-registry-ui), [Live Demo](https://joxit.dev/docker-registry-ui/demo/), [Examples](https://github.com/Joxit/docker-registry-ui/tree/main/examples), [Helm Chart](https://helm.joxit.dev/charts/docker-registry-ui/) + +![preview](https://raw.github.com/Joxit/docker-registry-ui/main/docker-registry-ui.gif "Preview of Docker Registry UI") + +## Prerequisites + + * **Helm 3.2+** (Helm 2 is not supported) + * **Kubernetes 1.19+** - This is the earliest version of Kubernetes tested. + It is possible that this chart works with earlier versions but it is untested. + +## Usage + +1. Add my Helm repository (named `joxit`) +``` +helm repo add joxit https://helm.joxit.dev +``` +2. Ensure you have access to the Helm chart and you see the latest chart version listed. If you have previously added the Helm repository, run `helm repo update`. +``` +helm search repo joxit/docker-registry-ui +``` +3. Now you're ready to install the Docker Registry UI! To install Docker Registry UI with the default configuration using Helm 3.2 run the following command below. This will deploy the Docker Registry UI on the default namespace. +``` +helm upgrade --install docker-registry-ui joxit/docker-registry-ui +``` + +## Configuration + +### Global + +| Value | Default | Description | +| --- | --- | --- | +| `global.name` | `null` | Set the prefix used for all resources in the Helm chart. 
If not set, the prefix will be ``. | +| `global.imagePullSecrets` | `[]` | The default array of objects containing image pull secret names that will be applied. | +| `global.imagePullPolicy` | `IfNotPresent` | The default image policy for images: `IfNotPresent`, `Always`, `Never` | + +### User Interface + +| Value | Default | Description | +| --- | --- | --- | +| `ui.replicas` | `1` | Number of replicas for the Deployment. | +| `ui.title` | `"Docker registry UI"` | Title of the registry | +| `ui.proxy` | `false` | UI behave as a proxy of the registry | +| `ui.dockerRegistryUrl` | `null` | The URL of your docker registry, may be a service (when proxy is on) or an external URL. | +| `ui.pullUrl` | `null` | Override the pull URL | +| `ui.singleRegistry` | `true` | Remove the menu that show the dialogs to add, remove and change the endpoint of your docker registry. | +| `ui.registrySecured` | `false` | By default, the UI will check on every requests if your registry is secured or not (you will see `401` responses in your console). Set to `true` if your registry uses Basic Authentication and divide by two the number of call to your registry. | +| `ui.showCatalogNbTags` | `false` | Show number of tags per images on catalog page. This will produce + nb images requests, not recommended on large registries. | +| `ui.catalogElementsLimit` | `1000` | Limit the number of elements in the catalog page. | +| `ui.catalogDefaultExpanded` | `false` | Expand by default all repositories in catalog | +| `ui.catalogMinBranches` | `1` | Set the minimum repository/namespace to expand (e.g. `joxit/docker-registry-ui` `joxit/` is the repository/namespace). Can be 0 to disable branching. | +| `ui.catalogMaxBranches` | `1` | Set the maximum repository/namespace to expand (e.g. `joxit/docker-registry-ui` `joxit/` is the repository/namespace). Can be 0 to disable branching. 
| +| `ui.deleteImages` | `false` | Allow delete of images | +| `ui.showContentDigest` | `false` | Show content digest in docker tag list. | +| `ui.taglistOrder` | `alpha-asc;num-desc` | Set the default order for the taglist page, could be `num-asc;alpha-asc`, `num-desc;alpha-asc`, `num-asc;alpha-desc`, `num-desc;alpha-desc`, `alpha-asc;num-asc`, `alpha-asc;num-desc`, `alpha-desc;num-asc` or `alpha-desc;num-desc`. | +| `ui.taglistPageSize` | `100` | Set the number of tags to display in one page. | +| `ui.historyCustomLabels` | `[]` | Expose custom labels in history page, custom labels will be processed like maintainer label. | +| `ui.nginxProxyHeaders` | `[]` | Update the default Nginx configuration and **set custom headers** for your backend docker registry. Only when `ui.proxy` is used. Example: nginxProxyHeaders: [ { my-header-name: my-header-value } ] | +| `ui.nginxProxyPassHeaders` | `[]` | Update the default Nginx configuration and **forward custom headers** to your backend docker registry. Only when `ui.proxy` is used. Example: nginxProxyPassHeaders: [ my-first-header, my-second-header ] | +| `ui.useControlCacheHeader` | `false` | Add header Cache-Control: no-store, no-cache on requests to registry server. This needs to update your registry configuration with : `Access-Control-Allow-Headers: ['Authorization', 'Accept', 'Cache-Control']` | +| `ui.runAsRoot` | `true` | Use root or nginx user inside the container, when this is false the target port must be greater or equal to 1024. 
| +| `ui.defaultTheme` | `"auto"` | Select the default theme to apply, values can be `auto`, `dark` and `light` | +| `ui.theme.background` | `""` | Custom background color for the UI | +| `ui.theme.primaryText` | `""` | Custom primary text color for the UI | +| `ui.theme.neutralText` | `""` | Custom neutral color for the UI (icons) | +| `ui.theme.accentText` | `""` | Custom accent color for the UI (buttons) | +| `ui.theme.hoverBackground` | `""` | Custom hover background color for the UI | +| `ui.theme.headerBackground` | `""` | Custom header background color for the UI | +| `ui.theme.headerText` | `""` | Custom header text color for the UI | +| `ui.theme.footerBackground` | `""` | Custom footer background color for the UI | +| `ui.theme.footerText` | `""` | Custom footer text color for the UI | +| `ui.theme.footerNeutralText` | `""` | Custom footer neutral color for the UI (links) | +| `ui.image` | `joxit/docker-registry-ui:2.5.2` | The name and tag of the docker image of the interface | +| `ui.imagePullSecrets` | `"-"` | Override default image pull secrets | +| `ui.imagePullPolicy` | `"-"` | Override default pull policy | +| `ui.resources` | `{}` | The resource settings for user interface pod. | +| `ui.nodeSelector` | `{}` | Optional YAML string to specify a nodeSelector config. | +| `ui.tolerations` | `[]` | Optional YAML string to specify tolerations. | +| `ui.affinity` | `{}` | This value defines the [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) for server pods. | +| `ui.annotations` | `{}` | Annotations to apply to the user interface deployment. | +| `ui.additionalSpec` | `{}` | Optional YAML string that will be appended to the deployment spec. | +| `ui.service.type` | `ClusterIP` | Type of service: `LoadBalancer`, `ClusterIP` or `NodePort`. If using `NodePort` service type, you must set the desired `nodePorts` setting below. 
| +| `ui.service.port` | `80` | Ports that will be exposed on the service | +| `ui.service.targetPort` | `80` | The port to listen on the container. If under 1024, the user must be root | +| `ui.service.nodePort` | `null` | If using a `NodePort` service type, you must specify the desired `nodePort` for each exposed port. | +| `ui.service.annotations` | `{}` | Annotations to apply to the user interface service. | +| `ui.service.additionalSpec` | `{}` | Optional YAML string that will be appended to the Service spec. | +| `ui.ingress.enabled` | `false` | Enable the ingress for the user interface. | +| `ui.ingress.host` | `null` | Fully qualified domain name of a network host. | +| `ui.ingress.path` | `/` | Path is matched against the path of an incoming request. | +| `ui.ingress.pathType` | `Prefix` | Determines the interpretation of the Path matching, must be Prefix to serve assets. | +| `ui.ingress.ingressClassName` | `nginx` | The name of an IngressClass cluster resource. | +| `ui.ingress.tls` | `[]` | TLS configuration | +| `ui.ingress.annotations` | `{}` | Annotations to apply to the user interface ingress. | + +### Registry Server + +| Value | Default | Description | +| --- | --- | --- | +| `registry.enabled` | `false` | Enable the registry server. | +| `registry.image` | `registry:2.8.2` | The name and tag of the docker registry server image | +| `registry.imagePullSecrets` | `"-"` | Override default image pull secrets | +| `registry.imagePullPolicy` | `"-"` | Override default pull policy | +| `registry.dataVolume` | `null` | Configuration for the data directory. When null it will create an emptyDir. | +| `registry.resources` | `{}` | The resource settings for registry server pod. | +| `registry.nodeSelector` | `{}` | Optional YAML string to specify a nodeSelector config. | +| `registry.tolerations` | `[]` | Optional YAML string to specify tolerations. 
| +| `registry.affinity` | `{}` | This value defines the [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) for server pods. | +| `registry.annotations` | `{}` | Annotations to apply to the registry server deployment. | +| `registry.additionalSpec` | `{}` | Optional YAML string that will be appended to the deployment spec. | +| `registry.extraEnv` | `[]` | Extra Environmental Variables for Registry | +| `registry.auth.basic.enabled` | `false` | Enable basic auth for Registry. | +| `registry.auth.basic.realm` | `Docker registry` | Basic auth realm. | +| `registry.auth.basic.htpasswdPath` | `/etc/docker/registry/auth/htpasswd` | Full path for htpasswd file. Note that filename should match the secret key. | +| `registry.auth.basic.secretName` | `''` | htpasswd secret name volume to mount. | +| `registry.service.type` | `ClusterIP` | Type of service: `LoadBalancer`, `ClusterIP` or `NodePort`. If using `NodePort` service type, you must set the desired `nodePorts` setting below. | +| `registry.service.port` | `5000` | Ports that will be exposed on the service | +| `registry.service.targetPort` | `5000` | The port to listen on the container. | +| `registry.service.nodePort` | `null` | If using a `NodePort` service type, you must specify the desired `nodePort` for each exposed port. | +| `registry.service.annotations` | `{}` | Annotations to apply to the registry server service. | +| `registry.service.additionalSpec` | `{}` | Optional YAML string that will be appended to the Service spec. | +| `registry.ingress.enabled` | `false` | Enable the ingress for the registry server. | +| `registry.ingress.host` | `null` | Fully qualified domain name of a network host. | +| `registry.ingress.path` | `/v2/` | Path is matched against the path of an incoming request. | +| `registry.ingress.pathType` | `Prefix` | Determines the interpretation of the Path matching, must be Prefix to serve assets. 
| +| `registry.ingress.ingressClassName` | `nginx` | The name of an IngressClass cluster resource. | +| `registry.ingress.tls` | `[]` | TLS configuration | +| `registry.ingress.annotations` | `{}` | Annotations to apply to the registry server ingress. | diff --git a/helm/vendor/docker-registry-ui/README.tmpl b/helm/vendor/docker-registry-ui/README.tmpl new file mode 100644 index 00000000..e2c1cd18 --- /dev/null +++ b/helm/vendor/docker-registry-ui/README.tmpl @@ -0,0 +1,28 @@ +# {{ prettyName }} Chart + +[![Stars](https://img.shields.io/github/stars/joxit/docker-registry-ui.svg?logo=github&maxAge=86400)](https://github.com/Joxit/docker-registry-ui/stargazers) +[![Pulls](https://img.shields.io/docker/pulls/joxit/docker-registry-ui.svg?maxAge=86400)](https://hub.docker.com/r/joxit/docker-registry-ui) +[![Sponsor](https://joxit.dev/images/sponsor.svg)](https://github.com/sponsors/Joxit) +[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/joxit)](https://artifacthub.io/packages/helm/joxit/docker-registry-ui) + +## Overview + +This project aims to provide a simple and complete user interface for your private docker registry. You can customize the interface with various options. The major option is `ui.singleRegistry` which allows you to disable the dynamic selection of docker registries. + +If you like my work and want to support it, don't hesitate to [sponsor me](https://github.com/sponsors/Joxit). 
+ +## [Project Page](https://joxit.dev/docker-registry-ui), [Live Demo](https://joxit.dev/docker-registry-ui/demo/), [Examples](https://github.com/Joxit/docker-registry-ui/tree/main/examples), [Helm Chart](https://helm.joxit.dev/charts/docker-registry-ui/) + +![preview](https://raw.github.com/Joxit/docker-registry-ui/main/docker-registry-ui.gif "Preview of Docker Registry UI") + +## Prerequisites + +{{ prerequisites }} + +## Usage + +{{ usage }} + +## Configuration + +{{ configuration }} diff --git a/helm/vendor/docker-registry-ui/templates/NOTES.txt b/helm/vendor/docker-registry-ui/templates/NOTES.txt new file mode 100644 index 00000000..9b179e38 --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/NOTES.txt @@ -0,0 +1,8 @@ +Thank you for installing Joxit's Docker Registry UI! + +Your release is named {{ .Release.Name }}. + +To learn more about the release, run: + + $ helm status {{ .Release.Name }} {{- if .Release.Namespace }} --namespace {{ .Release.Namespace }}{{ end }} + $ helm get all {{ .Release.Name }} {{- if .Release.Namespace }} --namespace {{ .Release.Namespace }}{{ end }} diff --git a/helm/vendor/docker-registry-ui/templates/_helpers.tpl b/helm/vendor/docker-registry-ui/templates/_helpers.tpl new file mode 100644 index 00000000..4fd7015b --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to +this (by the DNS naming spec). Supports the legacy fullnameOverride setting +as well as the global.name setting. 
+*/}} +{{- define "docker-registry-ui.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.global.name -}} +{{- .Values.global.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "docker-registry-ui.chart" -}} +{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "docker-registry-ui.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels. +*/}} +{{- define "docker-registry-ui.labels" -}} +app.kubernetes.io/name: {{ include "docker-registry-ui.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ include "docker-registry-ui.chart" . }} +{{- end -}} \ No newline at end of file diff --git a/helm/vendor/docker-registry-ui/templates/registry-deployment.yaml b/helm/vendor/docker-registry-ui/templates/registry-deployment.yaml new file mode 100644 index 00000000..9c5f31e9 --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/registry-deployment.yaml @@ -0,0 +1,103 @@ +{{- if .Values.registry.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "docker-registry-ui.fullname" . }}-registry-server + labels: + app.kubernetes.io/component : registry-server + {{- include "docker-registry-ui.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.registry.replicas }} + selector: + matchLabels: + app.kubernetes.io/component : registry-server + {{- include "docker-registry-ui.labels" . 
| nindent 6 }} + template: + metadata: + labels: + app.kubernetes.io/component : registry-server + {{- include "docker-registry-ui.labels" . | nindent 8 }} + {{- if .Values.registry.annotations }} + annotations: + {{- toYaml .Values.registry.annotations | nindent 8 }} + {{- end }} + spec: + {{- if ne (.Values.registry.imagePullSecrets | toString) "-" }} + imagePullSecrets: + {{- toYaml .Values.registry.imagePullSecrets | nindent 8 }} + {{- else }} + imagePullSecrets: + {{- toYaml .Values.global.imagePullSecrets | nindent 8 }} + {{- end}} + containers: + - name: "registry-server" + image: {{ .Values.registry.image | quote }} + imagePullPolicy: {{ if ne (.Values.registry.imagePullPolicy | toString) "-" }}{{ .Values.registry.imagePullPolicy }}{{ else }}{{ .Values.global.imagePullPolicy }}{{ end }} + env: + - name: REGISTRY_HTTP_ADDR + value: {{ printf "%s:%d" "0.0.0.0" (.Values.registry.service.targetPort | int) }} + {{- if .Values.ui.deleteImages }} + - name: REGISTRY_STORAGE_DELETE_ENABLED + value: 'true' + {{- end }} + {{- if .Values.registry.auth.basic.enabled }} + - name: REGISTRY_AUTH + value: htpasswd + - name: REGISTRY_AUTH_HTPASSWD_REALM + value: {{ if ne (.Values.registry.auth.basic.realm | toString) "-" }}{{ .Values.registry.auth.basic.realm }}{{ else }}{{ "Docker registry" }}{{ end }} + - name: REGISTRY_AUTH_HTPASSWD_PATH + value: {{ if ne (.Values.registry.auth.basic.htpasswdPath | toString) "-" }}{{ .Values.registry.auth.basic.htpasswdPath }}{{ else }}{{ "/etc/docker/registry/auth/htpasswd" }}{{ end }} + {{- end }} + {{- range .Values.registry.extraEnv }} + - name: {{ .name | quote }} + value: {{ .value | quote }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.registry.service.targetPort }} + protocol: TCP + volumeMounts: + - mountPath: /var/lib/registry + name: data + {{- if .Values.registry.auth.basic.enabled }} + - name: htpasswd + mountPath: {{ if ne (.Values.registry.auth.basic.htpasswdPath | toString) "-" }}{{ dir 
.Values.registry.auth.basic.htpasswdPath }}{{ else }}{{ "/etc/docker/registry/auth" }}{{ end }} + readOnly: true + {{- end }} + resources: + {{- toYaml .Values.registry.resources | nindent 12 }} + volumes: + - name: data + {{- if .Values.registry.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "docker-registry-ui.fullname" . }}-registry-data + {{- else if .Values.registry.dataVolume }} + {{- toYaml .Values.registry.dataVolume | nindent 10 }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.registry.auth.basic.enabled }} + - name: htpasswd + secret: + secretName: {{ if .Values.registry.auth.basic.secretName }}{{ .Values.registry.auth.basic.secretName }}{{ else }}{{ fail "Basic auth secret name is required" }}{{ end }} + {{- end }} + {{- with .Values.registry.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.registry.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.registry.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.registry.runAsRoot }} + securityContext: + runAsUser: 101 + {{- end }} + {{- if .Values.registry.additionalSpec }} + {{ tpl .Values.registry.additionalSpec . | nindent 6 | trim }} + {{- end }} +{{- end }} diff --git a/helm/vendor/docker-registry-ui/templates/registry-ingress.yaml b/helm/vendor/docker-registry-ui/templates/registry-ingress.yaml new file mode 100644 index 00000000..ec7b6bce --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/registry-ingress.yaml @@ -0,0 +1,38 @@ +{{- if .Values.registry.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "docker-registry-ui.fullname" . }}-registry-server + labels: + app.kubernetes.io/component : registry-server + {{- include "docker-registry-ui.labels" . | nindent 4 }} + {{- with .Values.registry.ingress.annotations }} + annotations: + {{- tpl (toYaml .) 
$ | nindent 4 }} + {{- end }} +spec: + {{- if .Values.registry.ingress.ingressClassName }} + ingressClassName: {{ .Values.registry.ingress.ingressClassName }} + {{- end -}} +{{- if .Values.registry.ingress.tls }} + tls: +{{ tpl (toYaml .Values.registry.ingress.tls) $ | indent 4 }} +{{- end }} + rules: + - http: + paths: + - backend: + service: + name: {{ include "docker-registry-ui.fullname" . }}-registry-server + port: + number: {{ .Values.registry.service.port }} + {{- if .Values.registry.ingress.path }} + path: {{ .Values.registry.ingress.path }} + {{- end }} + {{- if .Values.registry.ingress.pathType }} + pathType: {{ .Values.registry.ingress.pathType }} + {{- end }} + {{- if .Values.registry.ingress.host }} + host: {{ tpl (.Values.registry.ingress.host) $ | quote }} + {{- end -}} +{{- end }} diff --git a/helm/vendor/docker-registry-ui/templates/registry-pvc.yaml b/helm/vendor/docker-registry-ui/templates/registry-pvc.yaml new file mode 100644 index 00000000..c511d41c --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/registry-pvc.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.registry.enabled .Values.registry.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "docker-registry-ui.fullname" . }}-registry-data + labels: + app.kubernetes.io/component: registry-server + {{- include "docker-registry-ui.labels" . | nindent 4 }} + {{- with .Values.registry.persistence.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.registry.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.registry.persistence.size | quote }} + {{- if .Values.registry.persistence.storageClass }} + {{- if (eq "-" .Values.registry.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: {{ .Values.registry.persistence.storageClass | quote }} + {{- end }} + {{- end }} + {{- if .Values.registry.persistence.selector }} + selector: + {{- toYaml .Values.registry.persistence.selector | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/docker-registry-ui/templates/registry-service.yaml b/helm/vendor/docker-registry-ui/templates/registry-service.yaml new file mode 100644 index 00000000..5da15269 --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/registry-service.yaml @@ -0,0 +1,29 @@ +{{- if .Values.registry.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "docker-registry-ui.fullname" . }}-registry-server + labels: + app.kubernetes.io/component : registry-server + {{- include "docker-registry-ui.labels" . | nindent 4 }} + {{- with .Values.registry.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + app.kubernetes.io/component : registry-server + {{- include "docker-registry-ui.labels" . | nindent 4 }} + type: {{ .Values.registry.service.type }} + ports: + - port: {{ .Values.registry.service.port }} + targetPort: {{ .Values.registry.service.targetPort }} + protocol: TCP + name: http + {{- if (and (eq .Values.registry.service.type "NodePort") .Values.registry.service.nodePort) }} + nodePort: {{ .Values.registry.service.nodePort }} + {{- end }} + {{- if .Values.registry.service.additionalSpec }} + {{ tpl .Values.registry.service.additionalSpec . 
| nindent 2 | trim }} + {{- end }} +{{- end }} diff --git a/helm/vendor/docker-registry-ui/templates/ui-deployment.yaml b/helm/vendor/docker-registry-ui/templates/ui-deployment.yaml new file mode 100644 index 00000000..77cdcae8 --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/ui-deployment.yaml @@ -0,0 +1,139 @@ +{{- if and (not .Values.ui.runAsRoot) (lt (.Values.ui.service.targetPort | int) 1024) }} +{{ fail "When `ui.runAsRoot` is false `ui.service.targetPort` must be greater than or equal to 1024." }} +{{- end }} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "docker-registry-ui.fullname" . }}-user-interface + labels: + app.kubernetes.io/component : user-interface + {{- include "docker-registry-ui.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.ui.replicas }} + selector: + matchLabels: + app.kubernetes.io/component : user-interface + {{- include "docker-registry-ui.labels" . | nindent 6 }} + template: + metadata: + labels: + app.kubernetes.io/component : user-interface + {{- include "docker-registry-ui.labels" . 
| nindent 8 }} + {{- if .Values.ui.annotations }} + annotations: + {{- toYaml .Values.ui.annotations | nindent 8 }} + {{- end }} + spec: + {{- if ne (.Values.ui.imagePullSecrets | toString) "-" }} + imagePullSecrets: + {{- toYaml .Values.ui.imagePullSecrets | nindent 8 }} + {{- else }} + imagePullSecrets: + {{- toYaml .Values.global.imagePullSecrets | nindent 8 }} + {{- end}} + containers: + - name: "registry-ui" + image: {{ .Values.ui.image | quote }} + imagePullPolicy: {{ if ne (.Values.ui.imagePullPolicy | toString) "-" }}{{ .Values.ui.imagePullPolicy }}{{ else }}{{ .Values.global.imagePullPolicy }}{{ end }} + env: + - name: REGISTRY_TITLE + value: {{ .Values.ui.title | quote }} + - name: DELETE_IMAGES + value: {{ .Values.ui.deleteImages | quote }} + {{- if .Values.ui.proxy }} + {{- if tpl (.Values.ui.dockerRegistryUrl) $ }} + - name: NGINX_PROXY_PASS_URL + value: {{ tpl (.Values.ui.dockerRegistryUrl) $ | quote }} + {{- else if .Values.registry.enabled }} + - name: NGINX_PROXY_PASS_URL + value: {{ printf "http://%s-registry-server:%d" (include "docker-registry-ui.fullname" .) 
(.Values.registry.service.port | int) }} + {{- end }} + {{- range $header := .Values.ui.nginxProxyHeaders }} + {{- range $key, $value := $header }} + - name: {{ printf "NGINX_PROXY_HEADER_%s" $key }} + value: {{ $value }} + {{- end }} + {{- end }} + {{- range $header := .Values.ui.nginxProxyPassHeaders }} + - name: {{ printf "NGINX_PROXY_PASS_HEADER_%s" $header }} + {{- end }} + {{- else }} + - name: REGISTRY_URL + value: {{ tpl (.Values.ui.dockerRegistryUrl) $ | quote }} + {{- end }} + - name: PULL_URL + value: {{ .Values.ui.pullUrl | quote }} + - name: SHOW_CATALOG_NB_TAGS + value: {{ .Values.ui.showCatalogNbTags | quote }} + - name: SHOW_CONTENT_DIGEST + value: {{ .Values.ui.showContentDigest | quote }} + - name: SINGLE_REGISTRY + value: {{ .Values.ui.singleRegistry | quote }} + - name: CATALOG_ELEMENTS_LIMIT + value: {{ .Values.ui.catalogElementsLimit | quote }} + - name: HISTORY_CUSTOM_LABELS + value: {{ .Values.ui.historyCustomLabels | join "," }} + - name: NGINX_LISTEN_PORT + value: {{ .Values.ui.service.targetPort | quote }} + - name: USE_CONTROL_CACHE_HEADER + value: {{ .Values.ui.useControlCacheHeader | quote }} + - name: TAGLIST_ORDER + value: {{ .Values.ui.taglistOrder | quote }} + - name: CATALOG_DEFAULT_EXPANDED + value: {{ .Values.ui.catalogDefaultExpanded | quote }} + - name: CATALOG_MIN_BRANCHES + value: {{ .Values.ui.catalogMinBranches | quote }} + - name: CATALOG_MAX_BRANCHES + value: {{ .Values.ui.catalogMaxBranches | quote }} + - name: TAGLIST_PAGE_SIZE + value: {{ .Values.ui.taglistPageSize | quote }} + - name: REGISTRY_SECURED + value: {{ .Values.ui.registrySecured | quote }} + - name: THEME + value: {{ .Values.ui.defaultTheme | quote }} + - name: THEME_PRIMARY_TEXT + value: {{ .Values.ui.theme.primaryText | quote }} + - name: THEME_NEUTRAL_TEXT + value: {{ .Values.ui.theme.neutralText | quote }} + - name: THEME_BACKGROUND + value: {{ .Values.ui.theme.background | quote }} + - name: THEME_HOVER_BACKGROUND + value: {{ 
.Values.ui.theme.hoverBackground | quote }} + - name: THEME_ACCENT_TEXT + value: {{ .Values.ui.theme.accentText | quote }} + - name: THEME_HEADER_TEXT + value: {{ .Values.ui.theme.headerText | quote }} + - name: THEME_HEADER_BACKGROUND + value: {{ .Values.ui.theme.headerBackground | quote }} + - name: THEME_FOOTER_TEXT + value: {{ .Values.ui.theme.footerText | quote }} + - name: THEME_FOOTER_NEUTRAL_TEXT + value: {{ .Values.ui.theme.footerNeutralText | quote }} + - name: THEME_FOOTER_BACKGROUND + value: {{ .Values.ui.theme.footerBackground | quote }} + ports: + - name: http + containerPort: {{ .Values.ui.service.targetPort }} + protocol: TCP + resources: + {{- toYaml .Values.ui.resources | nindent 12 }} + {{- with .Values.ui.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ui.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ui.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.ui.runAsRoot }} + securityContext: + runAsUser: 101 + {{- end }} + {{- if .Values.ui.additionalSpec }} + {{ tpl .Values.ui.additionalSpec . | nindent 6 | trim }} + {{- end }} diff --git a/helm/vendor/docker-registry-ui/templates/ui-ingress.yaml b/helm/vendor/docker-registry-ui/templates/ui-ingress.yaml new file mode 100644 index 00000000..b5411d5d --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/ui-ingress.yaml @@ -0,0 +1,38 @@ +{{- if .Values.ui.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "docker-registry-ui.fullname" . }}-user-interface + labels: + app.kubernetes.io/component : user-interface + {{- include "docker-registry-ui.labels" . | nindent 4 }} + {{- with .Values.ui.ingress.annotations }} + annotations: + {{- tpl (toYaml .) 
$ | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ui.ingress.ingressClassName }} + ingressClassName: {{ .Values.ui.ingress.ingressClassName }} + {{- end -}} +{{- if .Values.ui.ingress.tls }} + tls: +{{ tpl (toYaml .Values.ui.ingress.tls) $ | indent 4 }} +{{- end }} + rules: + - http: + paths: + - backend: + service: + name: {{ include "docker-registry-ui.fullname" . }}-user-interface + port: + number: {{ .Values.ui.service.port }} + {{- if .Values.ui.ingress.path }} + path: {{ .Values.ui.ingress.path }} + {{- end }} + {{- if .Values.ui.ingress.pathType }} + pathType: {{ .Values.ui.ingress.pathType }} + {{- end }} + {{- if .Values.ui.ingress.host }} + host: {{ tpl (.Values.ui.ingress.host) $ | quote }} + {{- end -}} +{{- end }} diff --git a/helm/vendor/docker-registry-ui/templates/ui-service.yaml b/helm/vendor/docker-registry-ui/templates/ui-service.yaml new file mode 100644 index 00000000..70319031 --- /dev/null +++ b/helm/vendor/docker-registry-ui/templates/ui-service.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "docker-registry-ui.fullname" . }}-user-interface + labels: + app.kubernetes.io/component : user-interface + {{- include "docker-registry-ui.labels" . | nindent 4 }} + {{- with .Values.ui.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + app.kubernetes.io/component : user-interface + {{- include "docker-registry-ui.labels" . | nindent 4 }} + type: {{ .Values.ui.service.type }} + ports: + - port: {{ .Values.ui.service.port }} + targetPort: {{ .Values.ui.service.targetPort }} + protocol: TCP + name: http + {{- if (and (eq .Values.ui.service.type "NodePort") .Values.ui.service.nodePort) }} + nodePort: {{ .Values.ui.service.nodePort }} + {{- end }} + {{- if .Values.ui.service.additionalSpec }} + {{ tpl .Values.ui.service.additionalSpec . 
| nindent 2 | trim }} + {{- end }} diff --git a/helm/vendor/docker-registry-ui/values.yaml b/helm/vendor/docker-registry-ui/values.yaml new file mode 100644 index 00000000..33030284 --- /dev/null +++ b/helm/vendor/docker-registry-ui/values.yaml @@ -0,0 +1,233 @@ +## Global +global: + # Set the prefix used for all resources in the Helm chart. If not set, + # the prefix will be ``. + name: null + # The default array of objects containing image pull secret names that will be applied. + imagePullSecrets: [] + # The default image policy for images: `IfNotPresent`, `Always`, `Never` + imagePullPolicy: IfNotPresent + +## User Interface +ui: + # Number of replicas for the Deployment. + replicas: 1 + # Title of the registry + title: "Docker registry UI" + # UI behave as a proxy of the registry + proxy: false + # The URL of your docker registry, may be a service (when proxy is on) or an external URL. + dockerRegistryUrl: null + # Override the pull URL + pullUrl: null + # Remove the menu that show the dialogs to add, remove and change the endpoint of your docker registry. + singleRegistry: true + # By default, the UI will check on every requests if your registry is secured or not (you will see `401` responses in your console). Set to `true` if your registry uses Basic Authentication and divide by two the number of call to your registry. + registrySecured: false + + # Show number of tags per images on catalog page. This will produce + nb images requests, not recommended on large registries. + showCatalogNbTags: false + # Limit the number of elements in the catalog page. + catalogElementsLimit: 1000 + # Expand by default all repositories in catalog + catalogDefaultExpanded: false + # Set the minimum repository/namespace to expand (e.g. `joxit/docker-registry-ui` `joxit/` is the repository/namespace). Can be 0 to disable branching. + catalogMinBranches: 1 + # Set the maximum repository/namespace to expand (e.g. `joxit/docker-registry-ui` `joxit/` is the repository/namespace). 
Can be 0 to disable branching. + catalogMaxBranches: 1 + + # Allow delete of images + deleteImages: true + # Show content digest in docker tag list. + showContentDigest: false + # Set the default order for the taglist page, could be `num-asc;alpha-asc`, `num-desc;alpha-asc`, `num-asc;alpha-desc`, `num-desc;alpha-desc`, `alpha-asc;num-asc`, `alpha-asc;num-desc`, `alpha-desc;num-asc` or `alpha-desc;num-desc`. + taglistOrder: alpha-asc;num-desc + # Set the number of tags to display in one page. + taglistPageSize: 100 + + # Expose custom labels in history page, custom labels will be processed like maintainer label. + historyCustomLabels: [] + + # Update the default Nginx configuration and **set custom headers** for your backend docker registry. Only when `ui.proxy` is used. + # Example: + # nginxProxyHeaders: + # [ { my-heeader-name: my-header-value } ] + nginxProxyHeaders: [] + # Update the default Nginx configuration and **forward custom headers** to your backend docker registry. Only when `ui.proxy` is used. + # Example: + # nginxProxyPassHeaders: [ my-first-header, my-second-header ] + nginxProxyPassHeaders: [] + # Add header Control-Cache: no-store, no-cache on requests to registry server. + # This needs to update your registry configuration with : `Access-Control-Allow-Headers: ['Authorization', 'Accept', 'Cache-Control']` + useControlCacheHeader: false + # Use root or nginx user inside the container, when this is false the target port must be greater or equal to 1024. 
+ runAsRoot: false + + # Select the default theme to apply, values can be `auto`, `dark` and `light` + defaultTheme: "auto" + + theme: + # Custom background color for the UI + background: "" + # Custom primary text color for the UI + primaryText: "" + # Custom neutral color for the UI (icons) + neutralText: "" + # Custom accent color for the UI (buttons) + accentText: "" + # Custom hover background color for the UI + hoverBackground: "" + # Custom header background color for the UI + headerBackground: "" + # Custom header text color for the UI + headerText: "" + # Custom footer background color for the UI + footerBackground: "" + # Custom footer text color for the UI + footerText: "" + # Custom footer neutral color for the UI (links) + footerNeutralText: "" + + # The name and tag of the docker image of the interface + image: joxit/docker-registry-ui:2.5.2 + # Override default image pull secrets + imagePullSecrets: "-" + # Override default pull policy + imagePullPolicy: "-" + # The resource settings for user interface pod. + resources: {} + # Optional YAML string to specify a nodeSelector config. + nodeSelector: {} + # Optional YAML string to specify tolerations. + tolerations: [] + # This value defines the [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) + # for server pods. + affinity: {} + # Annotations to apply to the user interface deployment. + annotations: {} + # Optional YAML string that will be appended to the deployment spec. + additionalSpec: {} + + service: + # Type of service: `LoadBalancer`, `ClusterIP` or `NodePort`. If using `NodePort` service + # type, you must set the desired `nodePorts` setting below. + type: ClusterIP + # Ports that will be exposed on the service + port: 80 + # The port to listen on the container. If under 1024, the user must be root + targetPort: 3000 + # If using a `NodePort` service type, you must specify the desired `nodePort` for each exposed port. 
+ nodePort: null + # Annotations to apply to the user interface service. + annotations: {} + # Optional YAML string that will be appended to the Service spec. + additionalSpec: {} + + ingress: + # Enable the ingress for the user interface. + enabled: false + # Fully qualified domain name of a network host. + host: null + # Path is matched against the path of an incoming request. + path: / + # Determines the interpretation of the Path matching, must be Prefix to serve assets. + pathType: Prefix + # The name of an IngressClass cluster resource. + ingressClassName: nginx + # TLS configuration + tls: [] + # Annotations to apply to the user interface ingress. + annotations: {} + # If you want a custom path, you can try this example: + # path: /ui(/|$)(.*) + # annotations: + # { nginx.ingress.kubernetes.io/rewrite-target: /$2 } + +## Registry Server +registry: + # Enable the registry server. + enabled: false + # The name and tag of the docker registry server image + image: registry:2.8.2 + # Override default image pull secrets + imagePullSecrets: "-" + # Override default pull policy + imagePullPolicy: "-" + # Configuration for the data directory. When null it will create an emptyDir. + dataVolume: null + # Persistence configuration for registry data + persistence: + # Enable persistence for registry data + enabled: false + # Storage class to use for the PVC. Use "-" to disable dynamic provisioning + storageClass: "" + # Access modes for the PVC + accessModes: + - ReadWriteOnce + # Size of the persistent volume + size: 10Gi + # Annotations for the PVC + annotations: {} + # Selector for existing PV (optional) + selector: {} + # The resource settings for registry server pod. + resources: {} + # Optional YAML string to specify a nodeSelector config. + nodeSelector: {} + # Optional YAML string to specify tolerations. 
+ tolerations: [] + # This value defines the [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) + # for server pods. + affinity: {} + # Annotations to apply to the registry server deployment. + annotations: {} + # Optional YAML string that will be appended to the deployment spec. + additionalSpec: {} + # Extra Environmental Variables for Registry + extraEnv: [] + + auth: + basic: + # Enable basic auth for Registry. + enabled: true + # Basic auth realm. + realm: Docker registry + # Full path for htpasswd file. Note that filename should match the secret key. + htpasswdPath: /etc/docker/registry/auth/htpasswd + # htpasswd secret name volume to mount. + secretName: '' + + service: + # Type of service: `LoadBalancer`, `ClusterIP` or `NodePort`. If using `NodePort` service + # type, you must set the desired `nodePorts` setting below. + type: ClusterIP + # Ports that will be exposed on the service + port: 5000 + # The port to listen on the container. + targetPort: 5000 + # If using a `NodePort` service type, you must specify the desired `nodePort` for each exposed port. + nodePort: null + # Annotations to apply to the registry server service. + annotations: {} + # Optional YAML string that will be appended to the Service spec. + additionalSpec: {} + + ingress: + # Enable the ingress for the registry server. + enabled: true + # Fully qualified domain name of a network host. + host: null + # Path is matched against the path of an incoming request. + path: /v2/ + # Determines the interpretation of the Path matching, must be Prefix to serve assets. + pathType: Prefix + # The name of an IngressClass cluster resource. + ingressClassName: nginx + # TLS configuration + tls: [] + # Annotations to apply to the registry server ingress. 
+ annotations: {} + # If you want a custom path, you can try this example: + # path: /api(/|$)(.*) + # annotations: + # { nginx.ingress.kubernetes.io/rewrite-target: /$2 } diff --git a/helm/vendor/postgresql/.helmignore b/helm/vendor/postgresql/.helmignore new file mode 100644 index 00000000..207983f3 --- /dev/null +++ b/helm/vendor/postgresql/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# img folder +img/ +# Changelog +CHANGELOG.md diff --git a/helm/vendor/postgresql/Chart.lock b/helm/vendor/postgresql/Chart.lock new file mode 100644 index 00000000..2f96f226 --- /dev/null +++ b/helm/vendor/postgresql/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.31.4 +digest: sha256:fc442e77200e1914dd46fe26490dcf62f44caa51db673c2f8e67d5319cd4c163 +generated: "2025-08-14T15:30:01.842897577Z" diff --git a/helm/vendor/postgresql/Chart.yaml b/helm/vendor/postgresql/Chart.yaml new file mode 100644 index 00000000..39d7f7a1 --- /dev/null +++ b/helm/vendor/postgresql/Chart.yaml @@ -0,0 +1,40 @@ +# Copyright Broadcom, Inc. All Rights Reserved. 
+# SPDX-License-Identifier: APACHE-2.0 + +annotations: + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r51 + - name: postgres-exporter + image: docker.io/bitnami/postgres-exporter:0.17.1-debian-12-r16 + - name: postgresql + image: docker.io/bitnami/postgresql:latest + licenses: Apache-2.0 + tanzuCategory: service +apiVersion: v2 +appVersion: 17.6.0 +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + tags: + - bitnami-common + version: 2.x.x +description: PostgreSQL (Postgres) is an open source object-relational database known + for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, + views, triggers and stored procedures. +home: https://bitnami.com +icon: https://dyltqmyl993wv.cloudfront.net/assets/stacks/postgresql/img/postgresql-stack-220x234.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- name: Broadcom, Inc. All Rights Reserved. + url: https://github.com/bitnami/charts +name: postgresql +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/postgresql +version: 17.0.1 diff --git a/helm/vendor/postgresql/README.md b/helm/vendor/postgresql/README.md new file mode 100644 index 00000000..045e4100 --- /dev/null +++ b/helm/vendor/postgresql/README.md @@ -0,0 +1,1154 @@ + + +# Bitnami package for PostgreSQL + +PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures. + +[Overview of PostgreSQL](http://www.postgresql.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. 
+ +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/postgresql +``` + +> Tip: Did you know that this app is also available as a Kubernetes App on the Azure Marketplace? Kubernetes Apps are the easiest way to deploy Bitnami on AKS. Click [here](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/bitnami.postgresql-cnab) to see the listing on Azure Marketplace. + +Looking to use PostgreSQL in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. + +## ⚠️ Important Notice: Upcoming changes to the Bitnami Catalog + +Beginning August 28th, 2025, Bitnami will evolve its public catalog to offer a curated set of hardened, security-focused images under the new [Bitnami Secure Images initiative](https://news.broadcom.com/app-dev/broadcom-introduces-bitnami-secure-images-for-production-ready-containerized-applications). As part of this transition: + +- Granting community users access for the first time to security-optimized versions of popular container images. +- Bitnami will begin deprecating support for non-hardened, Debian-based software images in its free tier and will gradually remove non-latest tags from the public catalog. As a result, community users will have access to a reduced number of hardened images. These images are published only under the “latest” tag and are intended for development purposes +- Starting August 28th, over two weeks, all existing container images, including older or versioned tags (e.g., 2.50.0, 10.6), will be migrated from the public catalog (docker.io/bitnami) to the “Bitnami Legacy” repository (docker.io/bitnamilegacy), where they will no longer receive updates. +- For production workloads and long-term support, users are encouraged to adopt Bitnami Secure Images, which include hardened containers, smaller attack surfaces, CVE transparency (via VEX/KEV), SBOMs, and enterprise support. 
+ +These changes aim to improve the security posture of all Bitnami users by promoting best practices for software supply chain integrity and up-to-date deployments. For more details, visit the [Bitnami Secure Images announcement](https://github.com/bitnami/containers/issues/83267). + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha) + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/postgresql +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Configuration and installation details + +### Resource requests and limits + +Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. 
Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### Prometheus metrics + +This chart can be integrated with Prometheus by setting `metrics.enabled` to `true`. This will deploy a sidecar container with [postgres_exporter](https://github.com/prometheus-community/postgres_exporter) in all pods. It will also create `metrics` services that can be configured under the `metrics.service` section. These services will be have the necessary annotations to be automatically scraped by Prometheus. + +#### Prometheus requirements + +It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Bitnami Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/prometheus) or the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to easily have a working Prometheus in your cluster. + +#### Integration with Prometheus Operator + +The chart can deploy `ServiceMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `metrics.serviceMonitor.enabled=true`. Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error: + +```text +no matches for kind "ServiceMonitor" in version "monitoring.coreos.com/v1" +``` + +Install the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) for having the necessary CRDs and the Prometheus Operator. 
+ +### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Customizing primary and read replica services in a replicated configuration + +At the top level, there is a service object which defines the services for both primary and readReplicas. For deeper customization, there are service objects for both the primary and read types individually. This allows you to override the values in the top level service object so that the primary and read can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the primary and read to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the primary.service or readReplicas.service objects will take precedence over the top level service object. + +### Use a different PostgreSQL version + +To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. + +### LDAP + +LDAP support can be enabled in the chart by specifying the `ldap.` parameters while creating a release. The following parameters should be configured to properly enable the LDAP support in the chart. + +- **ldap.enabled**: Enable LDAP support. Defaults to `false`. +- **ldap.uri**: LDAP URL beginning in the form `ldap[s]://<hostname>:<port>`. No defaults. +- **ldap.base**: LDAP base DN. No defaults. +- **ldap.binddn**: LDAP bind DN. 
No defaults. +- **ldap.bindpw**: LDAP bind password. No defaults. +- **ldap.bslookup**: LDAP base lookup. No defaults. +- **ldap.nss_initgroups_ignoreusers**: LDAP ignored users. `root,nslcd`. +- **ldap.scope**: LDAP search scope. No defaults. +- **ldap.tls_reqcert**: LDAP TLS check on server certificates. No defaults. + +For example: + +```text +ldap.enabled="true" +ldap.uri="ldap://my_ldap_server" +ldap.base="dc=example\,dc=org" +ldap.binddn="cn=admin\,dc=example\,dc=org" +ldap.bindpw="admin" +ldap.bslookup="ou=group-ok\,dc=example\,dc=org" +ldap.nss_initgroups_ignoreusers="root\,nslcd" +ldap.scope="sub" +ldap.tls_reqcert="demand" +``` + +Next, login to the PostgreSQL server using the `psql` client and add the PAM authenticated LDAP users. + +> Note: Parameters including commas must be escaped as shown in the above example. + +### Update credentials + +Bitnami charts, with its default settings, configure credentials at first boot. Any further change in the secrets or credentials can be done using one of the following methods: + +### Manual update of the passwords and secrets + +- Update the user password following [the upstream documentation](https://www.postgresql.org/docs/current/sql-alteruser.html) +- Update the password secret with the new values (replace the SECRET_NAME, PASSWORD and POSTGRES_PASSWORD placeholders) + +```shell +kubectl create secret generic SECRET_NAME --from-literal=password=PASSWORD --from-literal=postgres-password=POSTGRES_PASSWORD --dry-run -o yaml | kubectl apply -f - +``` + +### Automated update using a password update job + +The Bitnami PostgreSQL provides a password update job that will automatically change the PostgreSQL passwords when running helm upgrade. To enable the job set `passwordUpdateJob.enabled=true`. This job requires: + +- The new passwords: this is configured using either `auth.postgresPassword`, `auth.password` and `auth.replicationPassword` (if applicable) or setting `auth.existingSecret`. 
+- The previous passwords: This value is taken automatically from already deployed secret object. If you are using `auth.existingSecret` or `helm template` instead of `helm upgrade`, then set either `passwordUpdateJob.previousPasswords.postgresPassword`, `passwordUpdateJob.previousPasswords.password`, `passwordUpdateJob.previousPasswords.replicationPassword` (when applicable), or setting `passwordUpdateJob.previousPasswords.existingSecret`. + +In the following example we update the password via values.yaml in a PostgreSQL installation with replication + +```yaml +architecture: "replication" + +auth: + user: "user" + postgresPassword: "newPostgresPassword123" + password: "newUserPassword123" + replicationPassword: "newReplicationPassword123" + +passwordUpdateJob: + enabled: true +``` + +In this example we use two existing secrets (`new-password-secret` and `previous-password-secret`) to update the passwords: + +```yaml +auth: + existingSecret: new-password-secret + +passwordUpdateJob: + enabled: true + previousPasswords: + existingSecret: previous-password-secret +``` + +You can add extra update commands using the `passwordUpdateJob.extraCommands` value. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration`/`readReplicas.extendedConfiguration` parameters as a string. Alternatively, to replace the entire default configuration use `primary.configuration`. + +You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options. 
+ +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter as a string. + +In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +- First, create the secret with the certificate files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +- Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. 
Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL primary +primary: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +readReplicas: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. 
+ +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +```text + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +```text +postgresql.auth.username=testuser +subchart1.postgresql.auth.username=testuser +subchart2.postgresql.auth.username=testuser +postgresql.auth.password=testpass +subchart1.postgresql.auth.password=testpass +subchart2.postgresql.auth.password=testpass +postgresql.auth.database=testdb +subchart1.postgresql.auth.database=testdb +subchart2.postgresql.auth.database=testdb +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +```text +global.postgresql.auth.username=testuser +global.postgresql.auth.password=testpass +global.postgresql.auth.database=testdb +``` + +This way, the credentials will be available in all of the subcharts. + +### Backup and restore + +To back up and restore Bitnami PostgreSQL Helm chart deployments on Kubernetes, you need to back up the persistent volumes from the source deployment and attach them to a new deployment using [Velero](https://velero.io/), a Kubernetes backup/restore tool. + +These are the steps you will usually follow to back up and restore your PostgreSQL cluster data: + +- Install Velero on the source and destination clusters. 
+- Use Velero to back up the PersistentVolumes (PVs) used by the deployment on the source cluster. +- Use Velero to restore the backed-up PVs on the destination cluster. +- Create a new deployment on the destination cluster with the same chart, deployment name, credentials and other parameters as the original. This new deployment will use the restored PVs and hence the original data. + +Refer to our detailed [tutorial on backing up and restoring PostgreSQL deployments on Kubernetes](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-migrate-data-tac-velero-index.html) for more information. + +### NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +### Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. 
The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift up to 4.10, let OpenShift set the volume permissions, security context, runAsUser and fsGroup automatically, and disable the predefined settings of the helm chart: primary.securityContext.enabled=false,primary.containerSecurityContext.enabled=false,volumePermissions.enabled=false,shmVolume.enabled=false +- For OpenShift 4.11 and higher, let OpenShift set the runAsUser and fsGroup automatically. Configure the pod and container security context to restrictive defaults and disable the volume permissions setup: + primary.podSecurityContext.fsGroup=null,primary.podSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.runAsUser=null,primary.containerSecurityContext.allowPrivilegeEscalation=false,primary.containerSecurityContext.runAsNonRoot=true,primary.containerSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.capabilities.drop=['ALL'],volumePermissions.enabled=false,shmVolume.enabled=false + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). 
+ +As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits; for details, refer to the [code present in the container repository](https://github.com/bitnami/containers/tree/main/bitnami/postgresql). If you need to use that data, please convert it to SQL and import it after `helm install` has finished. 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` | +| `global.security.allowInsecureImages` | Allows skipping image verification | `false` | +| `global.postgresql.fullnameOverride` | Full chart name (overrides `fullnameOverride`) | `""` | +| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` | +| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` | +| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` | +| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` | +| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). | `""` | +| `global.postgresql.auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. 
| `""` | +| `global.postgresql.auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` | +| `global.postgresql.auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` | +| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `secretAnnotations` | Add annotations to the secrets | `{}` | +| 
`diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + +### PostgreSQL common parameters + +| Name | Description | Value | +| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | +| `image.registry` | PostgreSQL image registry | `REGISTRY_NAME` | +| `image.repository` | PostgreSQL image repository | `REPOSITORY_NAME/postgresql` | +| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` | +| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided | `""` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` is provided | `""` | +| `auth.database` | Name for a custom database to create | `""` | +| `auth.replicationUsername` | Name of the replication user | `repl_user` | +| `auth.replicationPassword` | Password for the replication user. 
Ignored if `auth.existingSecret` is provided | `""` | +| `auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. | `""` | +| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` | +| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` | +| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `replication-password` | +| `auth.usePasswordFiles` | Mount credentials as a files instead of using an environment variable | `true` | +| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` | +| `replication.applicationName` | Cluster application name. 
Useful for advanced replication settings | `my_application` | +| `containerPorts.postgresql` | PostgreSQL container port | `5432` | +| `audit.logHostname` | Log client hostnames | `false` | +| `audit.logConnections` | Add client log-in operations to the log file | `false` | +| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` | +| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` | +| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` | +| `audit.clientMinMessages` | Message log level to share with the user | `error` | +| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` | +| `audit.logTimezone` | Timezone for the log timestamps | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.server` | IP address or name of the LDAP server. | `""` | +| `ldap.port` | Port number on the LDAP server to connect to | `""` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` | +| `ldap.basedn` | Root DN to begin the search for the user in | `""` | +| `ldap.binddn` | DN of user to bind to LDAP | `""` | +| `ldap.bindpw` | Password for the user to bind to LDAP | `""` | +| `ldap.searchAttribute` | Attribute to match against the user name in the search | `""` | +| `ldap.searchFilter` | The search filter to use when doing search+bind authentication | `""` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` | +| `ldap.tls.enabled` | Se to true to enable TLS encryption | `false` | +| `ldap.uri` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. 
| `""` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` | +| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` | +| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | +| `tls.crlFilename` | File containing a Certificate Revocation List | `""` | + +### PostgreSQL Primary parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `primary.name` | Name of the primary database (eg primary, master, leader, ...) 
| `primary` | +| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` | +| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` | +| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` | +| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` | +| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` | +| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` | +| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` | +| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` | +| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` | +| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` | +| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` | +| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` | +| `primary.preInitDb.scripts` | Dictionary of pre-init scripts | `{}` | +| `primary.preInitDb.scriptsConfigMap` | ConfigMap with pre-init scripts to be run | `""` | +| `primary.preInitDb.scriptsSecret` | Secret with pre-init scripts to be run | `""` | +| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` | +| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` | +| `primary.standby.primaryPort` | The Port of replication primary in the other cluster | `""` | +| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` | +| `primary.extraEnvVarsCM` | Name of existing 
ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.command` | Override default container command (useful when using custom images) | `[]` | +| `primary.args` | Override default container args (useful when using custom images) | `[]` | +| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary containers | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| 
`primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `primary.lifecycleHooks` | for the PostgreSQL Primary container to automate configuration before or after startup | `{}` | +| `primary.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). | `nano` | +| `primary.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `primary.podSecurityContext.enabled` | Enable security context | `true` | +| `primary.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `primary.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `primary.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | +| `primary.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `primary.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `primary.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `primary.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `primary.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `primary.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| 
`primary.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `primary.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `primary.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `primary.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `primary.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` | +| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` | +| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` | +| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` | +| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` | +| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `primary.nodeAffinityPreset.type` | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.nodeAffinityPreset.key` | PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. | `""` | +| `primary.nodeAffinityPreset.values` | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. 
| `[]` | +| `primary.affinity` | Affinity for PostgreSQL primary pods assignment | `{}` | +| `primary.nodeSelector` | Node labels for PostgreSQL primary pods assignment | `{}` | +| `primary.tolerations` | Tolerations for PostgreSQL primary pods assignment | `[]` | +| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `primary.priorityClassName` | Priority Class to use for each pod (postgresql primary) | `""` | +| `primary.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` | +| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` | +| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` | +| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` | +| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` | +| `primary.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `primary.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `primary.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `primary.pdb.minAvailable` and `primary.pdb.maxUnavailable` are empty. 
| `""` | +| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` | +| `primary.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `primary.networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `primary.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `primary.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `primary.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `primary.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `primary.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `primary.service.type` | Kubernetes Service type | `ClusterIP` | +| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `primary.service.labels` | Map of labels to add to the primary service | `{}` | +| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` | +| `primary.service.loadBalancerClass` | Load balancer class if service type is `LoadBalancer` | `""` | +| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` | +| `primary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| 
`primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `primary.service.headless.annotations` | Additional custom annotations for headless PostgreSQL primary service | `{}` | +| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` | +| `primary.persistence.volumeName` | Name to assign the volume | `data` | +| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` | +| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` | +| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `primary.persistence.annotations` | Annotations for the PVC | `{}` | +| `primary.persistence.labels` | Labels for the PVC | `{}` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `primary.persistence.dataSource` | Custom PVC data source | `{}` | +| `primary.persistentVolumeClaimRetentionPolicy.enabled` | Enable Persistent volume retention policy for Primary Statefulset | `false` | +| `primary.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` | +| `primary.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` | + +### PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) + +| Name | Description | Value | +| ---------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `readReplicas.name` | Name of the read replicas database (eg secondary, slave, ...) | `read` | +| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` | +| `readReplicas.extendedConfiguration` | Extended PostgreSQL read only replicas configuration (appended to main or default configuration) | `""` | +| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` | +| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` | +| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` | +| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| 
`readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` | +| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `readReplicas.lifecycleHooks` | for the PostgreSQL read only container to automate configuration before or after startup | `{}` | +| `readReplicas.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if readReplicas.resources is set (readReplicas.resources is recommended for production). 
| `nano` | +| `readReplicas.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` | +| `readReplicas.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `readReplicas.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `readReplicas.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | +| `readReplicas.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `readReplicas.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `readReplicas.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `readReplicas.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `readReplicas.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `readReplicas.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `readReplicas.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `readReplicas.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `readReplicas.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `readReplicas.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `readReplicas.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` | +| 
`readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` | +| `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` | +| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` | +| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` | +| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` | +| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` | +| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `readReplicas.nodeAffinityPreset.key` | PostgreSQL read only node label key to match. Ignored if `primary.affinity` is set. | `""` | +| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. | `[]` | +| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` | +| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` | +| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` | +| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` | +| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` | +| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` | +| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` | +| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` | +| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `readReplicas.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `readReplicas.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `readReplicas.pdb.minAvailable` and `readReplicas.pdb.maxUnavailable` are empty. | `""` | +| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` | +| `readReplicas.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `readReplicas.networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `readReplicas.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `readReplicas.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `readReplicas.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `readReplicas.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `readReplicas.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` | +| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `readReplicas.service.labels` | Map of labels to add to the read service | `{}` | +| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` | +| `readReplicas.service.loadBalancerClass` | Load balancer class if service type is `LoadBalancer` | `""` | +| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` | +| `readReplicas.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `readReplicas.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `readReplicas.service.headless.annotations` | Additional custom annotations for headless PostgreSQL read only service | `{}` | +| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` | +| 
`readReplicas.persistence.existingClaim` | Name of an existing PVC to use | `""` | +| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` | +| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` | +| `readReplicas.persistence.labels` | Labels for the PVC | `{}` | +| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` | +| `readReplicas.persistentVolumeClaimRetentionPolicy.enabled` | Enable Persistent volume retention policy for read only Statefulset | `false` | +| `readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` | +| `readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` | + +### Backup parameters + +| Name | Description | Value | +| ------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `backup.enabled` | Enable the logical dump of the database "regularly" | `false` | +| `backup.cronjob.schedule` | Set the cronjob parameter schedule | `@daily` | +| `backup.cronjob.timeZone` | Set the cronjob parameter timeZone | `""` | +| `backup.cronjob.concurrencyPolicy` | Set the cronjob parameter concurrencyPolicy | `Allow` | +| `backup.cronjob.failedJobsHistoryLimit` | Set the cronjob parameter failedJobsHistoryLimit | `1` | +| `backup.cronjob.successfulJobsHistoryLimit` | Set the cronjob parameter successfulJobsHistoryLimit | `3` | +| `backup.cronjob.startingDeadlineSeconds` | Set the cronjob parameter startingDeadlineSeconds | `""` | +| `backup.cronjob.ttlSecondsAfterFinished` | Set the cronjob parameter ttlSecondsAfterFinished | `""` | +| `backup.cronjob.restartPolicy` | Set the cronjob parameter restartPolicy | `OnFailure` | +| `backup.cronjob.podSecurityContext.enabled` | Enable PodSecurityContext for CronJob/Backup | `true` | +| `backup.cronjob.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `backup.cronjob.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `backup.cronjob.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `backup.cronjob.podSecurityContext.fsGroup` | Group ID for the CronJob | `1001` | +| `backup.cronjob.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `backup.cronjob.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `backup.cronjob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `backup.cronjob.containerSecurityContext.runAsGroup` | Set containers' Security 
Context runAsGroup | `1001` | +| `backup.cronjob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `backup.cronjob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `backup.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `backup.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `backup.cronjob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `backup.cronjob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `backup.cronjob.command` | Set backup container's command to run | `["/bin/bash","-c","PGPASSWORD=\"${PGPASSWORD:-$(< \"$PGPASSWORD_FILE\")}\" pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file=\"${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump\""]` | +| `backup.cronjob.labels` | Set the cronjob labels | `{}` | +| `backup.cronjob.annotations` | Set the cronjob annotations | `{}` | +| `backup.cronjob.nodeSelector` | Node labels for PostgreSQL backup CronJob pod assignment | `{}` | +| `backup.cronjob.tolerations` | Tolerations for PostgreSQL backup CronJob pods assignment | `[]` | +| `backup.cronjob.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.cronjob.resources is set (backup.cronjob.resources is recommended for production). 
| `nano` | +| `backup.cronjob.resources` | Set container requests and limits for different resources like CPU or memory | `{}` | +| `backup.cronjob.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `backup.cronjob.storage.enabled` | Enable using a `PersistentVolumeClaim` as backup data volume | `true` | +| `backup.cronjob.storage.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `""` | +| `backup.cronjob.storage.resourcePolicy` | Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted | `""` | +| `backup.cronjob.storage.storageClass` | PVC Storage Class for the backup data volume | `""` | +| `backup.cronjob.storage.accessModes` | PV Access Mode | `["ReadWriteOnce"]` | +| `backup.cronjob.storage.size` | PVC Storage Request for the backup data volume | `8Gi` | +| `backup.cronjob.storage.annotations` | PVC annotations | `{}` | +| `backup.cronjob.storage.mountPath` | Path to mount the volume at | `/backup/pgdump` | +| `backup.cronjob.storage.subPath` | Subdirectory of the volume to mount at | `""` | +| `backup.cronjob.storage.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. 
when using local volumes) | `{}` | +| `backup.cronjob.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the backup container | `[]` | +| `backup.cronjob.extraVolumes` | Optionally specify extra list of additional volumes for the backup container | `[]` | + +### Password update job + +| Name | Description | Value | +| --------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `passwordUpdateJob.enabled` | Enable password update job | `false` | +| `passwordUpdateJob.backoffLimit` | Set backoff limit of the job | `10` | +| `passwordUpdateJob.command` | Override default container command on PostgreSQL Primary container(s) (useful when using custom images) | `[]` | +| `passwordUpdateJob.args` | Override default container args on PostgreSQL Primary container(s) (useful when using custom images) | `[]` | +| `passwordUpdateJob.extraCommands` | Extra commands to pass to the generation job | `""` | +| `passwordUpdateJob.previousPasswords.postgresPassword` | Previous postgres password (set if the password secret was already changed) | `""` | +| `passwordUpdateJob.previousPasswords.password` | Previous password (set if the password secret was already changed) | `""` | +| `passwordUpdateJob.previousPasswords.replicationPassword` | Previous replication password (set if the password secret was already changed) | `""` | +| `passwordUpdateJob.previousPasswords.existingSecret` | Name of a secret containing the previous passwords (set if the password secret was already changed) | `""` | +| `passwordUpdateJob.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `passwordUpdateJob.containerSecurityContext.seLinuxOptions` | Set SELinux options in 
container | `{}` | +| `passwordUpdateJob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `passwordUpdateJob.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `passwordUpdateJob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `passwordUpdateJob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `passwordUpdateJob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `passwordUpdateJob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `passwordUpdateJob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `passwordUpdateJob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `passwordUpdateJob.podSecurityContext.enabled` | Enabled credential init job pods' Security Context | `true` | +| `passwordUpdateJob.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `passwordUpdateJob.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `passwordUpdateJob.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `passwordUpdateJob.podSecurityContext.fsGroup` | Set credential init job pod's Security Context fsGroup | `1001` | +| `passwordUpdateJob.extraEnvVars` | Array containing extra env vars to configure the credential init job | `[]` | +| `passwordUpdateJob.extraEnvVarsCM` | ConfigMap containing extra env vars to configure the credential init job | `""` | +| `passwordUpdateJob.extraEnvVarsSecret` | Secret containing extra env vars to configure the credential init job (in case of sensitive data) | `""` | +| 
`passwordUpdateJob.extraVolumes` | Optionally specify extra list of additional volumes for the credential init job | `[]` | +| `passwordUpdateJob.extraVolumeMounts` | Array of extra volume mounts to be added to the credential init job container (evaluated as template). Normally used with `extraVolumes`. | `[]` | +| `passwordUpdateJob.initContainers` | Add additional init containers for the PostgreSQL Primary pod(s) | `[]` | +| `passwordUpdateJob.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if passwordUpdateJob.resources is set (passwordUpdateJob.resources is recommended for production). | `micro` | +| `passwordUpdateJob.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `passwordUpdateJob.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `passwordUpdateJob.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `passwordUpdateJob.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `passwordUpdateJob.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `passwordUpdateJob.hostAliases` | Add deployment host aliases | `[]` | +| `passwordUpdateJob.annotations` | Add annotations to the job | `{}` | +| `passwordUpdateJob.podLabels` | Additional pod labels | `{}` | +| `passwordUpdateJob.podAnnotations` | Additional pod annotations | `{}` | + +### Volume Permissions parameters + +| Name | Description | Value | +| ---------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| 
`volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `nano` | +| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | +| `volumePermissions.containerSecurityContext.runAsGroup` | Group ID for the init container | `0` | +| `volumePermissions.containerSecurityContext.runAsNonRoot` | runAsNonRoot for the init container | `false` | +| `volumePermissions.containerSecurityContext.seccompProfile.type` | seccompProfile.type for the init container | `RuntimeDefault` | + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` | +| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `psp.create` | Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `REGISTRY_NAME` | +| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `REPOSITORY_NAME/postgres-exporter` | +| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` | +| `metrics.collectors` | Control enabled collectors | `{}` | +| `metrics.customMetrics` | Define additional custom metrics | `{}` | +| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `metrics.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `metrics.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `metrics.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| 
`metrics.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `metrics.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `metrics.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for 
startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` | +| `metrics.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). | `nano` | +| `metrics.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` | +| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set auth.postgresPassword=secretpassword + oci://REGISTRY_NAME/REPOSITORY_NAME/postgresql +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. 
To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. +> **Warning** Setting a password will be ignored on new installation in case when previous PostgreSQL release was deleted through the helm command. In that case, old PVC will have an old password, and setting it through helm won't take effect. Deleting persistent volumes (PVs) will solve the issue. Refer to [issue 2061](https://github.com/bitnami/charts/issues/2061) for more details + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/postgresql +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/postgresql/values.yaml) + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 16.3.0 + +This version introduces image verification for security purposes. To disable it, set `global.security.allowInsecureImages` to `true`. More details at [GitHub issue](https://github.com/bitnami/charts/issues/30850). 
+ +### To 15.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. + +This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones. + +### To 14.0.0 + +This major version adapts the NetworkPolicy objects to the most recent Bitnami standards. Now there is a separate object for `primary` and for `readReplicas`, being located in their corresponding sections. It is also enabled by default in order to comply with the best security standards. + +Check the parameter section for the new value structure. + +### To 13.0.0 + +This major version changes the default PostgreSQL image from 15.x to 16.x. Follow the [official instructions](https://www.postgresql.org/docs/16/upgrading.html) to upgrade to 16.x. + +### To 12.0.0 + +This major version changes the default PostgreSQL image from 14.x to 15.x. Follow the [official instructions](https://www.postgresql.org/docs/15/upgrading.html) to upgrade to 15.x. + +### To 11.0.0 + +In this version the application version was bumped to _14.x_ series. Also, this major release renames several values in this chart and adds missing features, in order to be in line with the rest of assets in the Bitnami charts repository. + +- _replication.enabled_ parameter is deprecated in favor of _architecture_ parameter that accepts two values: _standalone_ and _replication_. +- _replication.singleService_ and _replication.uniqueServices_ parameters are deprecated. 
When using replication, each statefulset (primary and read-only) has its own headless service & service allowing to connect to read-only replicas through the service (round-robin) or individually. +- _postgresqlPostgresPassword_, _postgresqlUsername_, _postgresqlPassword_, _postgresqlDatabase_, _replication.user_, _replication.password_, and _existingSecret_ parameters have been regrouped under the _auth_ map. The _auth_ map uses a new perspective to configure authentication, so please read carefully each sub-parameter description. +- _extraEnv_ has been deprecated in favor of _primary.extraEnvVars_ and _readReplicas.extraEnvVars_. +- _postgresqlConfiguration_, _pgHbaConfiguration_, _configurationConfigMap_, _postgresqlExtendedConf_, and _extendedConfConfigMap_ have been deprecated in favor of _primary.configuration_, _primary.pgHbaConfiguration_, _primary.existingConfigmap_, _primary.extendedConfiguration_, and _primary.existingExtendedConfigmap_. +- _postgresqlInitdbArgs_, _postgresqlInitdbWalDir_, _initdbScripts_, _initdbScriptsConfigMap_, _initdbScriptsSecret_, _initdbUser_ and _initdbPassword_ have been regrouped under the _primary.initdb_ map. +- _postgresqlMaxConnections_, _postgresqlPostgresConnectionLimit_, _postgresqlDbUserConnectionLimit_, _postgresqlTcpKeepalivesInterval_, _postgresqlTcpKeepalivesIdle_, _postgresqlTcpKeepalivesCount_, _postgresqlStatementTimeout_ and _postgresqlPghbaRemoveFilters_ parameters are deprecated. Use _XXX.extraEnvVars_ instead. +- _primaryAsStandBy_ has been deprecated in favor of _primary.standby_. +- _securityContext_ and _containerSecurityContext_ have been deprecated in favor of _primary.podSecurityContext_, _primary.containerSecurityContext_, _readReplicas.podSecurityContext_, and _readReplicas.containerSecurityContext_. +- _livenessProbe_ and _readinessProbe_ maps have been deprecated in favor of _primary.livenessProbe_, _primary.readinessProbe_, _readReplicas.livenessProbe_ and _readReplicas.readinessProbe_ maps. 
+- _persistence_ map has been deprecated in favor of _primary.persistence_ and _readReplicas.persistence_ maps. +- _networkPolicy_ map has been completely refactored. +- _service_ map has been deprecated in favor of _primary.service_ and _readReplicas.service_ maps. +- _metrics.service.port_ has been regrouped under the _metrics.service.ports_ map. +- _serviceAccount.enabled_ and _serviceAccount.autoMount_ have been deprecated in favor of _serviceAccount.create_ and _serviceAccount.automountServiceAccountToken_. + +#### How to upgrade to version 11.0.0 + +To upgrade to _11.0.0_ from _10.x_, it should be done reusing the PVC(s) used to hold the PostgreSQL data on your previous release. To do so, follow the instructions below (the following example assumes that the release name is _postgresql_): + +> NOTE: Please, create a backup of your database before running any of these actions. + +1. Obtain the credentials and the names of the PVCs used to hold the PostgreSQL data on your current release: + +```console +export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace default postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +export POSTGRESQL_PVC=$(kubectl get pvc -l app.kubernetes.io/instance=postgresql,role=primary -o jsonpath="{.items[0].metadata.name}") +``` + +1. Delete the PostgreSQL statefulset (notice the option _--cascade=false_) and secret: + +```console +kubectl delete statefulsets.apps postgresql-postgresql --namespace default --cascade=false +kubectl delete secret postgresql --namespace default +``` + +1. Upgrade your release using the same PostgreSQL version: + +```console +CURRENT_VERSION=$(kubectl exec postgresql-postgresql-0 -- bash -c 'echo $BITNAMI_IMAGE_VERSION') +helm upgrade postgresql bitnami/postgresql \ + --set auth.postgresPassword=$POSTGRESQL_PASSWORD \ + --set primary.persistence.existingClaim=$POSTGRESQL_PVC \ + --set image.tag=$CURRENT_VERSION +``` + +1. 
You will have to delete the existing PostgreSQL pod and the new statefulset is going to create a new one + +```console +kubectl delete pod postgresql-postgresql-0 +``` + +1. Finally, you should see the lines below in PostgreSQL container logs: + +```text +$ kubectl logs $(kubectl get pods -l app.kubernetes.io/instance=postgresql,app.kubernetes.io/name=postgresql,app.kubernetes.io/component=primary -o jsonpath="{.items[0].metadata.name}") +... +postgresql 08:05:12.59 INFO ==> Deploying PostgreSQL with persisted data... +... +``` + +> NOTE: the instructions above reuse the same PostgreSQL version you were using in your chart release. Otherwise, you will find an error such as the one below when upgrading since the new chart major version also bumps the application version. To workaround this issue you need to upgrade database, please refer to the [official PostgreSQL documentation](https://www.postgresql.org/docs/current/upgrading.html) for more information about this. + +```console +$ kubectl logs $(kubectl get pods -l app.kubernetes.io/instance=postgresql,app.kubernetes.io/name=postgresql,app.kubernetes.io/component=primary -o jsonpath="{.items[0].metadata.name}") + ... +postgresql 08:10:14.72 INFO ==> ** Starting PostgreSQL ** +2022-02-01 08:10:14.734 GMT [1] FATAL: database files are incompatible with server +2022-02-01 08:10:14.734 GMT [1] DETAIL: The data directory was initialized by PostgreSQL version 11, which is not compatible with this version 14.1. +``` + +### To 10.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. 
+ +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Move dependency information from the _requirements.yaml_ to the _Chart.yaml_ +- After running _helm dependency update_, a _Chart.lock_ file is generated containing the same structure used in the previous _requirements.lock_ +- The different fields present in the _Chart.yaml_ file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Chart. +- The term _master_ has been replaced with _primary_ and _slave_ with _readReplicas_ throughout the chart. Role names have changed from _master_ and _slave_ to _primary_ and _read_. + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version does not support Helm v2 anymore. +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3. + +#### Useful links + +- [Bitnami Tutorial](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-resolve-helm2-helm3-post-migration-issues-index.html) +- [Helm docs](https://helm.sh/docs/topics/v2_v3_migration) +- [Helm Blog](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3) + +#### How to upgrade to version 10.0.0 + +To upgrade to _10.0.0_ from _9.x_, it should be done reusing the PVC(s) used to hold the PostgreSQL data on your previous release. 
To do so, follow the instructions below (the following example assumes that the release name is _postgresql_): + +> NOTE: Please, create a backup of your database before running any of those actions. + +1. Obtain the credentials and the names of the PVCs used to hold the PostgreSQL data on your current release: + +```console +export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace default postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +export POSTGRESQL_PVC=$(kubectl get pvc -l app.kubernetes.io/instance=postgresql,role=primary -o jsonpath="{.items[0].metadata.name}") +``` + +1. Delete the PostgreSQL statefulset (notice the option _--cascade=false_): + +```console +kubectl delete statefulsets.apps postgresql-postgresql --namespace default --cascade=false +``` + +1. Upgrade your release using the same PostgreSQL version: + +```console +helm upgrade postgresql bitnami/postgresql \ + --set postgresqlPassword=$POSTGRESQL_PASSWORD \ + --set persistence.existingClaim=$POSTGRESQL_PVC +``` + +1. Delete the existing PostgreSQL pod and the new statefulset will create a new one: + +```console +kubectl delete pod postgresql-postgresql-0 +``` + +1. Finally, you should see the lines below in PostgreSQL container logs: + +```text +$ kubectl logs $(kubectl get pods -l app.kubernetes.io/instance=postgresql,app.kubernetes.io/name=postgresql,role=primary -o jsonpath="{.items[0].metadata.name}") +... +postgresql 08:05:12.59 INFO ==> Deploying PostgreSQL with persisted data... +... +``` + +### To 9.0.0 + +In this version the chart was adapted to follow the [Helm standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels). + +- Some immutable objects were modified to adopt Helm standard labels introducing backward incompatibilities. + +#### How to upgrade to version 9.0.0 + +To upgrade to _9.0.0_ from _8.x_, it should be done reusing the PVC(s) used to hold the PostgreSQL data on your previous release. 
To do so, follow the instructions below (the following example assumes that the release name is _postgresql_): + +> NOTE: Please, create a backup of your database before running any of those actions. + +1. Obtain the credentials and the names of the PVCs used to hold the PostgreSQL data on your current release: + +```console +export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace default postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +export POSTGRESQL_PVC=$(kubectl get pvc -l app=postgresql,role=master -o jsonpath="{.items[0].metadata.name}") +``` + +1. Delete the PostgreSQL statefulset (notice the option _--cascade=false_): + +```console +kubectl delete statefulsets.apps postgresql-postgresql --namespace default --cascade=false +``` + +1. Upgrade your release using the same PostgreSQL version: + +```console +helm upgrade postgresql bitnami/postgresql \ + --set postgresqlPassword=$POSTGRESQL_PASSWORD \ + --set persistence.existingClaim=$POSTGRESQL_PVC +``` + +1. Delete the existing PostgreSQL pod and the new statefulset will create a new one: + +```console +kubectl delete pod postgresql-postgresql-0 +``` + +1. Finally, you should see the lines below in PostgreSQL container logs: + +```text +$ kubectl logs $(kubectl get pods -l app.kubernetes.io/instance=postgresql,app.kubernetes.io/name=postgresql,role=master -o jsonpath="{.items[0].metadata.name}") +... +postgresql 08:05:12.59 INFO ==> Deploying PostgreSQL with persisted data... +... +``` + +## License + +Copyright © 2025 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. diff --git a/helm/vendor/postgresql/charts/common-2.31.4.tgz b/helm/vendor/postgresql/charts/common-2.31.4.tgz new file mode 100644 index 00000000..8c7e4d54 Binary files /dev/null and b/helm/vendor/postgresql/charts/common-2.31.4.tgz differ diff --git a/helm/vendor/postgresql/charts/common/.helmignore b/helm/vendor/postgresql/charts/common/.helmignore new file mode 100644 index 00000000..d0e10845 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/.helmignore @@ -0,0 +1,26 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +# img folder +img/ +# Changelog +CHANGELOG.md diff --git a/helm/vendor/postgresql/charts/common/Chart.yaml b/helm/vendor/postgresql/charts/common/Chart.yaml new file mode 100644 index 00000000..fb04f761 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 2.31.4 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://bitnami.com +icon: https://dyltqmyl993wv.cloudfront.net/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Broadcom, Inc. All Rights Reserved. 
+ url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/common +type: library +version: 2.31.4 diff --git a/helm/vendor/postgresql/charts/common/README.md b/helm/vendor/postgresql/charts/common/README.md new file mode 100644 index 00000000..71368aa7 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/README.md @@ -0,0 +1,387 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 2.x.x + repository: oci://registry-1.docker.io/bitnamicharts +``` + +```console +helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. + +## ⚠️ Important Notice: Upcoming changes to the Bitnami Catalog + +Beginning August 28th, 2025, Bitnami will evolve its public catalog to offer a curated set of hardened, security-focused images under the new [Bitnami Secure Images initiative](https://news.broadcom.com/app-dev/broadcom-introduces-bitnami-secure-images-for-production-ready-containerized-applications). As part of this transition: + +- Granting community users access for the first time to security-optimized versions of popular container images. +- Bitnami will begin deprecating support for non-hardened, Debian-based software images in its free tier and will gradually remove non-latest tags from the public catalog. As a result, community users will have access to a reduced number of hardened images. 
These images are published only under the “latest” tag and are intended for development purposes +- Starting August 28th, over two weeks, all existing container images, including older or versioned tags (e.g., 2.50.0, 10.6), will be migrated from the public catalog (docker.io/bitnami) to the “Bitnami Legacy” repository (docker.io/bitnamilegacy), where they will no longer receive updates. +- For production workloads and long-term support, users are encouraged to adopt Bitnami Secure Images, which include hardened containers, smaller attack surfaces, CVE transparency (via VEX/KEV), SBOMs, and enterprise support. + +These changes aim to improve the security posture of all Bitnami users by promoting best practices for software supply chain integrity and up-to-date deployments. For more details, visit the [Bitnami Secure Images announcement](https://github.com/bitnami/containers/issues/83267). + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +| ------------------------------- | ---------------------------------------------------- | ------------------------------------------------------------ | +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes` | Return a nodeAffinity definition | `dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods` | Return a podAffinity/podAntiAffinity definition | `dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +| --------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | --------------------------------------- | +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.apiVersions.has` | Return true if the apiVersion is supported | `dict "version" "batch/v1" "context" $` | +| `common.capabilities.job.apiVersion` | Return the appropriate apiVersion for job. | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.daemonset.apiVersion` | Return the appropriate apiVersion for daemonset. 
| `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.vpa.apiVersion` | Return the appropriate apiVersion for Vertical Pod Autoscaler. | `.` Chart context | +| `common.capabilities.psp.supported` | Returns true if PodSecurityPolicy is supported | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | +| `common.capabilities.admissionConfiguration.supported` | Returns true if AdmissionConfiguration is supported | `.` Chart context | +| `common.capabilities.admissionConfiguration.apiVersion` | Return the appropriate apiVersion for AdmissionConfiguration. | `.` Chart context | +| `common.capabilities.podSecurityConfiguration.apiVersion` | Return the appropriate apiVersion for PodSecurityConfiguration. 
| `.` Chart context | + +### Compatibility + +| Helper identifier | Description | Expected Input | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | +| `common.compatibility.isOpenshift` | Return true if the detected platform is Openshift | `.` Chart context | +| `common.compatibility.renderSecurityContext` | Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC | `dict "secContext" .Values.containerSecurityContext "context" $` | + +### Errors + +| Helper identifier | Description | Expected Input | +| --------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | +| `common.errors.insecureImages` | Throw error when original container images are replaced. The error can be bypassed by setting the `global.security.allowInsecureImages` to true. 
| `dict "images" (list .Values.path.to.the.imageRoot) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +| --------------------------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | +| `common.images.version` | Return the proper image version | `dict "imageRoot" .Values.path.to.the.image "chart" .Chart` , see [ImageRoot](#imageroot) for the structure. 
| + +### Ingress + +| Helper identifier | Description | Expected Input | +| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +| --------------------------- | --------------------------------------------------------------------------- | ----------------- | +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +| ---------------------------------- | --------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. 
| `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | +| `common.names.dependency.fullname` | Create a default fully qualified dependency name. | `dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $` | + +### Resources + +| Helper identifier | Description | Expected Input | +| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | +| `common.resources.preset` | Return a resource request/limit object based on a given preset. These presets are for basic testing and not meant to be used in production. | `dict "type" "nano"` | + +### Secrets + +| Helper identifier | Description | Expected Input | +| --------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. 
| `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "honorProvidedValues" false "context" $`, length, strong, honorProvidedValues and chartName fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | +| `common.secrets.lookup` | Reuses the value from an existing secret, otherwise sets its value to a default value. | `dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +| ---------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------- | +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +| ---------------------------------- | ------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | +| `common.tplvalues.merge` | Merge a list of values that contains template after rendering them. | `dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $` | +| `common.tplvalues.merge-overwrite` | Merge a list of values that contains template after rendering them. 
| `dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $` | + +### Utils + +| Helper identifier | Description | Expected Input | +| ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | +| `common.utils.checksumTemplate` | Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376) | `dict "path" "/configmap.yaml" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. 
In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +| -------------------------------- | ----------------------------------------------------------------- | ---------------------------------------------------------- | +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | +| `common.warnings.modifiedImages` | Warning about replaced images from the original. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | +| `common.warnings.resources` | Warning about not setting the resource object in all deployments. | `dict "sections" (list "path1" "path2") context $` | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy.' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). 
+ +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... 
+ env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? 
+ +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +## License + +Copyright © 2025 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/helm/vendor/postgresql/charts/common/templates/_affinities.tpl b/helm/vendor/postgresql/charts/common/templates/_affinities.tpl new file mode 100644 index 00000000..c6ccc62e --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_affinities.tpl @@ -0,0 +1,169 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "extraNamespaces" (list "namespace1" "namespace2") "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +{{- $extraNamespaces := default (list) .extraNamespaces -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if $extraNamespaces }} + namespaces: + - {{ .context.Release.Namespace }} + {{- with $extraNamespaces }} + {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .namespaces }} + namespaces: + - {{ $.context.Release.Namespace }} + {{- with .namespaces }} + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }} + {{- end }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "extraNamespaces" (list "namespace1" "namespace2") "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +{{- $extraNamespaces := default (list) .extraNamespaces -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ 
$key }}: {{ $value | quote }} + {{- end }} + {{- if $extraNamespaces }} + namespaces: + - {{ .context.Release.Namespace }} + {{- with $extraNamespaces }} + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 6 }} + {{- end }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .namespaces }} + namespaces: + - {{ $.context.Release.Namespace }} + {{- with .namespaces }} + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 6 }} + {{- end }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_capabilities.tpl b/helm/vendor/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 00000000..58f58c1c --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,178 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- default (default .Capabilities.KubeVersion.Version .Values.kubeVersion) ((.Values.global).kubeVersion) -}} +{{- end -}} + +{{/* +Return true if the apiVersion is supported +Usage: +{{ include "common.capabilities.apiVersions.has" (dict "version" "batch/v1" "context" $) }} +*/}} +{{- define "common.capabilities.apiVersions.has" -}} +{{- $providedAPIVersions := default .context.Values.apiVersions ((.context.Values.global).apiVersions) -}} +{{- if and (empty $providedAPIVersions) (.context.Capabilities.APIVersions.Has .version) -}} + {{- true -}} +{{- else if has .version $providedAPIVersions -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- print "policy/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for job. +*/}} +{{- define "common.capabilities.job.apiVersion" -}} +{{- print "batch/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- print "batch/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- $kubeVersion := include "common.capabilities.kubeVersion" .context -}} +{{- print "autoscaling/v2" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. +*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- $kubeVersion := include "common.capabilities.kubeVersion" . -}} +{{- if and (not (empty $kubeVersion)) (semverCompare "<1.25-0" $kubeVersion) -}} +{{- print "autoscaling/v1beta2" -}} +{{- else -}} +{{- print "autoscaling/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if PodSecurityPolicy is supported +*/}} +{{- define "common.capabilities.psp.supported" -}} +{{- $kubeVersion := include "common.capabilities.kubeVersion" . -}} +{{- if or (empty $kubeVersion) (semverCompare "<1.25-0" $kubeVersion) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if AdmissionConfiguration is supported +*/}} +{{- define "common.capabilities.admissionConfiguration.supported" -}} +{{- $kubeVersion := include "common.capabilities.kubeVersion" . 
-}} + {{- true -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for AdmissionConfiguration. +*/}} +{{- define "common.capabilities.admissionConfiguration.apiVersion" -}} +{{- $kubeVersion := include "common.capabilities.kubeVersion" . -}} +{{- if and (not (empty $kubeVersion)) (semverCompare "<1.25-0" $kubeVersion) -}} +{{- print "apiserver.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiserver.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityConfiguration. +*/}} +{{- define "common.capabilities.podSecurityConfiguration.apiVersion" -}} +{{- $kubeVersion := include "common.capabilities.kubeVersion" . -}} +{{- if and (not (empty $kubeVersion)) (semverCompare "<1.25-0" $kubeVersion) -}} +{{- print "pod-security.admission.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "pod-security.admission.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_compatibility.tpl b/helm/vendor/postgresql/charts/common/templates/_compatibility.tpl new file mode 100644 index 00000000..19c26dbd --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_compatibility.tpl @@ -0,0 +1,46 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return true if the detected platform is Openshift +Usage: +{{- include "common.compatibility.isOpenshift" . -}} +*/}} +{{- define "common.compatibility.isOpenshift" -}} +{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" -}} +{{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC +Usage: +{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) -}} +*/}} +{{- define "common.compatibility.renderSecurityContext" -}} +{{- $adaptedContext := .secContext -}} + +{{- if (((.context.Values.global).compatibility).openshift) -}} + {{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}} + {{/* Remove incompatible user/group values that do not work in Openshift out of the box */}} + {{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}} + {{- if not .secContext.seLinuxOptions -}} + {{/* If it is an empty object, we remove it from the resulting context because it causes validation issues */}} + {{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{/* Remove empty seLinuxOptions object if global.compatibility.omitEmptySeLinuxOptions is set to true */}} +{{- if and (((.context.Values.global).compatibility).omitEmptySeLinuxOptions) (not .secContext.seLinuxOptions) -}} + {{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}} +{{- end -}} +{{/* Remove fields that are disregarded when running the container in privileged mode */}} 
+{{- if $adaptedContext.privileged -}} + {{- $adaptedContext = omit $adaptedContext "capabilities" -}} +{{- end -}} +{{- omit $adaptedContext "enabled" | toYaml -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_errors.tpl b/helm/vendor/postgresql/charts/common/templates/_errors.tpl new file mode 100644 index 00000000..fb704c99 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,92 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Throw error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} + +{{/* +Throw error when original container images are replaced. +The error can be bypassed by setting the "global.security.allowInsecureImages" to true. In this case, +a warning message will be shown instead. + +Usage: +{{ include "common.errors.insecureImages" (dict "images" (list .Values.path.to.the.imageRoot) "context" $) }} +*/}} +{{- define "common.errors.insecureImages" -}} +{{- $relocatedImages := list -}} +{{- $replacedImages := list -}} +{{- $bitnamiLegacyImages := list -}} +{{- $retaggedImages := list -}} +{{- $globalRegistry := ((.context.Values.global).imageRegistry) -}} +{{- $originalImages := .context.Chart.Annotations.images -}} +{{- range .images -}} + {{- $registryName := default .registry $globalRegistry -}} + {{- $fullImageNameNoTag := printf "%s/%s" $registryName .repository -}} + {{- $fullImageName := printf "%s:%s" $fullImageNameNoTag .tag -}} + {{- if not (contains $fullImageNameNoTag $originalImages) -}} + {{- if not (contains $registryName $originalImages) -}} + {{- $relocatedImages = append $relocatedImages $fullImageName -}} + {{- else if not (contains .repository $originalImages) -}} + {{- $replacedImages = append $replacedImages $fullImageName -}} + {{- if contains "docker.io/bitnamilegacy/" $fullImageNameNoTag -}} + {{- $bitnamiLegacyImages = append $bitnamiLegacyImages $fullImageName -}} + {{- end -}} + {{- end -}} + {{- end -}} + {{- if not (contains (printf "%s:%s" .repository .tag) $originalImages) -}} + {{- $retaggedImages = append $retaggedImages $fullImageName -}} + {{- end -}} +{{- end -}} + +{{- if and (or (gt (len $relocatedImages) 0) (gt (len $replacedImages) 0)) 
(((.context.Values.global).security).allowInsecureImages) -}} + {{- print "\n\n⚠ SECURITY WARNING: Verifying original container images was skipped. Please note this Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Bitnami Secure Images containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.\n" -}} +{{- else if (or (gt (len $relocatedImages) 0) (gt (len $replacedImages) 0)) -}} + {{- $errorString := "Original containers have been substituted for unrecognized ones. Deploying this chart with non-standard containers is likely to cause degraded security and performance, broken chart features, and missing environment variables." -}} + {{- $errorString = print $errorString "\n\nUnrecognized images:" -}} + {{- range (concat $relocatedImages $replacedImages) -}} + {{- $errorString = print $errorString "\n - " . -}} + {{- end -}} + {{- if and (eq (len $relocatedImages) 0) (eq (len $replacedImages) (len $bitnamiLegacyImages)) -}} + {{- $errorString = print "\n\n⚠ WARNING: " $errorString -}} + {{- print $errorString -}} + {{- else if or (contains "docker.io/bitnami/" $originalImages) (contains "docker.io/bitnamiprem/" $originalImages) (contains "docker.io/bitnamisecure/" $originalImages) -}} + {{- $errorString = print "\n\n⚠ ERROR: " $errorString -}} + {{- $errorString = print $errorString "\n\nIf you are sure you want to proceed with non-standard containers, you can skip container image verification by setting the global parameter 'global.security.allowInsecureImages' to true." 
-}} + {{- $errorString = print $errorString "\nFurther information can be obtained at https://github.com/bitnami/charts/issues/30850" -}} + {{- print $errorString | fail -}} + {{- else if gt (len $replacedImages) 0 -}} + {{- $errorString = print "\n\n⚠ WARNING: " $errorString -}} + {{- print $errorString -}} + {{- end -}} +{{- else if gt (len $retaggedImages) 0 -}} + {{- $warnString := "\n\n⚠ WARNING: Original containers have been retagged. Please note this Helm chart was tested, and validated on multiple platforms using a specific set of Bitnami and Bitnami Secure Images containers. Substituting original image tags could cause unexpected behavior." -}} + {{- $warnString = print $warnString "\n\nRetagged images:" -}} + {{- range $retaggedImages -}} + {{- $warnString = print $warnString "\n - " . -}} + {{- end -}} + {{- print $warnString -}} +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_images.tpl b/helm/vendor/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 00000000..76bb7ce4 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,115 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name. +If image tag and digest are not defined, termination fallbacks to chart appVersion. 
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global "chart" .Chart ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := default .imageRoot.registry ((.global).imageRegistry) -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} + +{{- if not .imageRoot.tag }} + {{- if .chart }} + {{- $termination = .chart.AppVersion | toString -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- range ((.global).imagePullSecrets) -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets .name -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end }} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets .name -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) -}} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- range (($context.Values.global).imagePullSecrets) -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) -}} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper image version (ignores image revision/prerelease info & falls back to chart appVersion) +{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }} +*/}} +{{- define "common.images.version" -}} +{{- $imageTag := .imageRoot.tag | toString -}} +{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}} +{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}} + {{- $version := semver $imageTag -}} + {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}} +{{- else -}} + {{- print .chart.AppVersion -}} +{{- end -}} +{{- end -}} + diff --git a/helm/vendor/postgresql/charts/common/templates/_ingress.tpl b/helm/vendor/postgresql/charts/common/templates/_ingress.tpl new file mode 100644 index 00000000..2d0dbf1e --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_ingress.tpl @@ -0,0 +1,41 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_labels.tpl b/helm/vendor/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 00000000..0a0cc548 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,46 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{- $default := dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service -}} +{{- with .context.Chart.AppVersion -}} +{{- $_ := set $default "app.kubernetes.io/version" . 
-}} +{{- end -}} +{{ template "common.tplvalues.merge" (dict "values" (list .customLabels $default) "context" .context) }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_names.tpl b/helm/vendor/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 00000000..d5d0ae43 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,72 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $releaseName := regexReplaceAll "(-?[^a-z\\d\\-])+-?" (lower .Release.Name) "-" -}} +{{- if contains $name $releaseName -}} +{{- $releaseName | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $releaseName $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_resources.tpl b/helm/vendor/postgresql/charts/common/templates/_resources.tpl new file mode 100644 index 00000000..d8a43e1c --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_resources.tpl @@ -0,0 +1,50 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a resource request/limit object based on a given preset. 
+These presets are for basic testing and not meant to be used in production +{{ include "common.resources.preset" (dict "type" "nano") -}} +*/}} +{{- define "common.resources.preset" -}} +{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}} +{{- $presets := dict + "nano" (dict + "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") + "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi") + ) + "micro" (dict + "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") + "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi") + ) + "small" (dict + "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") + "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi") + ) + "medium" (dict + "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi") + "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi") + ) + "large" (dict + "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi") + "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi") + ) + "xlarge" (dict + "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") + "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi") + ) + "2xlarge" (dict + "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") + "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi") + ) + }} +{{- if hasKey $presets .type -}} +{{- index $presets .type | toYaml -}} +{{- else -}} +{{- printf "ERROR: Preset key '%s' invalid. 
Allowed values are %s" .type (join "," (keys $presets)) | fail -}} +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_secrets.tpl b/helm/vendor/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 00000000..7868c00a --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,192 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. 
The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "honorProvidedValues" false "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets. + - skipB64enc - Boolean - Optional - Default to false. If set to true, the secret will not be base64 encoded. + - skipQuote - Boolean - Optional - Default to false. If set to true, no quotes will be added around the secret. 
+ - honorProvidedValues - Boolean - Optional - Default to false. If set to true, the values in providedValues have higher priority than an existing secret +The order in which this function returns a secret password: + 1. Password provided via the values.yaml if honorProvidedValues = true + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 2. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 3. Password provided via the values.yaml if honorProvidedValues = false + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 4. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | b64dec }} + {{- else if not (eq .failOnNew false) }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- end }} + +{{- if and $providedPasswordValue .honorProvidedValues }} + {{- $password = tpl ($providedPasswordValue | toString) .context }} +{{- end }} + +{{- if not $password }} + {{- if $providedPasswordValue }} + {{- $password = tpl ($providedPasswordValue | toString) .context }} + {{- else }} + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- if not (eq .failOnNew false) }} + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + {{- end }} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii 
$passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle }} + {{- else }} + {{- $password = randAlphaNum $passwordLength }} + {{- end }} + {{- end -}} +{{- end -}} +{{- if not .skipB64enc }} +{{- $password = $password | b64enc }} +{{- end -}} +{{- if .skipQuote -}} +{{- printf "%s" $password -}} +{{- else -}} +{{- printf "%s" $password | quote -}} +{{- end -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else if .defaultValue -}} + {{- $value = .defaultValue | toString | b64enc -}} +{{- end -}} +{{- if $value -}} +{{- printf "%s" $value -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_storage.tpl b/helm/vendor/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 00000000..aa75856c --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,21 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} +{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default (.global).defaultStorageClass | default "" -}} +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else -}} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_tplvalues.tpl b/helm/vendor/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 00000000..a04f4c1e --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,52 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. +Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. +Merge precedence is consistent with https://masterminds.github.io/sprig/dicts.html#mergeoverwrite-mustmergeoverwrite +Usage: +{{ include "common.tplvalues.merge-overwrite" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge-overwrite" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . 
"context" $.context "scope" $.scope) | fromYaml | mergeOverwrite $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_utils.tpl b/helm/vendor/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 00000000..d53c74aa --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . 
) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376). +Usage: +{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }} +*/}} +{{- define "common.utils.checksumTemplate" -}} +{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}} +{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/_warnings.tpl b/helm/vendor/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 00000000..62c44dfc --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,109 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html +{{- end }} +{{- end -}} + +{{/* +Warning about replaced images from the original. +Usage: +{{ include "common.warnings.modifiedImages" (dict "images" (list .Values.path.to.the.imageRoot) "context" $) }} +*/}} +{{- define "common.warnings.modifiedImages" -}} +{{- $affectedImages := list -}} +{{- $printMessage := false -}} +{{- $originalImages := .context.Chart.Annotations.images -}} +{{- range .images -}} + {{- $fullImageName := printf (printf "%s/%s:%s" .registry .repository .tag) -}} + {{- if not (contains $fullImageName $originalImages) }} + {{- $affectedImages = append $affectedImages (printf "%s/%s:%s" .registry .repository .tag) -}} + {{- $printMessage = true -}} + {{- end -}} +{{- end -}} +{{- if $printMessage }} + +⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables. + +Substituted images detected: +{{- range $affectedImages }} + - {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Warning about not setting the resource object in all deployments. 
+Usage: +{{ include "common.warnings.resources" (dict "sections" (list "path1" "path2") context $) }} +Example: +{{- include "common.warnings.resources" (dict "sections" (list "csiProvider.provider" "server" "volumePermissions" "") "context" $) }} +The list in the example assumes that the following values exist: + - csiProvider.provider.resources + - server.resources + - volumePermissions.resources + - resources +*/}} +{{- define "common.warnings.resources" -}} +{{- $values := .context.Values -}} +{{- $printMessage := false -}} +{{ $affectedSections := list -}} +{{- range .sections -}} + {{- if eq . "" -}} + {{/* Case where the resources section is at the root (one main deployment in the chart) */}} + {{- if not (index $values "resources") -}} + {{- $affectedSections = append $affectedSections "resources" -}} + {{- $printMessage = true -}} + {{- end -}} + {{- else -}} + {{/* Case where the are multiple resources sections (more than one main deployment in the chart) */}} + {{- $keys := split "." . -}} + {{/* We iterate through the different levels until arriving to the resource section. Example: a.b.c.resources */}} + {{- $section := $values -}} + {{- range $keys -}} + {{- $section = index $section . -}} + {{- end -}} + {{- if not (index $section "resources") -}} + {{/* If the section has enabled=false or replicaCount=0, do not include it */}} + {{- if and (hasKey $section "enabled") -}} + {{- if index $section "enabled" -}} + {{/* enabled=true */}} + {{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}} + {{- $printMessage = true -}} + {{- end -}} + {{- else if and (hasKey $section "replicaCount") -}} + {{/* We need a casting to int because number 0 is not treated as an int by default */}} + {{- if (gt (index $section "replicaCount" | int) 0) -}} + {{/* replicaCount > 0 */}} + {{- $affectedSections = append $affectedSections (printf "%s.resources" .) 
-}} + {{- $printMessage = true -}} + {{- end -}} + {{- else -}} + {{/* Default case, add it to the affected sections */}} + {{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}} + {{- $printMessage = true -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- if $printMessage }} + +WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs: +{{- range $affectedSections }} + - {{ . }} +{{- end }} ++info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/validations/_cassandra.tpl b/helm/vendor/postgresql/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 00000000..f8fd213b --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,51 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. 
+ +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/validations/_mariadb.tpl b/helm/vendor/postgresql/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 00000000..6ea8c0f4 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/validations/_mongodb.tpl b/helm/vendor/postgresql/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 00000000..e678a6de --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,67 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/validations/_mysql.tpl b/helm/vendor/postgresql/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 00000000..fbb65c33 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,67 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/validations/_postgresql.tpl b/helm/vendor/postgresql/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 00000000..51d47162 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,105 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/validations/_redis.tpl b/helm/vendor/postgresql/charts/common/templates/validations/_redis.tpl new file mode 100644 index 00000000..9fedfef9 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,48 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/templates/validations/_validations.tpl b/helm/vendor/postgresql/charts/common/templates/validations/_validations.tpl new file mode 100644 index 00000000..7cdee617 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,51 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/charts/common/values.yaml b/helm/vendor/postgresql/charts/common/values.yaml new file mode 100644 index 00000000..de2cac57 --- /dev/null +++ b/helm/vendor/postgresql/charts/common/values.yaml @@ -0,0 +1,8 @@ +# Copyright Broadcom, Inc. 
All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/helm/vendor/postgresql/templates/NOTES.txt b/helm/vendor/postgresql/templates/NOTES.txt new file mode 100644 index 00000000..1a086d24 --- /dev/null +++ b/helm/vendor/postgresql/templates/NOTES.txt @@ -0,0 +1,123 @@ +{{- $releaseNamespace := include "common.names.namespace" . }} +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +⚠ WARNING: Since August 28th, 2025, only a limited subset of images/charts are available for free. + Subscribe to Bitnami Secure Images to receive continued support and security updates. + More info at https://bitnami.com and https://github.com/bitnami/containers/issues/83267 + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ $releaseNamespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ $releaseNamespace }} -ti -- /opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/postgresql/entrypoint.sh /opt/bitnami/scripts/postgresql/run.sh + +{{- else }} + +{{- $customUser := include "postgresql.v1.username" . }} +{{- $postgresPassword := include "common.secrets.lookup" (dict "secret" (include "postgresql.v1.chart.fullname" .) 
"key" .Values.auth.secretKeys.adminPasswordKey "defaultValue" (ternary .Values.auth.postgresPassword .Values.auth.password (eq $customUser "postgres")) "context" $) -}} +{{- $authEnabled := and (not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret)) (or $postgresPassword .Values.auth.enablePostgresUser (and (not (empty $customUser)) (ne $customUser "postgres"))) }} +{{- if not $authEnabled }} + +WARNING: PostgreSQL has been configured without authentication, this is not recommended for production environments. +{{- end }} + +PostgreSQL can be accessed via port {{ include "postgresql.v1.service.port" . }} on the following DNS names from within your cluster: + + {{ include "postgresql.v1.primary.fullname" . }}.{{ $releaseNamespace }}.svc.cluster.local - Read/Write connection + +{{- if eq .Values.architecture "replication" }} + + {{ include "postgresql.v1.readReplica.fullname" . }}.{{ $releaseNamespace }}.svc.cluster.local - Read only connection + +{{- end }} + +{{- if and (not (empty $customUser)) (ne $customUser "postgres") }} +{{- if .Values.auth.enablePostgresUser }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ $releaseNamespace }} {{ include "postgresql.v1.secretName" . }} -o jsonpath="{.data.{{include "postgresql.v1.adminPasswordKey" .}}}" | base64 -d) +{{- end }} + +To get the password for "{{ $customUser }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ $releaseNamespace }} {{ include "postgresql.v1.secretName" . }} -o jsonpath="{.data.{{include "postgresql.v1.userPasswordKey" .}}}" | base64 -d) +{{- else }} +{{- if .Values.auth.enablePostgresUser }} + +To get the password for "{{ default "postgres" $customUser }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ $releaseNamespace }} {{ include "postgresql.v1.secretName" . }} -o jsonpath="{.data.{{ ternary "password" (include "postgresql.v1.adminPasswordKey" .) 
(and (not (empty $customUser)) (ne $customUser "postgres")) }}}" | base64 -d) +{{- end }} +{{- end }} + +To connect to your database run the following command: + {{- if $authEnabled }} + + kubectl run {{ include "postgresql.v1.chart.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ $releaseNamespace }} --image {{ include "postgresql.v1.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \ + --command -- psql --host {{ include "postgresql.v1.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.v1.service.port" . }} + {{- else }} + + kubectl run {{ include "postgresql.v1.chart.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ $releaseNamespace }} --image {{ include "postgresql.v1.image" . }} \ + --command -- psql --host {{ include "postgresql.v1.primary.fullname" . }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.v1.service.port" . }} + {{- end }} + + > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }}} does not exist" + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.primary.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ $releaseNamespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ $releaseNamespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.v1.primary.fullname" . 
}}) + {{- if $authEnabled }} + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} + {{- else }} + psql --host $NODE_IP --port $NODE_PORT -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} + {{- end }} +{{- else if contains "LoadBalancer" .Values.primary.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ $releaseNamespace }} -w {{ include "postgresql.v1.primary.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ $releaseNamespace }} {{ include "postgresql.v1.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + {{- if $authEnabled }} + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.v1.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} + {{- else }} + psql --host $SERVICE_IP --port {{ include "postgresql.v1.service.port" . }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} + {{- end }} +{{- else if contains "ClusterIP" .Values.primary.service.type }} + + kubectl port-forward --namespace {{ $releaseNamespace }} svc/{{ include "postgresql.v1.primary.fullname" . }} {{ include "postgresql.v1.service.port" . }}:{{ include "postgresql.v1.service.port" . }} & + {{- if $authEnabled }} + PGPASSWORD="$POSTGRES_PASSWORD" psql --host 127.0.0.1 -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . 
}}{{- else }} postgres{{- end }} -p {{ include "postgresql.v1.service.port" . }} + {{- else }} + psql --host 127.0.0.1 -d {{- if include "postgresql.v1.database" . }} {{ include "postgresql.v1.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.v1.service.port" . }} + {{- end }} +{{- end }} +{{- end }} + +WARNING: The configured password will be ignored on new installation in case when previous PostgreSQL release was deleted through the helm command. In that case, old PVC will have an old password, and setting it through helm won't take effect. Deleting persistent volumes (PVs) will solve the issue. + +{{- include "postgresql.v1.validateValues" . -}} +{{- include "common.warnings.rollingTag" .Values.image -}} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.resources" (dict "sections" (list "metrics" "primary" "readReplicas" "volumePermissions") "context" $) }} +{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.volumePermissions.image .Values.metrics.image) "context" $) }} +{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.volumePermissions.image .Values.metrics.image) "context" $) }} diff --git a/helm/vendor/postgresql/templates/_helpers.tpl b/helm/vendor/postgresql/templates/_helpers.tpl new file mode 100644 index 00000000..c582fc19 --- /dev/null +++ b/helm/vendor/postgresql/templates/_helpers.tpl @@ -0,0 +1,458 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Create a global name for the chart to use and parse with other naming functions +Please use instead of "common.names.fullname" to preserve support for .Values.global.postgresql.fullnameOverride +*/}} +{{- define "postgresql.v1.chart.fullname" -}} +{{- default (include "common.names.fullname" .) 
.Values.global.postgresql.fullnameOverride -}} +{{- end -}} + +{{/* +Create a default fully qualified app name for PostgreSQL Primary objects +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.v1.primary.fullname" -}} +{{- $fullname := include "postgresql.v1.chart.fullname" . -}} +{{- ternary (printf "%s-%s" $fullname .Values.primary.name | trunc 63 | trimSuffix "-") $fullname (eq .Values.architecture "replication") -}} +{{- end -}} + +{{/* +Create a default fully qualified app name for PostgreSQL read-only replicas objects +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.v1.readReplica.fullname" -}} +{{- printf "%s-%s" (include "postgresql.v1.chart.fullname" .) .Values.readReplicas.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the default FQDN for PostgreSQL primary headless service +We truncate at 63 chars because of the DNS naming spec. +*/}} +{{- define "postgresql.v1.primary.svc.headless" -}} +{{- printf "%s-hl" (include "postgresql.v1.primary.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the default FQDN for PostgreSQL read-only replicas headless service +We truncate at 63 chars because of the DNS naming spec. +*/}} +{{- define "postgresql.v1.readReplica.svc.headless" -}} +{{- printf "%s-hl" (include "postgresql.v1.readReplica.fullname" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.v1.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.v1.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "postgresql.v1.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.v1.imagePullSecrets" -}} +{{ include "common.images.renderPullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "context" $) }} +{{- end -}} + +{{/* +Return the name for a custom user to create +*/}} +{{- define "postgresql.v1.username" -}} +{{- if .Values.global.postgresql.auth.username -}} + {{- .Values.global.postgresql.auth.username -}} +{{- else -}} + {{- .Values.auth.username -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name for a custom database to create +*/}} +{{- define "postgresql.v1.database" -}} +{{- if .Values.global.postgresql.auth.database -}} + {{- printf "%s" (tpl .Values.global.postgresql.auth.database $) -}} +{{- else if .Values.auth.database -}} + {{- printf "%s" (tpl .Values.auth.database $) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "postgresql.v1.secretName" -}} +{{- if .Values.global.postgresql.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.global.postgresql.auth.existingSecret $) -}} +{{- else if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.v1.chart.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the replication-password key. +*/}} +{{- define "postgresql.v1.replicationPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret -}} + {{- if .Values.global.postgresql.auth.secretKeys.replicationPasswordKey -}} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.replicationPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.replicationPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.replicationPasswordKey $) -}} + {{- else -}} + {{- "replication-password" -}} + {{- end -}} +{{- else -}} + {{- "replication-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the admin-password key. +*/}} +{{- define "postgresql.v1.adminPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret -}} + {{- if .Values.global.postgresql.auth.secretKeys.adminPasswordKey -}} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.adminPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.adminPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.adminPasswordKey $) -}} + {{- end -}} +{{- else -}} + {{- "postgres-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the user-password key. +*/}} +{{- define "postgresql.v1.userPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret -}} + {{- if or (empty (include "postgresql.v1.username" .)) (eq (include "postgresql.v1.username" .) "postgres") -}} + {{- printf "%s" (include "postgresql.v1.adminPasswordKey" .) 
-}} + {{- else -}} + {{- if .Values.global.postgresql.auth.secretKeys.userPasswordKey -}} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.userPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.userPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.userPasswordKey $) -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- "password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.v1.createSecret" -}} +{{- $customUser := include "postgresql.v1.username" . -}} +{{- $postgresPassword := include "common.secrets.lookup" (dict "secret" (include "postgresql.v1.chart.fullname" .) "key" .Values.auth.secretKeys.adminPasswordKey "defaultValue" (ternary (coalesce .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword .Values.global.postgresql.auth.password .Values.auth.password) (coalesce .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (or (empty $customUser) (eq $customUser "postgres"))) "context" $) -}} +{{- if and (not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret)) (or $postgresPassword .Values.auth.enablePostgresUser (and (not (empty $customUser)) (ne $customUser "postgres")) (eq .Values.architecture "replication") (and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw))) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for PostgreSQL +*/}} +{{- define "postgresql.v1.createPreviousSecret" -}} +{{- if and .Values.passwordUpdateJob.previousPasswords.postgresPassword (not .Values.passwordUpdateJob.previousPasswords.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with previous PostgreSQL credentials +*/}} +{{- define "postgresql.v1.update-job.previousSecretName" -}} + {{- if .Values.passwordUpdateJob.previousPasswords.existingSecret -}} + {{- /* The secret with the new password is 
managed externally */ -}} + {{- tpl .Values.passwordUpdateJob.previousPasswords.existingSecret $ -}} + {{- else if .Values.passwordUpdateJob.previousPasswords.postgresPassword -}} + {{- /* The secret with the new password is managed externally */ -}} + {{- printf "%s-previous-secret" (include "postgresql.v1.chart.fullname" .) | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- /* The secret with the new password is managed by the helm chart. We use the current secret name as it has the old password */ -}} + {{- include "postgresql.v1.chart.fullname" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return the secret with new PostgreSQL credentials +*/}} +{{- define "postgresql.v1.update-job.newSecretName" -}} + {{- if and (not .Values.passwordUpdateJob.previousPasswords.existingSecret) (not .Values.passwordUpdateJob.previousPasswords.postgresPassword) -}} + {{- /* The secret with the new password is managed by the helm chart. We create a new secret as the current one has the old password */ -}} + {{- printf "%s-new-secret" (include "postgresql.v1.chart.fullname" .) | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- /* The secret with the new password is managed externally */ -}} + {{- include "postgresql.v1.secretName" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL service port +*/}} +{{- define "postgresql.v1.service.port" -}} +{{- if .Values.global.postgresql.service.ports.postgresql -}} + {{- .Values.global.postgresql.service.ports.postgresql -}} +{{- else -}} + {{- .Values.primary.service.ports.postgresql -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL service port +*/}} +{{- define "postgresql.v1.readReplica.service.port" -}} +{{- if .Values.global.postgresql.service.ports.postgresql -}} + {{- .Values.global.postgresql.service.ports.postgresql -}} +{{- else -}} + {{- .Values.readReplicas.service.ports.postgresql -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL primary configuration ConfigMap name. 
+*/}} +{{- define "postgresql.v1.primary.configmapName" -}} +{{- if .Values.primary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "postgresql.v1.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL primary with the configuration +*/}} +{{- define "postgresql.v1.primary.createConfigmap" -}} +{{- if and (or .Values.primary.configuration .Values.primary.pgHbaConfiguration) (not .Values.primary.existingConfigmap) -}} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL primary extended configuration ConfigMap name. +*/}} +{{- define "postgresql.v1.primary.extendedConfigmapName" -}} +{{- if .Values.primary.existingExtendedConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingExtendedConfigmap $) -}} +{{- else -}} + {{- printf "%s-extended-configuration" (include "postgresql.v1.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL read replica extended configuration ConfigMap name. +*/}} +{{- define "postgresql.v1.readReplicas.extendedConfigmapName" -}} + {{- printf "%s-extended-configuration" (include "postgresql.v1.readReplica.fullname" .) 
-}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL primary with the extended configuration +*/}} +{{- define "postgresql.v1.primary.createExtendedConfigmap" -}} +{{- if and .Values.primary.extendedConfiguration (not .Values.primary.existingExtendedConfigmap) -}} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL read replica with the extended configuration +*/}} +{{- define "postgresql.v1.readReplicas.createExtendedConfigmap" -}} +{{- if .Values.readReplicas.extendedConfiguration -}} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "postgresql.v1.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "postgresql.v1.chart.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.v1.mountConfigurationCM" -}} +{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the pre-initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.v1.preInitDb.scriptsCM" -}} +{{- if .Values.primary.preInitDb.scriptsConfigMap -}} + {{- printf "%s" (tpl .Values.primary.preInitDb.scriptsConfigMap $) -}} +{{- else -}} + {{- printf "%s-preinit-scripts" (include "postgresql.v1.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.v1.initdb.scriptsCM" -}} +{{- if .Values.primary.initdb.scriptsConfigMap -}} + {{- printf "%s" (tpl .Values.primary.initdb.scriptsConfigMap $) -}} +{{- else -}} + {{- printf "%s-init-scripts" (include "postgresql.v1.primary.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if TLS is enabled for LDAP connection +*/}} +{{- define "postgresql.v1.ldap.tls.enabled" -}} +{{- if and (kindIs "string" .Values.ldap.tls) (not (empty .Values.ldap.tls)) -}} + {{- true -}} +{{- else if and (kindIs "map" .Values.ldap.tls) .Values.ldap.tls.enabled -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.v1.readinessProbeCommand" -}} +{{- $customUser := include "postgresql.v1.username" . -}} +- | +{{- if (include "postgresql.v1.database" .) }} + exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if .Values.tls.enabled }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} +{{- else }} + exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if .Values.tls.enabled }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} +{{- end }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.v1.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.v1.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.v1.validateValues.psp" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.v1.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) -}} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.v1.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) -}} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.v1.tlsCert" -}} +{{- if .Values.tls.autoGenerated -}} + {{- printf "/opt/bitnami/postgresql/certs/tls.crt" -}} +{{- else -}} + {{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.v1.tlsCertKey" -}} +{{- if .Values.tls.autoGenerated -}} + {{- printf "/opt/bitnami/postgresql/certs/tls.key" -}} +{{- else -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. 
+*/}} +{{- define "postgresql.v1.tlsCACert" -}} +{{- if .Values.tls.autoGenerated -}} + {{- printf "/opt/bitnami/postgresql/certs/ca.crt" -}} +{{- else -}} + {{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CRL file. +*/}} +{{- define "postgresql.v1.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "postgresql.v1.createTlsSecret" -}} +{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.v1.tlsSecretName" -}} +{{- if .Values.tls.autoGenerated -}} + {{- printf "%s-crt" (include "postgresql.v1.chart.fullname" .) -}} +{{- else -}} + {{ tpl (required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret) . }} +{{- end -}} +{{- end -}} diff --git a/helm/vendor/postgresql/templates/backup/cronjob.yaml b/helm/vendor/postgresql/templates/backup/cronjob.yaml new file mode 100644 index 00000000..d3afb078 --- /dev/null +++ b/helm/vendor/postgresql/templates/backup/cronjob.yaml @@ -0,0 +1,147 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.backup.enabled }} +{{- $customUser := include "postgresql.v1.username" . }} +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: pg_dumpall + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.annotations .Values.commonAnnotations ) "context" . ) }} + {{- if $annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + schedule: {{ quote .Values.backup.cronjob.schedule }} + {{- if .Values.backup.cronjob.timeZone }} + timeZone: {{ .Values.backup.cronjob.timeZone | quote }} + {{- end }} + concurrencyPolicy: {{ .Values.backup.cronjob.concurrencyPolicy }} + failedJobsHistoryLimit: {{ .Values.backup.cronjob.failedJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ .Values.backup.cronjob.successfulJobsHistoryLimit }} + {{- if .Values.backup.cronjob.startingDeadlineSeconds }} + startingDeadlineSeconds: {{ .Values.backup.cronjob.startingDeadlineSeconds }} + {{- end }} + jobTemplate: + spec: + {{- if .Values.backup.cronjob.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ .Values.backup.cronjob.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 12 }} + app.kubernetes.io/component: pg_dumpall + {{- if $annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 12 }} + {{- end }} + spec: + {{- include "postgresql.v1.imagePullSecrets" . 
| nindent 10 }} + {{- if .Values.backup.cronjob.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.nodeSelector "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.backup.cronjob.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.tolerations "context" $) | nindent 12 }} + {{- end }} + containers: + - name: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall + image: {{ include "postgresql.v1.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + - name: PGUSER + {{- if .Values.auth.enablePostgresUser }} + value: postgres + {{- else }} + value: {{ $customUser | quote }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: PGPASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }} + {{- else }} + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include "postgresql.v1.adminPasswordKey" . }} + {{- end }} + - name: PGHOST + value: {{ include "postgresql.v1.primary.fullname" . }} + - name: PGPORT + value: {{ include "postgresql.v1.service.port" . 
| quote }} + - name: PGDUMP_DIR + value: {{ .Values.backup.cronjob.storage.mountPath }} + {{- if .Values.tls.enabled }} + - name: PGSSLROOTCERT + {{- if .Values.tls.autoGenerated }} + value: /tmp/certs/ca.crt + {{- else }} + value: {{ printf "/tmp/certs/%s" .Values.tls.certCAFilename }} + {{- end }} + {{- end }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.command "context" $) | nindent 14 }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + {{- end }} + {{- if .Values.backup.cronjob.storage.enabled }} + - name: datadir + mountPath: {{ .Values.backup.cronjob.storage.mountPath }} + subPath: {{ .Values.backup.cronjob.storage.subPath }} + {{- end }} + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.backup.cronjob.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.extraVolumeMounts "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.backup.cronjob.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.backup.cronjob.containerSecurityContext "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.backup.cronjob.resources }} + resources: {{- toYaml .Values.backup.cronjob.resources | nindent 14 }} + {{- else if ne .Values.backup.cronjob.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.backup.cronjob.resourcesPreset) | nindent 14 }} + {{- end }} + restartPolicy: {{ .Values.backup.cronjob.restartPolicy }} + {{- if .Values.backup.cronjob.podSecurityContext.enabled }} + securityContext: + fsGroup: {{ .Values.backup.cronjob.podSecurityContext.fsGroup }} + {{- end }} + volumes: + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + 
secretName: {{ include "postgresql.v1.tlsSecretName" . }} + {{- end }} + {{- if .Values.backup.cronjob.storage.enabled }} + {{- if .Values.backup.cronjob.storage.existingClaim }} + - name: datadir + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.backup.cronjob.storage.existingClaim .) }} + {{- else }} + - name: datadir + persistentVolumeClaim: + claimName: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall + {{- end }} + {{- end }} + - name: empty-dir + emptyDir: {} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.v1.secretName" . }} + {{- end }} + {{- if .Values.backup.cronjob.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.backup.cronjob.extraVolumes "context" $ ) | nindent 12 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/backup/networkpolicy.yaml b/helm/vendor/postgresql/templates/backup/networkpolicy.yaml new file mode 100644 index 00000000..21e1add8 --- /dev/null +++ b/helm/vendor/postgresql/templates/backup/networkpolicy.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.backup.enabled .Values.backup.cronjob.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: pg_dumpall + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: pg_dumpall + policyTypes: + - Egress + egress: + - ports: + - port: 5432 + protocol: TCP + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP +{{- end }} diff --git a/helm/vendor/postgresql/templates/backup/pvc.yaml b/helm/vendor/postgresql/templates/backup/pvc.yaml new file mode 100644 index 00000000..674ac3d0 --- /dev/null +++ b/helm/vendor/postgresql/templates/backup/pvc.yaml @@ -0,0 +1,34 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.backup.enabled .Values.backup.cronjob.storage.enabled (not .Values.backup.cronjob.storage.existingClaim) -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: pg_dumpall + {{- if or .Values.backup.cronjob.annotations .Values.commonAnnotations .Values.backup.cronjob.storage.resourcePolicy }} + annotations: + {{- if or .Values.backup.cronjob.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.backup.cronjob.annotations .Values.commonAnnotations ) "context" . ) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.backup.cronjob.storage.resourcePolicy }} + helm.sh/resource-policy: {{ .Values.backup.cronjob.storage.resourcePolicy | quote }} + {{- end }} + {{- end }} +spec: + accessModes: + {{- range .Values.backup.cronjob.storage.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.backup.cronjob.storage.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.backup.cronjob.storage "global" .Values.global) }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/extra-list.yaml b/helm/vendor/postgresql/templates/extra-list.yaml new file mode 100644 index 00000000..329f5c65 --- /dev/null +++ b/helm/vendor/postgresql/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/configmap.yaml b/helm/vendor/postgresql/templates/primary/configmap.yaml new file mode 100644 index 00000000..ad231e5c --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/configmap.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "postgresql.v1.primary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "postgresql.v1.primary.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- if .Values.primary.configuration }} + postgresql.conf: | + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.pgHbaConfiguration }} + pg_hba.conf: | + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.pgHbaConfiguration "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/extended-configmap.yaml b/helm/vendor/postgresql/templates/primary/extended-configmap.yaml new file mode 100644 index 00000000..2d228e4d --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/extended-configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "postgresql.v1.primary.createExtendedConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extended-configuration" (include "postgresql.v1.primary.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + override.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extendedConfiguration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/initialization-configmap.yaml b/helm/vendor/postgresql/templates/primary/initialization-configmap.yaml new file mode 100644 index 00000000..450812a5 --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/initialization-configmap.yaml @@ -0,0 +1,17 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.primary.initdb.scripts (not .Values.primary.initdb.scriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "postgresql.v1.primary.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.initdb.scripts "context" .) | nindent 2 }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/metrics-configmap.yaml b/helm/vendor/postgresql/templates/primary/metrics-configmap.yaml new file mode 100644 index 00000000..9bf889a9 --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/metrics-configmap.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-metrics" (include "postgresql.v1.primary.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/metrics-svc.yaml b/helm/vendor/postgresql/templates/primary/metrics-svc.yaml new file mode 100644 index 00000000..2e77c5b7 --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "postgresql.v1.primary.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if or .Values.commonAnnotations .Values.metrics.service.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/networkpolicy.yaml b/helm/vendor/postgresql/templates/primary/networkpolicy.yaml new file mode 100644 index 00000000..6e5e28f9 --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/networkpolicy.yaml @@ -0,0 +1,78 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.primary.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "postgresql.v1.primary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: primary + policyTypes: + - Ingress + - Egress + {{- if .Values.primary.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to read-replicas + - ports: + - port: {{ .Values.containerPorts.postgresql }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/component: read + {{- if .Values.primary.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPorts.metrics }} + {{- end }} + {{- if not .Values.primary.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + - podSelector: + matchLabels: + {{ template "postgresql.v1.primary.fullname" . 
}}-client: "true" + {{- if .Values.primary.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.primary.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.primary.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.primary.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.primary.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/pdb.yaml b/helm/vendor/postgresql/templates/primary/pdb.yaml new file mode 100644 index 00000000..a03fb20b --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.primary.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "postgresql.v1.primary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.primary.pdb.minAvailable }} + minAvailable: {{ .Values.primary.pdb.minAvailable }} + {{- end }} + {{- if or .Values.primary.pdb.maxUnavailable ( not .Values.primary.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.primary.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/preinitialization-configmap.yaml b/helm/vendor/postgresql/templates/primary/preinitialization-configmap.yaml new file mode 100644 index 00000000..ae6d1888 --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/preinitialization-configmap.yaml @@ -0,0 +1,17 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.primary.preInitDb.scripts (not .Values.primary.preInitDb.scriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-preinit-scripts" (include "postgresql.v1.primary.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.preInitDb.scripts "context" .) | nindent 2 }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/servicemonitor.yaml b/helm/vendor/postgresql/templates/primary/servicemonitor.yaml new file mode 100644 index 00000000..b17d7f57 --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/servicemonitor.yaml @@ -0,0 +1,46 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.v1.primary.fullname" . }} + namespace: {{ coalesce .Values.metrics.serviceMonitor.namespace (include "common.names.namespace" .) | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + {{- $svcLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.selector .Values.commonLabels ) "context" . 
) }} + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $svcLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/primary/statefulset.yaml b/helm/vendor/postgresql/templates/primary/statefulset.yaml new file mode 100644 index 00000000..d45d6176 --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/statefulset.yaml @@ -0,0 +1,705 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $customUser := include "postgresql.v1.username" . }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "postgresql.v1.primary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary + {{- if or .Values.commonAnnotations .Values.primary.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + serviceName: {{ include "postgresql.v1.primary.svc.headless" . }} + {{- if .Values.primary.updateStrategy }} + updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: primary + template: + metadata: + name: {{ include "postgresql.v1.primary.fullname" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: primary + {{- if or .Values.passwordUpdateJob.enabled (include "postgresql.v1.primary.createConfigmap" .) (include "postgresql.v1.primary.createExtendedConfigmap" .) .Values.primary.podAnnotations }} + annotations: + {{- if (include "postgresql.v1.primary.createConfigmap" .) }} + checksum/configuration: {{ pick (include (print $.Template.BasePath "/primary/configmap.yaml") . | fromYaml) "data" | toYaml | sha256sum }} + {{- end }} + {{- if (include "postgresql.v1.primary.createExtendedConfigmap" .) }} + checksum/extended-configuration: {{ pick (include (print $.Template.BasePath "/primary/extended-configmap.yaml") . 
| fromYaml) "data" | toYaml | sha256sum }} + {{- end }} + {{- if .Values.passwordUpdateJob.enabled }} + charts.bitnami.com/password-last-update: {{ now | date "20060102150405" | quote }} + {{- end }} + {{- if .Values.primary.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.primary.extraPodSpec }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraPodSpec "context" $) | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "postgresql.v1.serviceAccountName" . }} + {{- include "postgresql.v1.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.primary.automountServiceAccountToken }} + {{- if .Values.primary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.primary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.tolerations }} + tolerations: {{- include "common.tplvalues.render" 
(dict "value" .Values.primary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.primary.priorityClassName }} + priorityClassName: {{ .Values.primary.priorityClassName }} + {{- end }} + {{- if .Values.primary.schedulerName }} + schedulerName: {{ .Values.primary.schedulerName | quote }} + {{- end }} + {{- if .Values.primary.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.primary.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.primary.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.primary.hostNetwork }} + hostIPC: {{ .Values.primary.hostIPC }} + {{- if or (and .Values.tls.enabled (not .Values.volumePermissions.enabled)) (and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled)) .Values.primary.initContainers }} + initContainers: + {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} + - name: copy-certs + image: {{ include "postgresql.v1.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.primary.resources }} + resources: {{- toYaml .Values.primary.resources | nindent 12 }} + {{- else if ne .Values.primary.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.primary.resourcesPreset) | nindent 12 }} + {{- end }} + # We don't require a privileged container in this case + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.primary.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + chmod 600 {{ include "postgresql.v1.tlsCertKey" . }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- else if and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled) }} + - name: init-chmod-data + image: {{ include "postgresql.v1.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- else if ne .Values.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.volumePermissions.resourcesPreset) | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + {{- if .Values.primary.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.primary.persistence.mountPath }} + {{- else }} + chown {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} {{ .Values.primary.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.v1.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.v1.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }} + find {{ .Values.primary.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.v1.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ include "postgresql.v1.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: {{ .Values.primary.persistence.volumeName }} + mountPath: {{ .Values.primary.persistence.mountPath }} + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.primary.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" 
.Values.primary.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: postgresql + image: {{ include "postgresql.v1.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.primary.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.primary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.primary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: {{ .Values.containerPorts.postgresql | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: {{ .Values.primary.persistence.mountPath | quote }} + {{- if .Values.primary.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + # Authentication + {{- if or (eq $customUser "postgres") (empty $customUser) }} + {{- if .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . 
}} + key: {{ include "postgresql.v1.adminPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "true" + {{- end }} + {{- else }} + - name: POSTGRES_USER + value: {{ $customUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.userPasswordKey" .) }} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include "postgresql.v1.userPasswordKey" . }} + {{- end }} + {{- if .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }} + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include "postgresql.v1.adminPasswordKey" . }} + {{- end }} + {{- end }} + {{- end }} + {{- if (include "postgresql.v1.database" .) }} + - name: POSTGRES_DATABASE + value: {{ (include "postgresql.v1.database" .) | quote }} + {{- end }} + {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }} + # Replication + - name: POSTGRES_REPLICATION_MODE + value: {{ ternary "slave" "master" .Values.primary.standby.enabled | quote }} + - name: POSTGRES_REPLICATION_USER + value: {{ .Values.auth.replicationUsername | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.replicationPasswordKey" .) }} + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include "postgresql.v1.replicationPasswordKey" . 
}} + {{- end }} + {{- if ne .Values.replication.synchronousCommit "off" }} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if .Values.primary.initdb.args }} + # Initdb + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.primary.initdb.args | quote }} + {{- end }} + {{- if .Values.primary.initdb.postgresqlWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.primary.initdb.postgresqlWalDir | quote }} + {{- end }} + {{- if .Values.primary.initdb.user }} + - name: POSTGRES_INITSCRIPTS_USERNAME + value: {{ .Values.primary.initdb.user }} + {{- end }} + {{- if .Values.primary.initdb.password }} + - name: POSTGRES_INITSCRIPTS_PASSWORD + value: {{ .Values.primary.initdb.password | quote }} + {{- end }} + {{- if .Values.primary.standby.enabled }} + # Standby + - name: POSTGRES_MASTER_HOST + value: {{ .Values.primary.standby.primaryHost }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ .Values.primary.standby.primaryPort | quote }} + {{- end }} + # LDAP + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + {{- if or .Values.ldap.url .Values.ldap.uri }} + - name: POSTGRESQL_LDAP_URL + value: {{ coalesce .Values.ldap.url .Values.ldap.uri }} + {{- else }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if (include "postgresql.v1.ldap.tls.enabled" .) 
}} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end }} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote }} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ coalesce .Values.ldap.baseDN .Values.ldap.basedn }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ coalesce .Values.ldap.bindDN .Values.ldap.binddn}} + {{- if or (not (empty .Values.ldap.bind_password)) (not (empty .Values.ldap.bindpw)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: ldap-password + {{- end }} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ coalesce .Values.ldap.search_attr .Values.ldap.searchAttribute }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ coalesce .Values.ldap.search_filter .Values.ldap.searchFilter }} + {{- end }} + {{- end }} + # TLS + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ include "postgresql.v1.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ include "postgresql.v1.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ include "postgresql.v1.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ include "postgresql.v1.tlsCRL" . 
}} + {{- end }} + {{- end }} + # Audit + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + # Others + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.primary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.primary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ .Values.containerPorts.postgresql }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.primary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit 
.Values.primary.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.v1.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- end }} + {{- if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.v1.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . 
}}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- end }} + {{- if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.v1.readinessProbeCommand" . | nindent 16 }} + {{- end }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{- toYaml .Values.primary.resources | nindent 12 }} + {{- else if ne .Values.primary.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.primary.resourcesPreset) | nindent 12 }} + {{- end }} + {{- if .Values.primary.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/bitnami/postgresql/conf + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/bitnami/postgresql/tmp + subPath: app-tmp-dir + {{- if or .Values.primary.preInitDb.scriptsConfigMap .Values.primary.preInitDb.scripts }} + - name: custom-preinit-scripts + mountPath: /docker-entrypoint-preinitdb.d/ + {{- end }} + {{- if .Values.primary.preInitDb.scriptsSecret }} + - name: custom-preinit-scripts-secret + mountPath: /docker-entrypoint-preinitdb.d/secret + {{- end }} + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + 
{{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + mountPath: {{ .Values.primary.persistence.mountPath }}/conf/conf.d/ + {{- end }} + {{- if and .Values.auth.usePasswordFiles (or .Values.auth.enablePostgresUser $customUser) }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + - name: {{ .Values.primary.persistence.volumeName }} + mountPath: {{ .Values.primary.persistence.mountPath }} + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + mountPath: {{ .Values.primary.persistence.mountPath }}/conf + {{- end }} + {{- if .Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "postgresql.v1.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.metrics.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if or .Values.metrics.customMetrics .Values.metrics.collectors }} + args: + {{- if .Values.metrics.customMetrics }} + - --extend.query-path + - /conf/custom-metrics.yaml + {{- end }} + {{- range $name, $enabled := .Values.metrics.collectors }} + - --{{ if not $enabled }}no-{{ end }}collector.{{ $name }} + {{- end }} + {{- end }} + env: + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/postgres?sslmode=disable" (int (include "postgresql.v1.service.port" .)) }} + {{- if .Values.auth.usePasswordFiles }} + - name: DATA_SOURCE_PASS_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include (ternary "postgresql.v1.adminPasswordKey" "postgresql.v1.userPasswordKey" (empty $customUser)) .) }} + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include (ternary "postgresql.v1.adminPasswordKey" "postgresql.v1.userPasswordKey" (empty $customUser)) . 
}} + {{- end }} + - name: DATA_SOURCE_USER + value: {{ default "postgres" $customUser | quote }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPorts.metrics }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- end }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- end }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + {{- if and .Values.auth.usePasswordFiles (or .Values.auth.enablePostgresUser $customUser) }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if 
.Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- else if ne .Values.metrics.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.metrics.resourcesPreset) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + configMap: + name: {{ include "postgresql.v1.primary.configmapName" . }} + {{- end }} + {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + configMap: + name: {{ include "postgresql.v1.primary.extendedConfigmapName" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (or .Values.auth.enablePostgresUser $customUser) }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.v1.secretName" . }} + {{- end }} + {{- if or .Values.primary.preInitDb.scriptsConfigMap .Values.primary.preInitDb.scripts }} + - name: custom-preinit-scripts + configMap: + name: {{ include "postgresql.v1.preInitDb.scriptsCM" . }} + {{- end }} + {{- if .Values.primary.preInitDb.scriptsSecret }} + - name: custom-preinit-scripts-secret + secret: + secretName: {{ tpl .Values.primary.preInitDb.scriptsSecret $ }} + {{- end }} + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + configMap: + name: {{ include "postgresql.v1.initdb.scriptsCM" . 
}} + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "postgresql.v1.tlsSecretName" . }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ printf "%s-metrics" (include "postgresql.v1.primary.fullname" .) }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: {{ .Values.primary.persistence.volumeName }} + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim $ }} + {{- else if not .Values.primary.persistence.enabled }} + - name: {{ .Values.primary.persistence.volumeName }} + emptyDir: {} + {{- else }} + {{- if .Values.primary.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.primary.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.primary.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: {{ .Values.primary.persistence.volumeName }} + {{- if .Values.primary.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if 
.Values.primary.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + {{- if .Values.primary.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) | nindent 8 }} + {{- end }} diff --git a/helm/vendor/postgresql/templates/primary/svc-headless.yaml b/helm/vendor/postgresql/templates/primary/svc-headless.yaml new file mode 100644 index 00000000..180cfcbb --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/svc-headless.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.v1.primary.svc.headless" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary + annotations: + {{- if or .Values.primary.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.service.headless.annotations .Values.commonAnnotations ) "context" . 
) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other Postgresql pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: {{ template "postgresql.v1.service.port" . }} + targetPort: tcp-postgresql + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/helm/vendor/postgresql/templates/primary/svc.yaml b/helm/vendor/postgresql/templates/primary/svc.yaml new file mode 100644 index 00000000..2eeaea10 --- /dev/null +++ b/helm/vendor/postgresql/templates/primary/svc.yaml @@ -0,0 +1,58 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.v1.primary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := .Values.commonLabels }} + {{- if .Values.primary.service.labels }} + {{- $labels = include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.service.labels $labels ) "context" . ) }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary + {{- if or .Values.commonAnnotations .Values.primary.service.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if or (eq .Values.primary.service.type "LoadBalancer") (eq .Values.primary.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.primary.service.loadBalancerSourceRanges | toJson}} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerClass)) }} + loadBalancerClass: {{ .Values.primary.service.loadBalancerClass }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + {{- end }} + {{- if and .Values.primary.service.clusterIP (eq .Values.primary.service.type "ClusterIP") }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + {{- if .Values.primary.service.sessionAffinity }} + sessionAffinity: {{ .Values.primary.service.sessionAffinity }} + {{- end }} + {{- if .Values.primary.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.v1.service.port" . 
}} + targetPort: tcp-postgresql + {{- if and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) (not (empty .Values.primary.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.primary.service.nodePorts.postgresql }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.primary.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.primary.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/helm/vendor/postgresql/templates/prometheusrule.yaml b/helm/vendor/postgresql/templates/prometheusrule.yaml new file mode 100644 index 00000000..fadcd321 --- /dev/null +++ b/helm/vendor/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,22 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "postgresql.v1.chart.fullname" . }} + namespace: {{ coalesce .Values.metrics.prometheusRule.namespace (include "common.names.namespace" .) | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.prometheusRule.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "postgresql.v1.chart.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/psp.yaml b/helm/vendor/postgresql/templates/psp.yaml new file mode 100644 index 00000000..b15efa39 --- /dev/null +++ b/helm/vendor/postgresql/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.psp.supported" .) .Values.psp.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "postgresql.v1.chart.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/extended-configmap.yaml b/helm/vendor/postgresql/templates/read/extended-configmap.yaml new file mode 100644 index 00000000..6ffc93c4 --- /dev/null +++ b/helm/vendor/postgresql/templates/read/extended-configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "postgresql.v1.readReplicas.createExtendedConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extended-configuration" (include "postgresql.v1.readReplica.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + override.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extendedConfiguration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/metrics-configmap.yaml b/helm/vendor/postgresql/templates/read/metrics-configmap.yaml new file mode 100644 index 00000000..a4ed6b74 --- /dev/null +++ b/helm/vendor/postgresql/templates/read/metrics-configmap.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics (eq .Values.architecture "replication") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-metrics" (include "postgresql.v1.readReplica.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/metrics-svc.yaml b/helm/vendor/postgresql/templates/read/metrics-svc.yaml new file mode 100644 index 00000000..fccd1b9a --- /dev/null +++ b/helm/vendor/postgresql/templates/read/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled (eq .Values.architecture "replication") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "postgresql.v1.readReplica.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics-read + {{- if or .Values.commonAnnotations .Values.metrics.service.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/networkpolicy.yaml b/helm/vendor/postgresql/templates/read/networkpolicy.yaml new file mode 100644 index 00000000..9c730d83 --- /dev/null +++ b/helm/vendor/postgresql/templates/read/networkpolicy.yaml @@ -0,0 +1,80 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if eq .Values.architecture "replication" }} +{{- if .Values.readReplicas.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "postgresql.v1.readReplica.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: read + policyTypes: + - Ingress + - Egress + {{- if .Values.readReplicas.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to primary + - ports: + - port: {{ .Values.containerPorts.postgresql }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/component: primary + {{- if .Values.readReplicas.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPorts.metrics }} + {{- end }} + {{- if not 
.Values.readReplicas.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + - podSelector: + matchLabels: + {{ template "postgresql.v1.readReplica.fullname" . }}-client: "true" + {{- if .Values.readReplicas.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.readReplicas.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.readReplicas.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.readReplicas.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/pdb.yaml b/helm/vendor/postgresql/templates/read/pdb.yaml new file mode 100644 index 00000000..bd2eaadf --- /dev/null +++ b/helm/vendor/postgresql/templates/read/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and ( eq .Values.architecture "replication" ) .Values.readReplicas.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "postgresql.v1.readReplica.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.readReplicas.pdb.minAvailable }} + minAvailable: {{ .Values.readReplicas.pdb.minAvailable }} + {{- end }} + {{- if or .Values.readReplicas.pdb.maxUnavailable ( not .Values.readReplicas.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.readReplicas.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/servicemonitor.yaml b/helm/vendor/postgresql/templates/read/servicemonitor.yaml new file mode 100644 index 00000000..b1536a60 --- /dev/null +++ b/helm/vendor/postgresql/templates/read/servicemonitor.yaml @@ -0,0 +1,46 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled (eq .Values.architecture "replication") }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.v1.readReplica.fullname" . }} + namespace: {{ coalesce .Values.metrics.serviceMonitor.namespace (include "common.names.namespace" .) | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics-read + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + {{- $svcLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.selector .Values.commonLabels ) "context" . ) }} + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $svcLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: metrics-read + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/statefulset.yaml b/helm/vendor/postgresql/templates/read/statefulset.yaml new file mode 100644 index 00000000..f7376b5e --- /dev/null +++ b/helm/vendor/postgresql/templates/read/statefulset.yaml @@ -0,0 +1,588 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if eq .Values.architecture "replication" }} +{{- $customUser := include "postgresql.v1.username" . }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "postgresql.v1.readReplica.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read + {{- if or .Values.commonAnnotations .Values.readReplicas.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.readReplicas.replicaCount }} + serviceName: {{ include "postgresql.v1.readReplica.svc.headless" . }} + {{- if .Values.readReplicas.updateStrategy }} + updateStrategy: {{- toYaml .Values.readReplicas.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: read + template: + metadata: + name: {{ include "postgresql.v1.readReplica.fullname" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: read + {{- if or .Values.passwordUpdateJob.enabled (include "postgresql.v1.readReplicas.createExtendedConfigmap" .) .Values.readReplicas.podAnnotations }} + annotations: + {{- if (include "postgresql.v1.readReplicas.createExtendedConfigmap" .) }} + checksum/extended-configuration: {{ pick (include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | fromYaml) "data" | toYaml | sha256sum }} + {{- end }} + {{- if .Values.passwordUpdateJob.enabled }} + charts.bitnami.com/password-last-update: {{ now | date "20060102150405" | quote }} + {{- end }} + {{- if .Values.readReplicas.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.readReplicas.extraPodSpec }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraPodSpec "context" $) | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "postgresql.v1.serviceAccountName" . }} + {{- include "postgresql.v1.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.readReplicas.automountServiceAccountToken }} + {{- if .Values.readReplicas.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAffinityPreset "component" "read" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAntiAffinityPreset "component" "read" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.readReplicas.nodeAffinityPreset.type "key" .Values.readReplicas.nodeAffinityPreset.key "values" .Values.readReplicas.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.readReplicas.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.priorityClassName }} + priorityClassName: {{ .Values.readReplicas.priorityClassName }} + {{- end }} + {{- if .Values.readReplicas.schedulerName }} + schedulerName: {{ .Values.readReplicas.schedulerName | quote }} + {{- end }} + {{- if 
.Values.readReplicas.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.readReplicas.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.readReplicas.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.readReplicas.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.readReplicas.hostNetwork }} + hostIPC: {{ .Values.readReplicas.hostIPC }} + {{- if or (and .Values.tls.enabled (not .Values.volumePermissions.enabled)) (and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled)) .Values.readReplicas.initContainers }} + initContainers: + {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} + - name: copy-certs + image: {{ include "postgresql.v1.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- else if ne .Values.readReplicas.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.readReplicas.resourcesPreset) | nindent 12 }} + {{- end }} + # We don't require a privileged container in this case + {{- if .Values.readReplicas.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.readReplicas.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + chmod 600 {{ include "postgresql.v1.tlsCertKey" . 
}} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- else if and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled) }} + - name: init-chmod-data + image: {{ include "postgresql.v1.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- else if ne .Values.readReplicas.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.readReplicas.resourcesPreset) | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + {{- if .Values.readReplicas.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.readReplicas.persistence.mountPath }} + {{- else }} + chown {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} {{ .Values.readReplicas.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.v1.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.v1.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }} + find {{ .Values.readReplicas.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.v1.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ include "postgresql.v1.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: data + mountPath: {{ .Values.readReplicas.persistence.mountPath }} + {{- if .Values.readReplicas.persistence.subPath }} + subPath: {{ .Values.readReplicas.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.readReplicas.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" 
.Values.readReplicas.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: postgresql + image: {{ include "postgresql.v1.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.readReplicas.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.readReplicas.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: {{ .Values.containerPorts.postgresql | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: {{ .Values.readReplicas.persistence.mountPath | quote }} + {{- if .Values.readReplicas.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + # Authentication + {{- if or (eq $customUser "postgres") (empty $customUser) }} + {{- if .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) 
}} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include "postgresql.v1.adminPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "true" + {{- end }} + {{- else }} + - name: POSTGRES_USER + value: {{ $customUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.userPasswordKey" .) }} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include "postgresql.v1.userPasswordKey" . }} + {{- end }} + {{- if .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.adminPasswordKey" .) }} + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include "postgresql.v1.adminPasswordKey" . }} + {{- end }} + {{- end }} + {{- end }} + # Replication + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ .Values.auth.replicationUsername | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include "postgresql.v1.replicationPasswordKey" .) }} + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include "postgresql.v1.replicationPasswordKey" . }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ include "postgresql.v1.primary.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.v1.service.port" . 
| quote }} + # TLS + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ include "postgresql.v1.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ include "postgresql.v1.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ include "postgresql.v1.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ include "postgresql.v1.tlsCRL" . }} + {{- end }} + {{- end }} + # Audit + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + # Others + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.readReplicas.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.readReplicas.extraEnvVarsCM .Values.readReplicas.extraEnvVarsSecret }} + envFrom: + {{- if 
.Values.readReplicas.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.readReplicas.extraEnvVarsCM }} + {{- end }} + {{- if .Values.readReplicas.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.readReplicas.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ .Values.containerPorts.postgresql }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.readReplicas.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.v1.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser| quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . 
}}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.v1.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.v1.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.v1.tlsCert" . }} sslkey={{ include "postgresql.v1.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.v1.readinessProbeCommand" . 
| nindent 16 }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- else if ne .Values.readReplicas.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.readReplicas.resourcesPreset) | nindent 12 }} + {{- end }} + {{- if .Values.readReplicas.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/bitnami/postgresql/conf + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/bitnami/postgresql/tmp + subPath: app-tmp-dir + {{- if and .Values.auth.usePasswordFiles (or .Values.auth.enablePostgresUser $customUser) }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.readReplicas.extendedConfiguration }} + - name: postgresql-extended-config + mountPath: {{ .Values.readReplicas.persistence.mountPath }}/conf/conf.d/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + - name: data + mountPath: {{ .Values.readReplicas.persistence.mountPath }} + {{- if .Values.readReplicas.persistence.subPath }} + subPath: {{ .Values.readReplicas.persistence.subPath }} + {{- end }} + {{- if .Values.readReplicas.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "postgresql.v1.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.metrics.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if or .Values.metrics.customMetrics .Values.metrics.collectors }} + args: + {{- if .Values.metrics.customMetrics }} + - --extend.query-path + - /conf/custom-metrics.yaml + {{- end }} + {{- range $name, $enabled := .Values.metrics.collectors }} + - --{{ if not $enabled }}no-{{ end }}collector.{{ $name }} + {{- end }} + {{- end }} + env: + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/postgres?sslmode=disable" (int (include "postgresql.v1.service.port" .)) }} + {{- if .Values.auth.usePasswordFiles }} + - name: DATA_SOURCE_PASS_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (include (ternary "postgresql.v1.adminPasswordKey" "postgresql.v1.userPasswordKey" (empty $customUser)) .) }} + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ include "postgresql.v1.secretName" . }} + key: {{ include (ternary "postgresql.v1.adminPasswordKey" "postgresql.v1.userPasswordKey" (empty $customUser)) . 
}} + {{- end }} + - name: DATA_SOURCE_USER + value: {{ default "postgres" $customUser | quote }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPorts.metrics }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- end }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- end }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + {{- if and .Values.auth.usePasswordFiles (or .Values.auth.enablePostgresUser $customUser) }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if 
.Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- else if ne .Values.metrics.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.metrics.resourcesPreset) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.readReplicas.extendedConfiguration }} + - name: postgresql-extended-config + configMap: + name: {{ include "postgresql.v1.readReplicas.extendedConfigmapName" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (or .Values.auth.enablePostgresUser $customUser) }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.v1.secretName" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "postgresql.v1.tlsSecretName" . }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ printf "%s-metrics" (include "postgresql.v1.readReplica.fullname" .) 
}} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + - name: empty-dir + emptyDir: {} + {{- if .Values.readReplicas.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.readReplicas.persistence.enabled .Values.readReplicas.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.readReplicas.persistence.existingClaim $ }} + {{- else if not .Values.readReplicas.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + {{- if .Values.readReplicas.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: data + {{- if .Values.readReplicas.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.readReplicas.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.readReplicas.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + {{- if .Values.readReplicas.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + resources: + requests: + storage: {{ .Values.readReplicas.persistence.size | quote }} + {{- if .Values.readReplicas.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- include "common.storage.class" (dict "persistence" .Values.readReplicas.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/svc-headless.yaml b/helm/vendor/postgresql/templates/read/svc-headless.yaml new file mode 100644 index 00000000..15342a3b --- /dev/null +++ b/helm/vendor/postgresql/templates/read/svc-headless.yaml @@ -0,0 +1,33 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.v1.readReplica.svc.headless" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read + annotations: + {{- if or .Values.readReplicas.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.service.headless.annotations .Values.commonAnnotations ) "context" . 
) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other Postgresql pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: {{ include "postgresql.v1.readReplica.service.port" . }} + targetPort: tcp-postgresql + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/helm/vendor/postgresql/templates/read/svc.yaml b/helm/vendor/postgresql/templates/read/svc.yaml new file mode 100644 index 00000000..7ebace31 --- /dev/null +++ b/helm/vendor/postgresql/templates/read/svc.yaml @@ -0,0 +1,60 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.v1.readReplica.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := .Values.commonLabels }} + {{- if .Values.readReplicas.service.labels }} + {{- $labels = include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.service.labels $labels ) "context" . 
) }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read + {{- if or .Values.commonAnnotations .Values.readReplicas.service.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.readReplicas.service.type }} + {{- if or (eq .Values.readReplicas.service.type "LoadBalancer") (eq .Values.readReplicas.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.readReplicas.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerClass)) }} + loadBalancerClass: {{ .Values.readReplicas.service.loadBalancerClass }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }} + {{- end }} + {{- if and .Values.readReplicas.service.clusterIP (eq .Values.readReplicas.service.type "ClusterIP") }} + clusterIP: {{ .Values.readReplicas.service.clusterIP }} + {{- end }} + {{- if .Values.readReplicas.service.sessionAffinity }} + sessionAffinity: {{ .Values.readReplicas.service.sessionAffinity }} + {{- end }} + {{- if .Values.readReplicas.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.sessionAffinityConfig "context" $) | 
nindent 4 }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ include "postgresql.v1.readReplica.service.port" . }} + targetPort: tcp-postgresql + {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }} + {{- else if eq .Values.readReplicas.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.readReplicas.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.readReplicas.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/helm/vendor/postgresql/templates/role.yaml b/helm/vendor/postgresql/templates/role.yaml new file mode 100644 index 00000000..ba906aa8 --- /dev/null +++ b/helm/vendor/postgresql/templates/role.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "postgresql.v1.chart.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +# yamllint disable rule:indentation +rules: + {{- if and (include "common.capabilities.psp.supported" .) 
.Values.psp.create }} + - apiGroups: + - 'policy' + resources: + - 'podsecuritypolicies' + verbs: + - 'use' + resourceNames: + - {{ include "postgresql.v1.chart.fullname" . }} + {{- end }} + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +# yamllint enable rule:indentation +{{- end }} diff --git a/helm/vendor/postgresql/templates/rolebinding.yaml b/helm/vendor/postgresql/templates/rolebinding.yaml new file mode 100644 index 00000000..974080a5 --- /dev/null +++ b/helm/vendor/postgresql/templates/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "postgresql.v1.chart.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "postgresql.v1.chart.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "postgresql.v1.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/secrets.yaml b/helm/vendor/postgresql/templates/secrets.yaml new file mode 100644 index 00000000..82dfdd4e --- /dev/null +++ b/helm/vendor/postgresql/templates/secrets.yaml @@ -0,0 +1,120 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $host := include "postgresql.v1.primary.fullname" . 
}} +{{- $port := include "postgresql.v1.service.port" . }} +{{- $customUser := include "postgresql.v1.username" . }} +{{- $postgresPassword := (ternary (coalesce .Values.global.postgresql.auth.password .Values.auth.password .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (coalesce .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (or (empty $customUser) (eq $customUser "postgres"))) }} +{{- if (not $postgresPassword) }} +{{- $postgresPassword = include "common.secrets.lookup" (dict "secret" (include "postgresql.v1.secretName" .) "key" (coalesce .Values.global.postgresql.auth.secretKeys.adminPasswordKey .Values.auth.secretKeys.adminPasswordKey) "defaultValue" (ternary (coalesce .Values.global.postgresql.auth.password .Values.auth.password .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (coalesce .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (or (empty $customUser) (eq $customUser "postgres"))) "context" $) | trimAll "\"" | b64dec }} +{{- end }} +{{- if and (not $postgresPassword) .Values.auth.enablePostgresUser }} +{{- $postgresPassword = randAlphaNum 10 }} +{{- end }} +{{- $replicationPassword := "" }} +{{- if eq .Values.architecture "replication" }} +{{- $replicationPassword = include "common.secrets.passwords.manage" (dict "secret" (include "postgresql.v1.secretName" .) 
"key" (coalesce .Values.global.postgresql.auth.secretKeys.replicationPasswordKey .Values.auth.secretKeys.replicationPasswordKey) "providedValues" (list "auth.replicationPassword") "honorProvidedValues" true "context" $) | trimAll "\"" | b64dec }} +{{- end }} +{{- $ldapPassword := "" }} +{{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }} +{{- $ldapPassword = coalesce .Values.ldap.bind_password .Values.ldap.bindpw }} +{{- end }} +{{- $password := "" }} +{{- if and (not (empty $customUser)) (ne $customUser "postgres") }} +{{- $password = include "common.secrets.passwords.manage" (dict "secret" (include "postgresql.v1.secretName" .) "key" (coalesce .Values.global.postgresql.auth.secretKeys.userPasswordKey .Values.auth.secretKeys.userPasswordKey) "providedValues" (list "global.postgresql.auth.password" "auth.password") "honorProvidedValues" true "context" $) | trimAll "\"" | b64dec }} +{{- end }} +{{- $database := include "postgresql.v1.database" . }} +{{- if (include "postgresql.v1.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "postgresql.v1.chart.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.secretAnnotations .Values.commonAnnotations }} + annotations: + {{- if .Values.secretAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secretAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +type: Opaque +data: + {{- if $postgresPassword }} + postgres-password: {{ $postgresPassword | b64enc | quote }} + {{- end }} + {{- if $password }} + password: {{ $password | b64enc | quote }} + {{- end }} + {{- if $replicationPassword }} + replication-password: {{ $replicationPassword | b64enc | quote }} + {{- end }} + # We don't auto-generate LDAP password when it's not provided as we do for other passwords + {{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }} + ldap-password: {{ $ldapPassword | b64enc | quote }} + {{- end }} +{{- end }} +{{- if .Values.serviceBindings.enabled }} +{{- if $postgresPassword }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "postgresql.v1.chart.fullname" . }}-svcbind-postgres + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.secretAnnotations .Values.commonAnnotations }} + annotations: + {{- if .Values.secretAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secretAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +type: servicebinding.io/postgresql +data: + provider: {{ print "bitnami" | b64enc | quote }} + type: {{ print "postgresql" | b64enc | quote }} + host: {{ $host | b64enc | quote }} + port: {{ $port | b64enc | quote }} + username: {{ print "postgres" | b64enc | quote }} + database: {{ print "postgres" | b64enc | quote }} + password: {{ $postgresPassword | b64enc | quote }} + uri: {{ printf "postgresql://postgres:%s@%s:%s/postgres" $postgresPassword $host $port | b64enc | quote }} +{{- end }} +{{- if $password }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "postgresql.v1.chart.fullname" . }}-svcbind-custom-user + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.secretAnnotations .Values.commonAnnotations }} + annotations: + {{- if .Values.secretAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secretAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +type: servicebinding.io/postgresql +data: + provider: {{ print "bitnami" | b64enc | quote }} + type: {{ print "postgresql" | b64enc | quote }} + host: {{ $host | b64enc | quote }} + port: {{ $port | b64enc | quote }} + username: {{ $customUser | b64enc | quote }} + password: {{ $password | b64enc | quote }} + {{- if $database }} + database: {{ $database | b64enc | quote }} + {{- end }} + uri: {{ printf "postgresql://%s:%s@%s:%s/%s" $customUser $password $host $port $database | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/serviceaccount.yaml b/helm/vendor/postgresql/templates/serviceaccount.yaml new file mode 100644 index 00000000..3a0ccf0a --- /dev/null +++ b/helm/vendor/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "postgresql.v1.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/tls-secrets.yaml b/helm/vendor/postgresql/templates/tls-secrets.yaml new file mode 100644 index 00000000..350c86e6 --- /dev/null +++ b/helm/vendor/postgresql/templates/tls-secrets.yaml @@ -0,0 +1,30 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "postgresql.v1.createTlsSecret" . ) }} +{{- $secretName := printf "%s-crt" (include "postgresql.v1.chart.fullname" .) }} +{{- $ca := genCA "postgresql-ca" 365 }} +{{- $fullname := include "postgresql.v1.chart.fullname" . }} +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $primaryHeadlessServiceName := include "postgresql.v1.primary.svc.headless" . }} +{{- $readHeadlessServiceName := include "postgresql.v1.readReplica.svc.headless" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/update-password/job.yaml b/helm/vendor/postgresql/templates/update-password/job.yaml new file mode 100644 index 00000000..6b587b64 --- /dev/null +++ b/helm/vendor/postgresql/templates/update-password/job.yaml @@ -0,0 +1,235 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.passwordUpdateJob.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ printf "%s-password-update" (include "postgresql.v1.chart.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: postgresql + app.kubernetes.io/component: update-job + {{- $defaultAnnotations := dict "helm.sh/hook" "pre-upgrade" "helm.sh/hook-delete-policy" "hook-succeeded" }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.passwordUpdateJob.annotations .Values.commonAnnotations $defaultAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +spec: + backoffLimit: {{ .Values.passwordUpdateJob.backoffLimit }} + template: + metadata: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.passwordUpdateJob.podLabels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/part-of: postgresql + app.kubernetes.io/component: update-job + {{- if .Values.passwordUpdateJob.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "postgresql.v1.imagePullSecrets" . | nindent 6 }} + restartPolicy: OnFailure + {{- if .Values.passwordUpdateJob.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.passwordUpdateJob.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + automountServiceAccountToken: {{ .Values.passwordUpdateJob.automountServiceAccountToken }} + {{- if .Values.passwordUpdateJob.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.hostAliases "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- if .Values.passwordUpdateJob.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: update-credentials + image: {{ template "postgresql.v1.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.passwordUpdateJob.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + {{- end }} + {{- if .Values.passwordUpdateJob.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.args "context" $) | nindent 12 }} + {{- else }} + args: + - | + {{- $customUser := include "postgresql.v1.username" . }} + {{- $customDatabase := include "postgresql.v1.database" . | default "postgres" }} + {{- if .Values.auth.usePasswordFiles }} + # We need to load all the secret env vars to the system + for file in $(find /bitnami/postgresql/secrets -type f); do + env_var_name="$(basename $file)" + echo "Exporting $env_var_name" + export $env_var_name="$(< $file)" + done + {{- end }} + + . /opt/bitnami/scripts/postgresql-env.sh + . /opt/bitnami/scripts/libpostgresql.sh + . /opt/bitnami/scripts/liblog.sh + + primary_host={{ include "postgresql.v1.primary.fullname" . }}-0.{{ include "postgresql.v1.primary.svc.headless" . }} + info "Starting password update job" + {{- if .Values.auth.enablePostgresUser }} + if [[ -f /job-status/postgres-password-changed ]]; then + info "Postgres password already updated. Skipping" + else + info "Updating postgres password" + echo "ALTER USER postgres WITH PASSWORD '$POSTGRESQL_NEW_POSTGRES_PASSWORD';" | postgresql_remote_execute $primary_host {{ .Values.containerPorts.postgresql }} "" postgres $POSTGRESQL_PREVIOUS_POSTGRES_PASSWORD + touch /job-status/postgres-password-changed + info "Postgres password successfully updated" + fi + {{- end }} + {{- if and (not (empty $customUser)) (ne $customUser "postgres") }} + if [[ -f /job-status/password-changed ]]; then + info "User password already updated. 
Skipping" + else + info "Updating user password" + echo "ALTER USER {{ $customUser }} WITH PASSWORD '$POSTGRESQL_NEW_PASSWORD';" | postgresql_remote_execute $primary_host {{ .Values.containerPorts.postgresql }} "{{ $customDatabase }}" $POSTGRESQL_USER $POSTGRESQL_PREVIOUS_PASSWORD + touch /job-status/password-changed + info "User password successfully updated" + fi + {{- end }} + {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }} + if [[ -f /job-status/replication-password-changed ]]; then + info "Replication password already updated. Skipping" + else + info "Updating replication password" + echo "ALTER USER $POSTGRESQL_REPLICATION_USER WITH PASSWORD '$POSTGRESQL_NEW_REPLICATION_PASSWORD';" | postgresql_remote_execute $primary_host {{ .Values.containerPorts.postgresql }} "{{ $customDatabase }}" $POSTGRESQL_REPLICATION_USER $POSTGRESQL_PREVIOUS_REPLICATION_PASSWORD + touch /job-status/replication-password-changed + info "Replication password successfully updated" + fi + {{- end }} + {{- if .Values.passwordUpdateJob.extraCommands }} + info "Running extra commmands" + {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.extraCommands "context" $) | nindent 14 }} + {{- end }} + info "Password update job finished successfully" + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + {{- if not .Values.auth.usePasswordFiles }} + - name: POSTGRESQL_PREVIOUS_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.v1.update-job.previousSecretName" . }} + key: {{ include "postgresql.v1.adminPasswordKey" . }} + - name: POSTGRESQL_NEW_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.v1.update-job.newSecretName" . }} + key: {{ include "postgresql.v1.adminPasswordKey" . 
}} + {{- end }} + {{- if not (empty .Values.auth.username) }} + - name: POSTGRESQL_USER + value: {{ .Values.auth.username | quote }} + {{- if not .Values.auth.usePasswordFiles }} + - name: POSTGRESQL_PREVIOUS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.v1.update-job.previousSecretName" . }} + key: {{ include "postgresql.v1.userPasswordKey" . }} + - name: POSTGRESQL_NEW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.v1.update-job.newSecretName" . }} + key: {{ include "postgresql.v1.userPasswordKey" . }} + {{- end }} + {{- end }} + {{- if eq .Values.architecture "replication" }} + - name: POSTGRESQL_REPLICATION_USER + value: {{ .Values.auth.replicationUsername | quote }} + {{- if not .Values.auth.usePasswordFiles }} + - name: POSTGRESQL_PREVIOUS_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.v1.update-job.previousSecretName" . }} + key: {{ include "postgresql.v1.replicationPasswordKey" . }} + - name: POSTGRESQL_NEW_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.v1.update-job.newSecretName" . }} + key: {{ include "postgresql.v1.replicationPasswordKey" . 
}} + {{- end }} + {{- end }} + {{- if .Values.passwordUpdateJob.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.passwordUpdateJob.extraEnvVarsCM .Values.passwordUpdateJob.extraEnvVarsSecret }} + envFrom: + {{- if .Values.passwordUpdateJob.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.passwordUpdateJob.extraEnvVarsCM }} + {{- end }} + {{- if .Values.passwordUpdateJob.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.passwordUpdateJob.extraEnvVarsSecret }} + {{- end }} + {{- end }} + {{- if .Values.passwordUpdateJob.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.passwordUpdateJob.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.passwordUpdateJob.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.passwordUpdateJob.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.passwordUpdateJob.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /job-status + subPath: job-dir + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-previous-credentials + mountPath: /bitnami/postgresql/secrets/previous + - name: postgresql-new-credentials + mountPath: /bitnami/postgresql/secrets/new + {{- end }} + {{- if .Values.passwordUpdateJob.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.extraVolumeMounts "context" 
$) | nindent 12 }} + {{- end }} + {{- if .Values.passwordUpdateJob.resources }} + resources: {{- toYaml .Values.passwordUpdateJob.resources | nindent 12 }} + {{- else if ne .Values.passwordUpdateJob.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.passwordUpdateJob.resourcesPreset) | nindent 12 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-previous-credentials + secret: + secretName: {{ template "postgresql.v1.update-job.previousSecretName" . }} + items: + - key: {{ include "postgresql.v1.adminPasswordKey" . }} + path: POSTGRESQL_PREVIOUS_POSTGRES_PASSWORD + - key: {{ include "postgresql.v1.userPasswordKey" . }} + path: POSTGRESQL_PREVIOUS_PASSWORD + {{- if eq .Values.architecture "replication" }} + - key: {{ include "postgresql.v1.replicationPasswordKey" . }} + path: POSTGRESQL_PREVIOUS_REPLICATION_PASSWORD + {{- end }} + - name: postgresql-new-credentials + secret: + secretName: {{ template "postgresql.v1.update-job.newSecretName" . }} + items: + - key: {{ include "postgresql.v1.adminPasswordKey" . }} + path: POSTGRESQL_NEW_POSTGRES_PASSWORD + - key: {{ include "postgresql.v1.userPasswordKey" . }} + path: POSTGRESQL_NEW_PASSWORD + {{- if eq .Values.architecture "replication" }} + - key: {{ include "postgresql.v1.replicationPasswordKey" . }} + path: POSTGRESQL_NEW_REPLICATION_PASSWORD + {{- end }} + {{- end }} + {{- if .Values.passwordUpdateJob.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.passwordUpdateJob.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/update-password/new-secret.yaml b/helm/vendor/postgresql/templates/update-password/new-secret.yaml new file mode 100644 index 00000000..c02f2819 --- /dev/null +++ b/helm/vendor/postgresql/templates/update-password/new-secret.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright Broadcom, Inc. 
All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.passwordUpdateJob.enabled (include "postgresql.v1.createSecret" .) (not ( include "postgresql.v1.createPreviousSecret" . )) (not .Values.passwordUpdateJob.previousPasswords.existingSecret) }} +{{- $customUser := include "postgresql.v1.username" . }} +{{- $postgresPassword := (ternary (coalesce .Values.global.postgresql.auth.password .Values.auth.password .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (coalesce .Values.global.postgresql.auth.postgresPassword .Values.auth.postgresPassword) (or (empty $customUser) (eq $customUser "postgres"))) }} +{{- $password := coalesce .Values.global.postgresql.auth.password .Values.auth.password }} +{{- $replicationPassword := .Values.auth.replicationPassword }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-new-secret" (include "postgresql.v1.chart.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: postgresql + {{- $defaultAnnotations := dict "helm.sh/hook" "pre-upgrade" "helm.sh/hook-delete-policy" "hook-succeeded" }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonAnnotations $defaultAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +type: Opaque +data: + {{- if .Values.auth.enablePostgresUser }} + postgres-password: {{ required "The new postgres password is required!" $postgresPassword | b64enc | quote }} + {{- end }} + {{- if and (not (empty $customUser)) (ne $customUser "postgres") }} + password: {{ required "The new user password is required!" 
$password | b64enc | quote }} + {{- end }} + {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }} + replication-password: {{ required "The new replication password is required!" $replicationPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/templates/update-password/previous-secret.yaml b/helm/vendor/postgresql/templates/update-password/previous-secret.yaml new file mode 100644 index 00000000..39087c48 --- /dev/null +++ b/helm/vendor/postgresql/templates/update-password/previous-secret.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.passwordUpdateJob.enabled (include "postgresql.v1.createPreviousSecret" .) }} +{{- $customUser := include "postgresql.v1.username" . }} +{{- $postgresPassword := .Values.passwordUpdateJob.previousPasswords.postgresPassword }} +{{- $password := .Values.passwordUpdateJob.previousPasswords.password }} +{{- $replicationPassword := .Values.passwordUpdateJob.previousPasswords.replicationPassword }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-previous-secret" (include "postgresql.v1.chart.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: postgresql + {{- $defaultAnnotations := dict "helm.sh/hook" "pre-upgrade" "helm.sh/hook-delete-policy" "hook-succeeded" }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonAnnotations $defaultAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +type: Opaque +data: + {{- if .Values.auth.enablePostgresUser }} + postgres-password: {{ required "The previous postgres password is required!" 
$postgresPassword | b64enc | quote }} + {{- end }} + {{- if and (not (empty $customUser)) (ne $customUser "postgres") }} + password: {{ required "The previous user password is required!" $password | b64enc | quote }} + {{- end }} + {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }} + replication-password: {{ required "The previous replication password is required!" $replicationPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/helm/vendor/postgresql/values.schema.json b/helm/vendor/postgresql/values.schema.json new file mode 100644 index 00000000..fc41483c --- /dev/null +++ b/helm/vendor/postgresql/values.schema.json @@ -0,0 +1,156 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "PostgreSQL architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`" + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enablePostgresUser": { + "type": "boolean", + "title": "Enable \"postgres\" admin user", + "description": "Assign a password to the \"postgres\" admin user. Otherwise, remote access will be blocked for this user", + "form": true + }, + "postgresPassword": { + "type": "string", + "title": "Password for the \"postgres\" admin user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true + }, + "database": { + "type": "string", + "title": "PostgreSQL custom database", + "description": "Name of the custom database to be created during the 1st initialization of PostgreSQL", + "form": true + }, + "username": { + "type": "string", + "title": "PostgreSQL custom user", + "description": "Name of the custom user to be created during the 1st initialization of PostgreSQL. 
This user only has permissions on the PostgreSQL custom database", + "form": true + }, + "password": { + "type": "string", + "title": "Password for the custom user to create", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true + }, + "replicationUsername": { + "type": "string", + "title": "PostgreSQL replication user", + "description": "Name of user used to manage replication.", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + }, + "replicationPassword": { + "type": "string", + "title": "Password for PostgreSQL replication user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "readReplicas": { + "type": "integer", + "title": "read Replicas", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "volumePermissions": { + "type": "object", + 
"properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/helm/vendor/postgresql/values.yaml b/helm/vendor/postgresql/values.yaml new file mode 100644 index 00000000..86d96dc2 --- /dev/null +++ b/helm/vendor/postgresql/values.yaml @@ -0,0 +1,1933 @@ +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Please, note that this will override the parameters, including dependencies, configured to use the global value +## +global: + ## @param global.imageRegistry Global Docker image registry + ## + imageRegistry: "" + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + ## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead + ## + defaultStorageClass: "" + storageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + postgresql: + ## @param global.postgresql.fullnameOverride Full chart name (overrides `fullnameOverride`) + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). + ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. 
+ ## + fullnameOverride: "" + + auth: + postgresPassword: "" + username: "" + password: "" + database: "" + existingSecret: "" + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## @param secretAnnotations Add annotations to the secrets +## +secretAnnotations: {} +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param 
diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity +## @section PostgreSQL common parameters +## + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry [default: REGISTRY_NAME] PostgreSQL image registry +## @param image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository +## @skip image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: latest + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run +## +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided + ## + postgresPassword: "" + ## @param auth.username Name for a custom user to create + ## + username: "" + ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided + ## + password: "" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. 
+ ## + existingSecret: "" + ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## + secretKeys: + adminPasswordKey: postgres-password + userPasswordKey: password + replicationPasswordKey: replication-password + ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: true +## @param architecture PostgreSQL architecture (`standalone` or `replication`) +## +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` +## +replication: + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. + ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. 
Useful for advanced replication settings + ## + applicationName: my_application +## @param containerPorts.postgresql PostgreSQL container port +## +containerPorts: + postgresql: 5432 +## Audit settings +## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-outs operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps +## +audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" +## LDAP configuration +## @param ldap.enabled Enable LDAP support +## @param ldap.server IP address or name of the LDAP server. 
+## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## DEPRECATED ldap.baseDN It will removed in a future, please use 'ldap.basedn' instead +## DEPRECATED ldap.bindDN It will removed in a future, please use 'ldap.binddn' instead +## DEPRECATED ldap.bind_password It will removed in a future, please use 'ldap.bindpw' instead +## @param ldap.basedn Root DN to begin the search for the user in +## @param ldap.binddn DN of user to bind to LDAP +## @param ldap.bindpw Password for the user to bind to LDAP +## DEPRECATED ldap.search_attr It will removed in a future, please use 'ldap.searchAttribute' instead +## DEPRECATED ldap.search_filter It will removed in a future, please use 'ldap.searchFilter' instead +## @param ldap.searchAttribute Attribute to match against the user name in the search +## @param ldap.searchFilter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead +## @param ldap.tls.enabled Se to true to enable TLS encryption +## +ldap: + enabled: false + server: "" + port: "" + prefix: "" + suffix: "" + basedn: "" + binddn: "" + bindpw: "" + searchAttribute: "" + searchFilter: "" + scheme: "" + tls: + enabled: false + ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. 
+ ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html + ## + uri: "" +## @param postgresqlDataDir PostgreSQL data dir folder +## +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) +## +postgresqlSharedPreloadLibraries: "pgaudit" +## Start PostgreSQL pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 +## +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" +## @section PostgreSQL Primary parameters +## +primary: + ## @param 
primary.name Name of the primary database (eg primary, master, leader, ...) + ## + name: primary + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." 
+ ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Pre-init configuration + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql/#on-container-start + preInitDb: + ## @param primary.preInitDb.scripts Dictionary of pre-init scripts + ## Specify dictionary of shell scripts to be run before db boot + ## e.g: + ## scripts: + ## my_pre_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." + scripts: {} + ## @param primary.preInitDb.scriptsConfigMap ConfigMap with pre-init scripts to be run + ## NOTE: This will override `primary.preInitDb.scripts` + scriptsConfigMap: "" + ## @param primary.preInitDb.scriptsSecret Secret with pre-init scripts to be run + ## NOTE: This can work along `primary.preInitDb.scripts` or `primary.preInitDb.scriptsConfigMap` + scriptsSecret: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. 
+ ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + 
## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: 
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param primary.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param primary.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enabled containers' Security Context + ## @param primary.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param primary.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param primary.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param 
primary.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param primary.containerSecurityContext.privileged Set container's Security Context privileged + ## @param primary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param primary.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param primary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param primary.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param 
primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: "" + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param primary.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param primary.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. 
Defaults to `1` if both `primary.pdb.minAvailable` and `primary.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param primary.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param primary.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param primary.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param primary.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param primary.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param primary.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param primary.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.labels Map of labels to add to the primary service + ## + labels: {} + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerClass Load balancer class if service type 
is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service + ## + annotations: {} + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable PostgreSQL Primary data 
persistence using PVC + ## + enabled: true + ## @param primary.persistence.volumeName Name to assign the volume + ## + volumeName: "data" + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.labels Labels for the PVC + ## + labels: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## PostgreSQL Primary Persistent Volume Claim Retention Policy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + persistentVolumeClaimRetentionPolicy: + ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary 
Statefulset + ## + enabled: false + ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## + whenScaled: Retain + ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + whenDeleted: Retain +## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) +## +readReplicas: + ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...) + ## + name: read + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for 
startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param readReplicas.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if readReplicas.resources is set (readReplicas.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param readReplicas.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param readReplicas.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param readReplicas.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enabled containers' Security Context + ## @param readReplicas.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param readReplicas.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param readReplicas.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param readReplicas.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param readReplicas.containerSecurityContext.privileged Set container's Security Context privileged + ## @param readReplicas.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## 
@param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param readReplicas.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param readReplicas.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param readReplicas.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostNetwork: false + ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostIPC: false + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## + labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## + priorityClassName: "" + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param readReplicas.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param readReplicas.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param readReplicas.pdb.maxUnavailable Maximum number/percentage of pods that 
may be made unavailable. Defaults to `1` if both `readReplicas.pdb.minAvailable` and `readReplicas.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param readReplicas.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param readReplicas.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param readReplicas.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param readReplicas.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param readReplicas.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param readReplicas.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param readReplicas.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.labels Map of labels to add to the read service + ## + labels: {} + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param 
readReplicas.service.loadBalancerClass Load balancer class if service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service + ## + annotations: {} + ## PostgreSQL read only 
persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.labels Labels for the PVC + ## + labels: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## PostgreSQL Read only Persistent Volume Claim Retention Policy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + persistentVolumeClaimRetentionPolicy: + ## @param 
readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset + ## + enabled: false + ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## + whenScaled: Retain + ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + whenDeleted: Retain +## @section Backup parameters +## This section implements a trivial logical dump cronjob of the database. +## This only comes with the consistency guarantees of the dump program. +## This is not a snapshot based roll forward/backward recovery backup. +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ +backup: + ## @param backup.enabled Enable the logical dump of the database "regularly" + enabled: false + cronjob: + ## @param backup.cronjob.schedule Set the cronjob parameter schedule + schedule: "@daily" + ## @param backup.cronjob.timeZone Set the cronjob parameter timeZone + timeZone: "" + ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy + concurrencyPolicy: Allow + ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit + failedJobsHistoryLimit: 1 + ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit + successfulJobsHistoryLimit: 3 + ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds + startingDeadlineSeconds: "" + ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished + ttlSecondsAfterFinished: "" + ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy + restartPolicy: OnFailure + ## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup + ## @param 
backup.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param backup.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param backup.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## backup container's Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param backup.cronjob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param backup.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param backup.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + 
capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param backup.cronjob.command Set backup container's command to run + command: + - /bin/bash + - -c + - PGPASSWORD="${PGPASSWORD:-$(< "$PGPASSWORD_FILE")}" pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file="${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump" + ## @param backup.cronjob.labels Set the cronjob labels + labels: {} + ## @param backup.cronjob.annotations Set the cronjob annotations + annotations: {} + ## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/ + ## + nodeSelector: {} + ## @param backup.cronjob.tolerations Tolerations for PostgreSQL backup CronJob pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## backup cronjob container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param backup.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.cronjob.resources is set (backup.cronjob.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param backup.cronjob.resources Set container requests and limits for different resources like CPU or memory + ## Example: + resources: {} + ## resources: + ## requests: + ## cpu: 1 + ## memory: 512Mi + ## limits: + ## cpu: 2 + ## memory: 1024Mi + networkPolicy: + ## @param backup.cronjob.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + storage: + ## @param backup.cronjob.storage.enabled Enable using a `PersistentVolumeClaim` as backup data volume + ## + enabled: true + ## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: "" + ## @param backup.cronjob.storage.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted + ## + resourcePolicy: "" + ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param backup.cronjob.storage.accessModes PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume + ## + size: 8Gi + ## @param backup.cronjob.storage.annotations PVC annotations + ## + annotations: {} + ## @param backup.cronjob.storage.mountPath Path to mount the volume at + ## + mountPath: /backup/pgdump + ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at + ## and one PV for multiple services. 
+ ## + subPath: "" + ## Fine tuning for volumeClaimTemplates + ## + volumeClaimTemplates: + ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) + ## A label query over volumes to consider for binding (e.g. when using local volumes) + ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details + ## + selector: {} + ## @param backup.cronjob.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the backup container + ## + extraVolumeMounts: [] + ## @param backup.cronjob.extraVolumes Optionally specify extra list of additional volumes for the backup container + ## + extraVolumes: [] + +## @section Password update job +## +passwordUpdateJob: + ## @param passwordUpdateJob.enabled Enable password update job + ## + enabled: false + ## @param passwordUpdateJob.backoffLimit set backoff limit of the job + ## + backoffLimit: 10 + ## @param passwordUpdateJob.command Override default container command on mysql Primary container(s) (useful when using custom images) + ## + command: [] + ## @param passwordUpdateJob.args Override default container args on mysql Primary container(s) (useful when using custom images) + ## + args: [] + ## @param passwordUpdateJob.extraCommands Extra commands to pass to the generation job + ## + extraCommands: "" + ## @param passwordUpdateJob.previousPasswords.postgresPassword Previous postgres password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.password Previous password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.replicationPassword Previous replication password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.existingSecret Name of a secret containing the previous passwords (set if the password secret was already changed) + 
previousPasswords: + postgresPassword: "" + password: "" + replicationPassword: "" + existingSecret: "" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param passwordUpdateJob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param passwordUpdateJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param passwordUpdateJob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param passwordUpdateJob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param passwordUpdateJob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param passwordUpdateJob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param passwordUpdateJob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param passwordUpdateJob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param passwordUpdateJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param passwordUpdateJob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param passwordUpdateJob.podSecurityContext.enabled Enabled credential init job pods' Security Context + ## 
@param passwordUpdateJob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param passwordUpdateJob.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param passwordUpdateJob.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param passwordUpdateJob.podSecurityContext.fsGroup Set credential init job pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## @param passwordUpdateJob.extraEnvVars Array containing extra env vars to configure the credential init job + ## For example: + ## extraEnvVars: + ## - name: GF_DEFAULT_INSTANCE_NAME + ## value: my-instance + ## + extraEnvVars: [] + ## @param passwordUpdateJob.extraEnvVarsCM ConfigMap containing extra env vars to configure the credential init job + ## + extraEnvVarsCM: "" + ## @param passwordUpdateJob.extraEnvVarsSecret Secret containing extra env vars to configure the credential init job (in case of sensitive data) + ## + extraEnvVarsSecret: "" + ## @param passwordUpdateJob.extraVolumes Optionally specify extra list of additional volumes for the credential init job + ## + extraVolumes: [] + ## @param passwordUpdateJob.extraVolumeMounts Array of extra volume mounts to be added to the password update job container (evaluated as template). Normally used with `extraVolumes`. + ## + extraVolumeMounts: [] + ## @param passwordUpdateJob.initContainers Add additional init containers for the PostgreSQL Primary pod(s) + ## + initContainers: [] + ## Container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param passwordUpdateJob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if passwordUpdateJob.resources is set (passwordUpdateJob.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "micro" + ## @param passwordUpdateJob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param passwordUpdateJob.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param passwordUpdateJob.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param passwordUpdateJob.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param passwordUpdateJob.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param passwordUpdateJob.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param passwordUpdateJob.annotations [object] Add annotations to the job + ## + annotations: {} + ## @param passwordUpdateJob.podLabels Additional pod labels + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param passwordUpdateJob.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param 
volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 12-debian-12-r51 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container + ## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container + ## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container + ## + containerSecurityContext: + seLinuxOptions: {} + runAsUser: 0 + runAsGroup: 0 + runAsNonRoot: false + seccompProfile: + type: RuntimeDefault +## @section Other Parameters +## + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false +## Service account for PostgreSQL to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Creates role for ServiceAccount +## @param rbac.create Create Role and RoleBinding (required for PSP to work) +## +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later +## +psp: + create: false +## @section Metrics Parameters +## +metrics: + ## @param metrics.enabled Start a prometheus exporter + ## + enabled: false + ## @param metrics.image.registry [default: REGISTRY_NAME] PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/postgres-exporter] PostgreSQL Prometheus Exporter image repository + ## @skip metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy + ## @param metrics.image.pullSecrets Specify image pull secrets + ## + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.17.1-debian-12-r16 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.collectors Control enabled collectors + ## ref: https://github.com/prometheus-community/postgres_exporter#flags + ## Example: + ## collectors: + ## wal: false + collectors: {} + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/prometheus-community/postgres_exporter#adding-new-metrics-via-a-config-file-deprecated + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/prometheus-community/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context + ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged + ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + 
## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + 
## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus 
Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "postgresql.v1.chart.fullname" .) }}"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ include "postgresql.v1.chart.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
+ ## + rules: [] diff --git a/package.json b/package.json index a1e3dc6b..7714f12e 100644 --- a/package.json +++ b/package.json @@ -1,5 +1,6 @@ { "name": "lido-local-devnet", + "version": "2.0.0-alpha.1", "packageManager": "yarn@4.2.2", "private": true, "type": "module", @@ -17,24 +18,32 @@ ], "dependencies": { "@devnet/command": "workspace:*", + "@devnet/fp": "workspace:*", + "@devnet/k8s": "workspace:*", "@devnet/key-manager-api": "workspace:*", "@devnet/keygen": "workspace:*", + "@devnet/utils": "workspace:*", "@fastify/swagger": "^9.4.2", "@fastify/swagger-ui": "^5.2.1", "@oclif/core": "^4.0.37", "@oclif/plugin-help": "^6.2.19", "@types/node": "^22.10.5", + "bcryptjs": "^3.0.2", + "cdk8s": "^2.70.15", "chalk": "^5.4.1", + "constructs": "^10.4.2", "dockerode": "^4.0.4", "ethers": "^6.13.4", "execa": "^9.5.2", "fastify": "^5.2.1", + "ps-list": "^7.2.0", "ts-node": "^10.9.2", "typescript": "^5", "yaml": "^2.6.1", "zod": "^3.24.1" }, "devDependencies": { + "@types/bcryptjs": "^3.0.0", "@types/dockerode": "^3.3.34", "eslint": "^8", "eslint-config-oclif": "^5", @@ -46,7 +55,14 @@ }, "oclif": { "bin": "./bin/run.js", - "commands": "./dist/commands", + "commands": { + "strategy": "pattern", + "globPatterns": [ + "**/*.+(js|cjs|mjs|ts|tsx|mts|cts)", + "!**/*.+(d.*|extension.*|template.*|helpers.*|constants.*)?(x)" + ], + "target": "./dist/commands" + }, "dirname": "devnet", "topicSeparator": " ", "topics": { diff --git a/packages/cl-client/src/index.ts b/packages/cl-client/src/index.ts index fc93e92e..b8d4b87b 100644 --- a/packages/cl-client/src/index.ts +++ b/packages/cl-client/src/index.ts @@ -36,6 +36,8 @@ export type BeaconValidatorsResponse = z.infer; export const BeaconConfigSchema = z.object({ data: z.object({ ELECTRA_FORK_EPOCH: z.string(), + CAPELLA_FORK_EPOCH: z.string(), + FULU_FORK_EPOCH: z.string(), SECONDS_PER_SLOT: z.string(), SLOTS_PER_EPOCH: z.string(), DEPOSIT_CONTRACT_ADDRESS: z.string(), diff --git a/packages/command/package.json 
b/packages/command/package.json index efa6547e..9e53cdf1 100644 --- a/packages/command/package.json +++ b/packages/command/package.json @@ -16,8 +16,11 @@ }, "dependencies": { "@devnet/cl-client": "workspace:*", + "@devnet/logger": "workspace:*", "@devnet/service": "workspace:*", "@devnet/state": "workspace:*", + "@devnet/types": "workspace:*", + "@devnet/ui": "workspace:*", "@fastify/swagger": "^9.4.2", "@fastify/swagger-ui": "^5.2.1", "@oclif/core": "^4.0.37", @@ -33,6 +36,7 @@ }, "devDependencies": { "@types/dockerode": "^3.3.34", + "@types/ws": "^8.18.0", "eslint": "^8", "eslint-config-oclif": "^5", "eslint-config-oclif-typescript": "^3", diff --git a/packages/command/src/bigint.ts b/packages/command/src/bigint.ts new file mode 100644 index 00000000..762823eb --- /dev/null +++ b/packages/command/src/bigint.ts @@ -0,0 +1,5 @@ +// bigint patch +// eslint-disable-next-line no-extend-native +(BigInt as any).prototype.toJSON = function() { + return this.toString(); +}; diff --git a/packages/command/src/command.ts b/packages/command/src/command.ts index 9be7e794..024882f6 100644 --- a/packages/command/src/command.ts +++ b/packages/command/src/command.ts @@ -1,13 +1,15 @@ +import { EmbeddedServicesConfigs } from "@devnet/service"; +import { DEFAULT_NETWORK_NAME, Network } from "@devnet/types"; +import { DevNetError } from "@devnet/utils"; import { Command as BaseCommand } from "@oclif/core"; import { FlagInput } from "@oclif/core/interfaces"; import { ExecaError } from "execa"; import { ZodError } from "zod"; -import { DEFAULT_NETWORK_NAME } from "./constants.js"; import { CustomDevNetContext, DevNetContext } from "./context.js"; -import { DevNetError } from "./error.js"; +import { CustomDevNetExtension } from "./extension.js"; import { string } from "./params.js"; -import { DevNetRuntimeEnvironment } from "./runtime-env.js"; +import { DevNetRuntimeEnvironment, DevNetRuntimeEnvironmentInterface } from "./runtime-env.js"; import { ExtractFlags } from "./types.js"; 
export function formatZodErrors(error: ZodError): string[] { @@ -87,7 +89,7 @@ export class DevNetCommand extends BaseCommand { static baseFlags = { network: string({ default: DEFAULT_NETWORK_NAME, - description: "Name of the network", + description: `Name of the network (default: '${DEFAULT_NETWORK_NAME}')`, required: false, }), }; @@ -111,8 +113,8 @@ export class DevNetCommand extends BaseCommand { flags: this.ctor.flags, strict: this.ctor.strict, }); - const dre = await DevNetRuntimeEnvironment.getNew( - params.network, + const dre = await DevNetRuntimeEnvironment.create( + Network.parse(params.network), this.id ?? "anonymous", this.config, ); @@ -124,10 +126,10 @@ export class DevNetCommand extends BaseCommand { public async run(): Promise { const ctor = this.constructor as typeof DevNetCommand; - await executeCommandWithLogging( + return await executeCommandWithLogging( async () => { await this.ctx.dre.runHooks(); - await ctor.handler(this.ctx); + return await ctor.handler(this.ctx); }, this.ctx, ctor.description!, @@ -136,20 +138,32 @@ export class DevNetCommand extends BaseCommand { } export type InferredFlags = T extends FlagInput ? F : unknown; +export type CmdReturn = CMD extends FactoryResult ? 
F : unknown; -type CommandOptions> = { +type CommandOptions, R> = { description: string; - handler: (ctx: CustomDevNetContext) => Promise; - params: F; + extensions?: CustomDevNetExtension[], + handler: (ctx: CustomDevNetContext) => Promise; + params: Params; }; -export type FactoryResult> = { - exec(dre: DevNetRuntimeEnvironment, params: InferredFlags): Promise; -} & { _internalParams: InferredFlags } & typeof DevNetCommand; +export type FactoryResult, R> = { + exec(dre: DevNetRuntimeEnvironmentInterface, params: InferredFlags): Promise; +} & { _internalParams: InferredFlags } & typeof DevNetCommand; + +const extensions: CustomDevNetExtension[] = []; + +const applyExtensions = (dre: DevNetRuntimeEnvironmentInterface) => { + extensions?.forEach(extension => { + extension(dre); + }); +}; + +function isomorphic, R>( + options: CommandOptions, +): FactoryResult { + extensions.push(...options.extensions ?? []); -function isomorphic>( - options: CommandOptions, -): FactoryResult { class WrappedCommand extends DevNetCommand { static description = options.description; static flags = { @@ -169,7 +183,7 @@ function isomorphic>( this: H, dre: DevNetRuntimeEnvironment, params: InferredFlags, - ): Promise { + ): Promise { const paramsWithNetwork = { ...params, network: dre.network.name, @@ -178,7 +192,7 @@ function isomorphic>( dre: dre.clone(this.id), params: paramsWithNetwork, }); - await executeCommandWithLogging( + return await executeCommandWithLogging( () => this.handler(context), context, this.description!, @@ -186,15 +200,18 @@ function isomorphic>( } static async handler(ctx: CustomDevNetContext) { - await options.handler(ctx); + applyExtensions(ctx.dre); + return await options.handler(ctx); } } - return WrappedCommand as FactoryResult; + return WrappedCommand as FactoryResult; } -function cli>( - options: CommandOptions, -): FactoryResult { +function cli, R>( + options: CommandOptions, +): FactoryResult { + extensions.push(...options.extensions ?? 
[]); + class WrappedCommand extends DevNetCommand { static description = options.description; static flags = { @@ -214,7 +231,7 @@ function cli>( this: H, dre: DevNetRuntimeEnvironment, params: InferredFlags, - ): Promise { + ): Promise { const paramsWithNetwork = { ...params, network: dre.network.name, @@ -223,7 +240,7 @@ function cli>( dre: dre.clone(this.id), params: paramsWithNetwork, }); - await executeCommandWithLogging( + return await executeCommandWithLogging( () => this.handler(context), context, this.description!, @@ -231,15 +248,19 @@ function cli>( } static async handler(ctx: CustomDevNetContext) { - await options.handler(ctx); + applyExtensions(ctx.dre); + + return await options.handler(ctx); } } - return WrappedCommand as FactoryResult; + return WrappedCommand as FactoryResult; } -function hidden>( - options: CommandOptions, -): FactoryResult { +function hidden, R>( + options: CommandOptions, +): FactoryResult { + extensions.push(...options.extensions ?? []); + class WrappedCommand extends DevNetCommand { static description = options.description; static flags = { @@ -260,7 +281,7 @@ function hidden>( this: H, dre: DevNetRuntimeEnvironment, params: InferredFlags, - ): Promise { + ): Promise { const paramsWithNetwork = { ...params, network: dre.network.name, @@ -269,7 +290,7 @@ function hidden>( dre: dre.clone(this.id), params: paramsWithNetwork, }); - await executeCommandWithLogging( + return await executeCommandWithLogging( () => this.handler(context), context, this.description!, @@ -277,10 +298,12 @@ function hidden>( } static async handler(ctx: CustomDevNetContext) { - await options.handler(ctx); + applyExtensions(ctx.dre); + + return await options.handler(ctx); } } - return WrappedCommand as FactoryResult; + return WrappedCommand as FactoryResult; } export const command = { cli, hidden, isomorphic }; diff --git a/packages/command/src/constants.ts b/packages/command/src/constants.ts index b50f866b..600cad9c 100644 --- 
a/packages/command/src/constants.ts +++ b/packages/command/src/constants.ts @@ -1,4 +1,3 @@ import path from "node:path"; -export const DEFAULT_NETWORK_NAME = 'my-devnet' + export const USER_CONFIG_PATH = path.join(process.cwd(), "config.yml"); -export const ARTIFACTS_ROOT = path.join(process.cwd(), "artifacts"); diff --git a/packages/command/src/context.ts b/packages/command/src/context.ts index c7229533..71aa325f 100644 --- a/packages/command/src/context.ts +++ b/packages/command/src/context.ts @@ -1,16 +1,18 @@ import { Command as BaseCommand, Interfaces } from "@oclif/core"; -import { DevNetRuntimeEnvironment } from "./runtime-env.js"; +import { + DevNetRuntimeEnvironmentInterface, +} from "./runtime-env.js"; import { ExtractFlags } from "./types.js"; export class DevNetContext { // public args: ExtractArgs; - public readonly dre: DevNetRuntimeEnvironment; + public readonly dre: DevNetRuntimeEnvironmentInterface; public params: ExtractFlags; // public runCommand: (id: string, argv?: string[]) => Promise; - constructor(options: { + public constructor(options: { // args: ExtractArgs; - dre: DevNetRuntimeEnvironment; + dre: DevNetRuntimeEnvironmentInterface; params: ExtractFlags; // runCommand: (id: string, argv?: string[]) => Promise; }) { @@ -22,6 +24,6 @@ export class DevNetContext { } export type CustomDevNetContext, T extends typeof BaseCommand> = { - dre: DevNetRuntimeEnvironment; + dre: DevNetRuntimeEnvironmentInterface; params: Interfaces.InferredFlags<(T)["baseFlags"] &F>; }; diff --git a/packages/command/src/error.ts b/packages/command/src/error.ts deleted file mode 100644 index ecae3e3a..00000000 --- a/packages/command/src/error.ts +++ /dev/null @@ -1 +0,0 @@ -export class DevNetError extends Error {} diff --git a/packages/command/src/extension.ts b/packages/command/src/extension.ts new file mode 100644 index 00000000..f5f96070 --- /dev/null +++ b/packages/command/src/extension.ts @@ -0,0 +1,3 @@ +import { DevNetRuntimeEnvironmentInterface } from 
"./runtime-env.js"; + +export type CustomDevNetExtension = (dre: DevNetRuntimeEnvironmentInterface) => Promise | void; diff --git a/packages/command/src/index.ts b/packages/command/src/index.ts index 8691f0e6..97547f87 100644 --- a/packages/command/src/index.ts +++ b/packages/command/src/index.ts @@ -1,8 +1,8 @@ -export * from "./assert.js"; +import "./bigint.js"; export * from "./command.js"; export * from "./context.js"; -export { DevNetError } from "./error.js"; -export * from "./logger.js"; export * as Params from "./params.js"; export * from "./rpc.js"; +export { DevNetRuntimeEnvironmentInterface } from "./runtime-env.js"; export * from "./types.js"; +export * from "@devnet/types"; diff --git a/packages/command/src/network/index.ts b/packages/command/src/network/index.ts index b162b508..ebc9903a 100644 --- a/packages/command/src/network/index.ts +++ b/packages/command/src/network/index.ts @@ -1,5 +1,8 @@ import { BeaconClient } from "@devnet/cl-client"; -import { State } from "@devnet/state"; +import { DevNetLogger } from "@devnet/logger"; +import { StateInterface } from "@devnet/state"; +import { Network } from "@devnet/types"; +import { assert } from "@devnet/utils"; import { AbstractSigner, JsonRpcProvider, @@ -9,13 +12,11 @@ import { parseEther, } from "ethers"; -import { assert } from "../assert.js"; -import { DevNetLogger } from "../logger.js"; export class DevNetDRENetwork { - name: string; + public readonly name: Network; private logger: DevNetLogger; - private state: State; - constructor(network: string, state: State, logger: DevNetLogger) { + private state: StateInterface; + constructor(network: Network, state: StateInterface, logger: DevNetLogger) { this.name = network; this.state = state; this.logger = logger; @@ -36,6 +37,20 @@ export class DevNetDRENetwork { return wallet as AbstractSigner; } + public async waitCL() { + const { clPublic } = await this.state.getChain(); + this.logger.log(`Ensuring the consensus node at ${clPublic} is ready...`); + 
await this.fetchGenesisWithRetry(); + this.logger.log("Consensus node is ready."); + } + + public async waitCLFinalizedEpoch(epoch: number) { + const { clPublic } = await this.state.getChain(); + this.logger.log(`Waiting for consensus node at ${clPublic} to reach epoch ${epoch}...`); + await this.waitForFinalizedEpochWithRetry(epoch); + this.logger.log(`Consensus node has reached epoch ${epoch}.`); + } + public async waitEL() { const { elPublic } = await this.state.getChain(); this.logger.log(`Ensuring the execution node at ${elPublic} is ready...`); @@ -43,6 +58,51 @@ this.logger.log("Execution node is ready."); } + private async fetchGenesisWithRetry(): Promise<void> { + const clClient = await this.getCLClient(); + + const attemptToFetchGenesis = async (): Promise<void> => { + try { + await clClient.getGenesis(); + } catch (error) { + this.logger.log( + `Consensus node not ready yet... Retrying in 5 seconds ${error}`, + ); + await new Promise((resolve) => setTimeout(resolve, 5000)); + return attemptToFetchGenesis(); + } + }; + + return attemptToFetchGenesis(); + } + + private async waitForFinalizedEpochWithRetry(targetEpoch: number): Promise<void> { + const clClient = await this.getCLClient(); + + const attemptToWaitForEpoch = async (): Promise<void> => { + try { + const currentEpoch = await clClient.getFinalizedEpoch(); + if (currentEpoch >= targetEpoch) { + return; + } + + this.logger.log( + `Current epoch is ${currentEpoch}, waiting for epoch ${targetEpoch}... Retrying in 5 seconds`, + ); + await new Promise((resolve) => setTimeout(resolve, 5000)); + return attemptToWaitForEpoch(); + } catch (error: any) { + this.logger.log( + `Error checking epoch: ${error.message}... 
Retrying in 5 seconds`, + ); + await new Promise((resolve) => setTimeout(resolve, 5000)); + return attemptToWaitForEpoch(); + } + }; + + return attemptToWaitForEpoch(); + } + private async sendTransactionWithRetry( amount = "1", toAddress = "0xf93Ee4Cf8c6c40b329b0c0626F28333c132CF241", diff --git a/packages/command/src/network/utils.ts b/packages/command/src/network/utils.ts index 3db78481..dcc9c173 100644 --- a/packages/command/src/network/utils.ts +++ b/packages/command/src/network/utils.ts @@ -5,7 +5,7 @@ import { parseEther, } from "ethers"; -import { assert } from "../assert.js"; +import { assert } from "@devnet/utils"; interface TransactionDetails { amount: string; diff --git a/packages/command/src/runtime-env.ts b/packages/command/src/runtime-env.ts index 931b7712..7338e3b5 100644 --- a/packages/command/src/runtime-env.ts +++ b/packages/command/src/runtime-env.ts @@ -1,39 +1,81 @@ -import { State } from "@devnet/state"; +import { DevNetLogger } from "@devnet/logger"; +import { DevnetServiceRegistry } from "@devnet/service"; +import { State, StateInterface } from "@devnet/state"; +import { ChainRoot, Network } from "@devnet/types"; +import { assert } from "@devnet/utils"; import { Config as OclifConfig } from "@oclif/core"; +import * as dotenv from "dotenv"; import { readFile, rm } from "node:fs/promises"; import * as YAML from "yaml"; +import { z } from "zod"; -import { assert } from "./assert.js"; -import { FactoryResult } from "./command.js"; +import { CmdReturn, FactoryResult } from "./command.js"; import { USER_CONFIG_PATH } from "./constants.js"; -import { DevNetLogger } from "./logger.js"; import { DevNetDRENetwork } from "./network/index.js"; -import { DevNetServiceRegistry } from "./service/service-registry.js"; -export const loadUserConfig = async () => - YAML.parse(await readFile(USER_CONFIG_PATH, "utf-8")); +dotenv.config({ path: '.env' }); + +// TODO make zod from json-schema +const YamlConfig = z.object({ + networks: z.array( + z.object({ + 
name: z.string(), + chain: z.record(z.string(), z.any()).optional(), + lido: z.record(z.string(), z.any()).optional(), + csm: z.record(z.string(), z.any()).optional(), + walletMnemonic: z.string().optional(), + }) + ) +}); +type YamlConfig = z.infer; + +const loadUserYamlConfig = async (): Promise => { + const parsedYaml = YAML.parse(await readFile(USER_CONFIG_PATH, "utf-8")); + + return YamlConfig.parse(parsedYaml); +} + + +export interface DevNetRuntimeEnvironmentInterface { + clean(): Promise; + clone(commandName: string): DevNetRuntimeEnvironmentInterface; + readonly logger: DevNetLogger; + readonly network: DevNetDRENetwork; + runCommand< + F extends Record, + R, + CMD extends FactoryResult, + >(cmd: CMD, args: CMD["_internalParams"]): Promise; + + runHooks(): Promise; -export class DevNetRuntimeEnvironment { + readonly services: DevnetServiceRegistry["services"]; + + readonly state: StateInterface +} + +export class DevNetRuntimeEnvironment implements DevNetRuntimeEnvironmentInterface { public readonly logger: DevNetLogger; public readonly network: DevNetDRENetwork; - public readonly services: DevNetServiceRegistry["services"]; - public readonly state: State; + public readonly services: DevnetServiceRegistry["services"]; + public readonly state: StateInterface; private readonly oclifConfig: OclifConfig; - private readonly registry: DevNetServiceRegistry; + private readonly registry: DevnetServiceRegistry; - constructor( - network: string, + protected constructor( + network: Network, rawConfig: unknown, - registry: DevNetServiceRegistry, + registry: DevnetServiceRegistry, logger: DevNetLogger, oclifConfig: OclifConfig, ) { this.state = new State( rawConfig, registry.root, - registry.services.kurtosis.artifact.root, + // TODO make this dynamic (get rid of kurtosis knowledge here) + ChainRoot.parse(registry.services.kurtosis.artifact.root), ); this.network = new DevNetDRENetwork(network, this.state, logger); this.services = registry.services; @@ -45,20 +87,20 @@ 
export class DevNetRuntimeEnvironment { this.oclifConfig = oclifConfig; } - static async getNew( - network: string, + static async create( + network: Network, commandName: string, oclifConfig: OclifConfig, - ) { + ): Promise { const logger = new DevNetLogger(network, commandName); - const userConfig = await loadUserConfig().catch(() => + const userConfig = await loadUserYamlConfig().catch(() => console.log("User config not found, use empty object"), ); const networkConfig = - userConfig?.networks?.find((net: any) => net?.name === network) ?? {}; + userConfig?.networks?.find((net) => net?.name === network) ?? {}; - const registry = await DevNetServiceRegistry.getNew( + const registry = await DevnetServiceRegistry.create( network, commandName, logger, @@ -84,7 +126,7 @@ export class DevNetRuntimeEnvironment { await rm(this.registry.root, { recursive: true, force: true }); } - public clone(commandName: string) { + public clone(commandName: string): DevNetRuntimeEnvironmentInterface { const newLogger = new DevNetLogger(this.network.name, commandName); return new DevNetRuntimeEnvironment( this.network.name, @@ -97,8 +139,9 @@ export class DevNetRuntimeEnvironment { public runCommand< F extends Record, - CMD extends FactoryResult, - >(cmd: CMD, args: CMD["_internalParams"]) { + R, + CMD extends FactoryResult, + >(cmd: CMD, args: CMD["_internalParams"]): Promise { return cmd.exec(this, args); } @@ -118,13 +161,13 @@ export class DevNetRuntimeEnvironment { `You have specified a command that does not exist, invoked by ${invokedBy}`, ); - const CommandClass = (await cmd.load()) as FactoryResult; + const CommandClass = (await cmd.load()) as FactoryResult; assert( CommandClass.exec !== undefined, `You have specified a command that cannot be invoked with the string, invoked by ${invokedBy}`, ); - await CommandClass.exec(this.clone(commandName), {}); + return await CommandClass.exec(this.clone(commandName), {}); } } diff --git a/packages/command/src/service/index.ts 
b/packages/command/src/service/index.ts deleted file mode 100644 index c38362d0..00000000 --- a/packages/command/src/service/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './service-registry.js' diff --git a/packages/command/src/service/service-registry.ts b/packages/command/src/service/service-registry.ts deleted file mode 100644 index d5c0d3e0..00000000 --- a/packages/command/src/service/service-registry.ts +++ /dev/null @@ -1,75 +0,0 @@ -import { services } from "@devnet/service"; -import { mkdir, rm } from "node:fs/promises"; -import path from "node:path"; - -import { ARTIFACTS_ROOT } from "../constants.js"; -import { DevNetLogger } from "../logger.js"; -import { DevNetService } from "./service.js"; - -type DevNetServices = typeof services; - -export class DevNetServiceRegistry { - network: string; - root: string; - services: { [K in keyof DevNetServices]: DevNetService }; - - constructor( - network: string, - services: { [K in keyof DevNetServices]: DevNetService }, - ) { - this.root = path.join(ARTIFACTS_ROOT, network); - this.network = network; - this.services = services; - } - - static async createRootDir(network: string) { - await mkdir(this.getRoot(network), { recursive: true }); - } - - static async getNew( - network: string, - commandName: string, - logger: DevNetLogger, - ): Promise { - await this.createRootDir(network); - const rootDir = this.getRoot(network); - - const servicesList = await Promise.all( - Object.entries(services).map(async ([key]) => [ - key, - await DevNetService.getNew( - rootDir, - network, - logger, - commandName, - key as keyof DevNetServices, - ), - ]), - ); - - return new DevNetServiceRegistry( - network, - Object.fromEntries(servicesList) as { - [K in keyof DevNetServices]: DevNetService; - }, - ); - } - - static getRoot(network: string) { - return path.join(ARTIFACTS_ROOT, network); - } - - public async clean() { - await rm(this.root, { force: true, recursive: true }); - } - - public clone(commandName: string, logger: 
DevNetLogger) { - const clonedServices = Object.fromEntries( - Object.entries(this.services).map(([key, service]) => [ - key, - service.clone(commandName, logger), - ]) - ) as { [K in keyof DevNetServices]: DevNetService }; - return new DevNetServiceRegistry(this.network, clonedServices); - } -} diff --git a/packages/command/tsconfig.tsbuildinfo b/packages/command/tsconfig.tsbuildinfo index 680e805d..11224900 100644 --- a/packages/command/tsconfig.tsbuildinfo +++ b/packages/command/tsconfig.tsbuildinfo @@ -1 +1 @@ -{"root":["./src/assert.ts","./src/command.ts","./src/constants.ts","./src/context.ts","./src/error.ts","./src/index.ts","./src/logger.ts","./src/params.ts","./src/rpc.ts","./src/runtime-env.ts","./src/types.ts","./src/ui.ts","./src/docker/index.ts","./src/network/index.ts","./src/network/utils.ts","./src/service/index.ts","./src/service/service-artifact.ts","./src/service/service-registry.ts","./src/service/service.ts"],"version":"5.7.3"} \ No newline at end of file +{"root":["./src/bigint.ts","./src/command.ts","./src/constants.ts","./src/context.ts","./src/extension.ts","./src/index.ts","./src/params.ts","./src/rpc.ts","./src/runtime-env.ts","./src/types.ts","./src/network/index.ts","./src/network/utils.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/packages/docker/.eslintignore b/packages/docker/.eslintignore new file mode 100644 index 00000000..4b2c927a --- /dev/null +++ b/packages/docker/.eslintignore @@ -0,0 +1,3 @@ +submodules +dist +node_modules diff --git a/packages/docker/.eslintrc b/packages/docker/.eslintrc new file mode 100644 index 00000000..a0af25f7 --- /dev/null +++ b/packages/docker/.eslintrc @@ -0,0 +1,22 @@ +{ + "extends": ["oclif", "oclif-typescript", "prettier"], + "overrides": [ + { + "files": ["src/**/*.ts", "tests/**/*.ts"], + "rules": { + "no-console": "off", + "no-await-in-loop": "off", + "no-promise-executor-return": "off", + "new-cap": "off", + "@typescript-eslint/no-explicit-any": "off", + "no-return-await": 
"off", + "unicorn/no-array-reduce": "off", + "unicorn/text-encoding-identifier-case": "off", + "unicorn/consistent-destructuring": "off", + "max-params":"off", + "unicorn/no-array-for-each": "off", + "perfectionist/sort-objects": "off" + }, + }, + ], +} diff --git a/packages/docker/package.json b/packages/docker/package.json new file mode 100644 index 00000000..36e46a62 --- /dev/null +++ b/packages/docker/package.json @@ -0,0 +1,37 @@ +{ + "name": "@devnet/docker", + "version": "1.0.0", + "type": "module", + "main": "dist/index.js", + "types": "src/index.ts", + "scripts": { + "build": "rm -rf dist && rm -rf tsconfig.tsbuildinfo && tsc -b", + "build:types": "tsc --build --emitDeclarationOnly", + "lint": "eslint . --ext .ts && tsc --noEmit" + }, + "exports": { + ".": "./dist/index.js" + }, + "dependencies": { + "@devnet/logger": "workspace:*", + "@devnet/utils": "workspace:*", + "@oclif/core": "^4.0.37", + "@oclif/plugin-help": "^6.2.19", + "@types/node": "^22.10.5", + "dockerode": "^4.0.4", + "dotenv": "^17.2.1", + "execa": "^9.5.2", + "typescript": "^5" + }, + "devDependencies": { + "eslint": "^8", + "eslint-config-oclif": "^5", + "eslint-config-oclif-typescript": "^3", + "eslint-config-prettier": "^9", + "eslint-plugin-prettier": "^5.2.2", + "prettier": "^3.4.2" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/command/src/docker/index.ts b/packages/docker/src/index.ts similarity index 98% rename from packages/command/src/docker/index.ts rename to packages/docker/src/index.ts index b63ca407..f6a05998 100644 --- a/packages/command/src/docker/index.ts +++ b/packages/docker/src/index.ts @@ -1,14 +1,14 @@ +import { DevNetError } from "@devnet/utils"; import Docker from "dockerode"; - -import {DevNetError} from "../error.js"; +export * from "./registry.js"; const docker = new Docker(); export interface ContainerInfo { + client: string; id: string; ip: string; name: string; - client: string; ports: { privatePort?: number; privateUrl?: string; diff --git 
a/packages/docker/src/registry.ts b/packages/docker/src/registry.ts new file mode 100644 index 00000000..33593e18 --- /dev/null +++ b/packages/docker/src/registry.ts @@ -0,0 +1,96 @@ +import { DevNetError } from "@devnet/utils"; +import { execa } from "execa"; + +export interface DockerPushOptions { + imageName: string; + password: string; + registryHostname: string; + tag: string; + username: string; +} + +/** + * Pushes a Docker image with a specific tag to a custom registry using username/password authentication. + * + * @param options - Configuration options for pushing the Docker image + * @returns Promise that resolves when the image is successfully pushed + * @throws DevNetError if login or push operations fail + */ +export async function pushDockerImage(options: DockerPushOptions): Promise { + const { imageName, tag, registryHostname, username, password } = options; + + try { + // Build the full image name with registry URL and tag + const fullImageName = `${registryHostname}/${imageName}:${tag}`; + + // First, login to the registry + console.log(`Logging in to Docker registry: ${registryHostname}`); + await execa("docker", ["login", registryHostname, "--username", username, "--password-stdin"], { + input: password, + stdio: ["pipe", "inherit", "inherit"] + }); + + console.log(`Successfully logged in to registry: ${registryHostname}`); + + // Tag the local image with the registry URL if it's not already tagged + console.log(`Tagging image: ${imageName}:${tag} as ${fullImageName}`); + await execa("docker", ["tag", `${imageName}:${tag}`, fullImageName], { + stdio: "inherit" + }); + + // Push the image to the registry + console.log(`Pushing image: ${fullImageName}`); + await execa("docker", ["push", fullImageName], { + stdio: "inherit" + }); + + console.log(`Successfully pushed image: ${fullImageName}`); + + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + throw new DevNetError( + `Failed to push Docker image ${imageName}:${tag} to registry ${registryHostname}: ${errorMessage}` + ); + } +} + +/** + * Builds and pushes a Docker image with a specific tag to a custom registry. + * + * @param options - Configuration options including build context path + * @returns Promise that resolves when the image is successfully built and pushed + * @throws DevNetError if build, login or push operations fail + */ +export async function buildAndPushDockerImage( + options: { buildContext: string, cwd: string; dockerfile?: string } & DockerPushOptions +): Promise { + const { imageName, tag, registryHostname, username, password, buildContext, dockerfile } = options; + + try { + // Build the Docker image + const buildArgs = ["build", "--platform", "linux/amd64", "-t", `${imageName}:${tag}`]; + + if (dockerfile) { + buildArgs.push("-f", dockerfile); + } + + buildArgs.push(buildContext); + + console.log(`Building Docker image: ${imageName}:${tag}`); + await execa("docker", buildArgs, { + cwd: options.cwd, + stdio: "inherit" + }); + + console.log(`Successfully built image: ${imageName}:${tag}`); + + // Push the built image + await pushDockerImage({ imageName, tag, registryHostname, username, password }); + + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + throw new DevNetError( + `Failed to build and push Docker image ${imageName}:${tag}: ${errorMessage}` + ); + } +} diff --git a/packages/docker/tsconfig.json b/packages/docker/tsconfig.json new file mode 100644 index 00000000..9ed464ce --- /dev/null +++ b/packages/docker/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "module": "Node16", + "outDir": "./dist", + "rootDir": "src", + "strict": true, + "target": "es2022", + "moduleResolution": "node16", + "resolveJsonModule": true, + "strictNullChecks": true, + + "declaration": true, + "declarationMap": true, + "sourceMap": true + + }, + "include": ["./src/**/*"], + "exclude": [ + "./dist/**/*" + ], + "ts-node": { + "esm": true + } +} diff --git a/packages/docker/tsconfig.tsbuildinfo b/packages/docker/tsconfig.tsbuildinfo new file mode 100644 index 00000000..869eb4c0 --- /dev/null +++ b/packages/docker/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["./src/index.ts","./src/registry.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/packages/fp/.eslintignore b/packages/fp/.eslintignore new file mode 100644 index 00000000..4b2c927a --- /dev/null +++ b/packages/fp/.eslintignore @@ -0,0 +1,3 @@ +submodules +dist +node_modules diff --git a/packages/fp/.eslintrc b/packages/fp/.eslintrc new file mode 100644 index 00000000..a0af25f7 --- /dev/null +++ b/packages/fp/.eslintrc @@ -0,0 +1,22 @@ +{ + "extends": ["oclif", "oclif-typescript", "prettier"], + "overrides": [ + { + "files": ["src/**/*.ts", "tests/**/*.ts"], + "rules": { + "no-console": "off", + "no-await-in-loop": "off", + "no-promise-executor-return": "off", + "new-cap": "off", + "@typescript-eslint/no-explicit-any": "off", + "no-return-await": "off", + "unicorn/no-array-reduce": "off", + "unicorn/text-encoding-identifier-case": "off", + "unicorn/consistent-destructuring": "off", + "max-params":"off", + "unicorn/no-array-for-each": "off", + "perfectionist/sort-objects": "off" + }, + }, + ], +} diff --git 
a/packages/fp/package.json b/packages/fp/package.json new file mode 100644 index 00000000..ae4f6f25 --- /dev/null +++ b/packages/fp/package.json @@ -0,0 +1,33 @@ +{ + "name": "@devnet/fp", + "version": "1.0.0", + "type": "module", + "main": "dist/index.js", + "types": "src/index.ts", + "scripts": { + "build": "rm -rf dist && rm -rf tsconfig.tsbuildinfo && tsc -b", + "build:types": "tsc --build --emitDeclarationOnly", + "lint": "eslint . --ext .ts && tsc --noEmit" + }, + "exports": { + ".": "./dist/index.js" + }, + "dependencies": { + "@oclif/core": "^4.0.37", + "@oclif/plugin-help": "^6.2.19", + "@types/node": "^22.10.5", + "fp-ts": "^2.16.11", + "typescript": "^5" + }, + "devDependencies": { + "eslint": "^8", + "eslint-config-oclif": "^5", + "eslint-config-oclif-typescript": "^3", + "eslint-config-prettier": "^9", + "eslint-plugin-prettier": "^5.2.2", + "prettier": "^3.4.2" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/fp/src/TaskEither.ts b/packages/fp/src/TaskEither.ts new file mode 100644 index 00000000..099544c9 --- /dev/null +++ b/packages/fp/src/TaskEither.ts @@ -0,0 +1,24 @@ +// eslint-disable unicorn/filename-case + +import { isLeft } from 'fp-ts/lib/Either.js'; +import { TaskEither } from 'fp-ts/lib/TaskEither.js'; + +export * from 'fp-ts/lib/TaskEither.js'; + +/** + * Perform TaskEither, return result (right side) or throw error (left side) + * @template E - left side + * @template A - right side + * @returns {Promise<A>} + * @throws {E} - error + */ +export const execute = async <E, A>(task: TaskEither<E, A>): Promise<A> => { + const either = await task(); + + if (isLeft(either)) { + + throw either.left; + } + + return either.right; +}; diff --git a/packages/fp/src/index.ts b/packages/fp/src/index.ts new file mode 100644 index 00000000..d3335c2b --- /dev/null +++ b/packages/fp/src/index.ts @@ -0,0 +1,14 @@ +export * as TE from "./TaskEither.js"; +export * as A from "fp-ts/lib/Array.js"; +export * as E from "fp-ts/lib/Either.js"; +export * as Json from 
"fp-ts/lib/Json.js"; +export * as NEA from "fp-ts/lib/NonEmptyArray.js"; +export * as O from "fp-ts/lib/Option.js"; +export * as RA from "fp-ts/lib/ReadonlyArray.js"; +export * as T from "fp-ts/lib/Task.js"; + +export * from "fp-ts/lib/function.js"; + + + + diff --git a/packages/fp/tsconfig.json b/packages/fp/tsconfig.json new file mode 100644 index 00000000..9ed464ce --- /dev/null +++ b/packages/fp/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "module": "Node16", + "outDir": "./dist", + "rootDir": "src", + "strict": true, + "target": "es2022", + "moduleResolution": "node16", + "resolveJsonModule": true, + "strictNullChecks": true, + + "declaration": true, + "declarationMap": true, + "sourceMap": true + + }, + "include": ["./src/**/*"], + "exclude": [ + "./dist/**/*" + ], + "ts-node": { + "esm": true + } +} diff --git a/packages/fp/tsconfig.tsbuildinfo b/packages/fp/tsconfig.tsbuildinfo new file mode 100644 index 00000000..2c8c11c9 --- /dev/null +++ b/packages/fp/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["./src/TaskEither.ts","./src/index.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/packages/helm/.eslintignore b/packages/helm/.eslintignore new file mode 100644 index 00000000..4b2c927a --- /dev/null +++ b/packages/helm/.eslintignore @@ -0,0 +1,3 @@ +submodules +dist +node_modules diff --git a/packages/helm/.eslintrc b/packages/helm/.eslintrc new file mode 100644 index 00000000..a0af25f7 --- /dev/null +++ b/packages/helm/.eslintrc @@ -0,0 +1,22 @@ +{ + "extends": ["oclif", "oclif-typescript", "prettier"], + "overrides": [ + { + "files": ["src/**/*.ts", "tests/**/*.ts"], + "rules": { + "no-console": "off", + "no-await-in-loop": "off", + "no-promise-executor-return": "off", + "new-cap": "off", + "@typescript-eslint/no-explicit-any": "off", + "no-return-await": "off", + "unicorn/no-array-reduce": "off", + "unicorn/text-encoding-identifier-case": "off", + "unicorn/consistent-destructuring": "off", + "max-params":"off", + 
"unicorn/no-array-for-each": "off", + "perfectionist/sort-objects": "off" + }, + }, + ], +} diff --git a/packages/helm/package.json b/packages/helm/package.json new file mode 100644 index 00000000..7f4fde29 --- /dev/null +++ b/packages/helm/package.json @@ -0,0 +1,33 @@ +{ + "name": "@devnet/helm", + "version": "1.0.0", + "type": "module", + "main": "dist/index.js", + "types": "src/index.ts", + "scripts": { + "build": "rm -rf dist && rm -rf tsconfig.tsbuildinfo && tsc -b", + "build:types": "tsc --build --emitDeclarationOnly", + "lint": "eslint . --ext .ts && tsc --noEmit" + }, + "exports": { + ".": "./dist/index.js" + }, + "dependencies": { + "@oclif/core": "^4.0.37", + "@oclif/plugin-help": "^6.2.19", + "@types/node": "^22.10.5", + "dotenv": "^17.2.1", + "typescript": "^5" + }, + "devDependencies": { + "eslint": "^8", + "eslint-config-oclif": "^5", + "eslint-config-oclif-typescript": "^3", + "eslint-config-prettier": "^9", + "eslint-plugin-prettier": "^5.2.2", + "prettier": "^3.4.2" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/helm/src/constants.ts b/packages/helm/src/constants.ts new file mode 100644 index 00000000..ab269019 --- /dev/null +++ b/packages/helm/src/constants.ts @@ -0,0 +1,12 @@ +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +/** + * Root path to vendor helm charts + */ +export const HELM_VENDOR_CHARTS_ROOT_PATH = path.join( + path.dirname(fileURLToPath(import.meta.url)), + `../../../helm` +); + + diff --git a/packages/helm/src/index.ts b/packages/helm/src/index.ts new file mode 100644 index 00000000..4f07201d --- /dev/null +++ b/packages/helm/src/index.ts @@ -0,0 +1 @@ +export * from './constants.js'; diff --git a/packages/helm/tsconfig.json b/packages/helm/tsconfig.json new file mode 100644 index 00000000..9ed464ce --- /dev/null +++ b/packages/helm/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "module": "Node16", + "outDir": "./dist", + "rootDir": "src", + "strict": true, + "target": 
"es2022", + "moduleResolution": "node16", + "resolveJsonModule": true, + "strictNullChecks": true, + + "declaration": true, + "declarationMap": true, + "sourceMap": true + + }, + "include": ["./src/**/*"], + "exclude": [ + "./dist/**/*" + ], + "ts-node": { + "esm": true + } +} diff --git a/packages/helm/tsconfig.tsbuildinfo b/packages/helm/tsconfig.tsbuildinfo new file mode 100644 index 00000000..c2165087 --- /dev/null +++ b/packages/helm/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["./src/constants.ts","./src/index.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/packages/k8s/.eslintignore b/packages/k8s/.eslintignore new file mode 100644 index 00000000..4b2c927a --- /dev/null +++ b/packages/k8s/.eslintignore @@ -0,0 +1,3 @@ +submodules +dist +node_modules diff --git a/packages/k8s/.eslintrc b/packages/k8s/.eslintrc new file mode 100644 index 00000000..a0af25f7 --- /dev/null +++ b/packages/k8s/.eslintrc @@ -0,0 +1,22 @@ +{ + "extends": ["oclif", "oclif-typescript", "prettier"], + "overrides": [ + { + "files": ["src/**/*.ts", "tests/**/*.ts"], + "rules": { + "no-console": "off", + "no-await-in-loop": "off", + "no-promise-executor-return": "off", + "new-cap": "off", + "@typescript-eslint/no-explicit-any": "off", + "no-return-await": "off", + "unicorn/no-array-reduce": "off", + "unicorn/text-encoding-identifier-case": "off", + "unicorn/consistent-destructuring": "off", + "max-params":"off", + "unicorn/no-array-for-each": "off", + "perfectionist/sort-objects": "off" + }, + }, + ], +} diff --git a/packages/k8s/package.json b/packages/k8s/package.json new file mode 100644 index 00000000..2d2ebf47 --- /dev/null +++ b/packages/k8s/package.json @@ -0,0 +1,35 @@ +{ + "name": "@devnet/k8s", + "version": "1.0.0", + "type": "module", + "main": "dist/index.js", + "types": "src/index.ts", + "scripts": { + "build": "rm -rf dist && rm -rf tsconfig.tsbuildinfo && tsc -b", + "build:types": "tsc --build --emitDeclarationOnly", + "lint": "eslint . 
--ext .ts && tsc --noEmit" + }, + "exports": { + ".": "./dist/index.js" + }, + "dependencies": { + "@devnet/command": "workspace:*", + "@kubernetes/client-node": "^1.3.0", + "@oclif/core": "^4.0.37", + "@oclif/plugin-help": "^6.2.19", + "@types/node": "^22.10.5", + "dotenv": "^17.2.1", + "typescript": "^5" + }, + "devDependencies": { + "eslint": "^8", + "eslint-config-oclif": "^5", + "eslint-config-oclif-typescript": "^3", + "eslint-config-prettier": "^9", + "eslint-plugin-prettier": "^5.2.2", + "prettier": "^3.4.2" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/k8s/src/index.ts b/packages/k8s/src/index.ts new file mode 100644 index 00000000..211a08a0 --- /dev/null +++ b/packages/k8s/src/index.ts @@ -0,0 +1,2 @@ +export * from './utils.js'; +export * as k8s from "@kubernetes/client-node"; diff --git a/packages/k8s/src/utils.ts b/packages/k8s/src/utils.ts new file mode 100644 index 00000000..4d35325f --- /dev/null +++ b/packages/k8s/src/utils.ts @@ -0,0 +1,176 @@ +import type { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +import { DevNetError } from "@devnet/utils"; +import * as k8s from "@kubernetes/client-node"; +import * as dotenv from "dotenv"; + +dotenv.config({ path: '.env' }); + +const DEFAULT_LIMIT = 1000; + +export async function getK8s() { + if (!process.env.K8S_KUBECTL_DEFAULT_CONTEXT) { + throw new DevNetError(`Unable to connect to the k8s cluster. + Ensure 'K8S_KUBECTL_DEFAULT_CONTEXT' env variable in '.env' is set.`); + } + + const kc = new k8s.KubeConfig(); + kc.loadFromDefault(); + kc.setCurrentContext(process.env.K8S_KUBECTL_DEFAULT_CONTEXT); + + return kc; +} + +export async function pingCluster(): Promise { + const kc = await getK8s(); + const k8sCoreApi = kc.makeApiClient(k8s.CoreV1Api); + try { + await k8sCoreApi.listNamespace(); + } catch (error: unknown) { + throw new DevNetError(`Unable to connect to the cluster. 
+ Ensure 'K8S_KUBECTL_DEFAULT_CONTEXT' env variable in '.env' is set and the cluster is reachable. + Original error: ${error instanceof Error ? error.message : String(error)}`); + } +} + +export const getK8sService = async ( + dre: DevNetRuntimeEnvironmentInterface, + filter?: { label?: string, name?: RegExp | string }, + namespace: string = `kt-${dre.network.name}`, +) => { + const kc = await getK8s(); + const k8sCoreApi = kc.makeApiClient(k8s.CoreV1Api); + + const labelSelector = filter?.label; + const k8sNamespaceServices = await k8sCoreApi.listNamespacedService( + { + namespace, + ...(labelSelector? { labelSelector } : {}) + }, + ); + + return k8sNamespaceServices.items.filter((service) => + typeof filter?.name === "string" + ? service.metadata?.name === filter?.name + : filter?.name instanceof RegExp + ? service.metadata?.name?.match(filter?.name) + : true, + ); +} + +export const getK8sIngress = async ( + dre: DevNetRuntimeEnvironmentInterface, + filter?: { label?: Record | string, name?: RegExp | string }, + namespace: string = `kt-${dre.network.name}`, +) => { + const kc = await getK8s(); + const k8sNetworkApi = kc.makeApiClient(k8s.NetworkingV1Api); + + const labelSelector = filter?.label; + + const formattedLabelSelector = typeof labelSelector === 'string' + ? labelSelector + : typeof labelSelector === 'object' + ? toLabelSelector(labelSelector) : undefined; + + const k8sNamespaceIngresses = await k8sNetworkApi.listNamespacedIngress( + { + namespace, + ...(formattedLabelSelector ? { labelSelector: formattedLabelSelector } : {}) + }, + ); + + return k8sNamespaceIngresses.items.filter((ingress) => + typeof filter?.name === "string" + ? ingress.metadata?.name === filter?.name + : filter?.name instanceof RegExp + ? 
ingress.metadata?.name?.match(filter?.name) + : true, + ); +} + +export const checkK8sIngressExists = async ( + dre: DevNetRuntimeEnvironmentInterface, + filter?: { label?: string, name?: RegExp | string }, + namespace: string = `kt-${dre.network.name}`, +)=> { + const k8sIngresses = await getK8sIngress(dre, filter, namespace); + return k8sIngresses.length > 0; +} + + +export const checkK8sServiceExists = async ( + dre: DevNetRuntimeEnvironmentInterface, + filter?: { label?: string, name?: RegExp | string }, + namespace: string = `kt-${dre.network.name}`, +)=> { + const k8sServices = await getK8sService(dre, filter, namespace); + return k8sServices.length > 0; +} + +export const addPrefixToIngressHostname = ( + hostname: string, +) => { + const GLOBAL_INGRESS_HOST_PREFIX = process.env.GLOBAL_INGRESS_HOST_PREFIX ?? 'prefix'; + + return `${GLOBAL_INGRESS_HOST_PREFIX}-${hostname}`; +} + +export const toLabelSelector = (label: Record) => + Object.entries(label).map(([k, v]) => `${k}=${v}`).join(','); + +export const deleteNamespace = async (name: string) => { + const kc = await getK8s(); + const k8sCoreApi = kc.makeApiClient(k8s.CoreV1Api); + + const namespaces = await k8sCoreApi.listNamespace({ limit: DEFAULT_LIMIT }); + + if (namespaces.items.map(n => n.metadata?.name).includes(name)) { + await k8sCoreApi.deleteNamespace({ name, propagationPolicy: "Background" }); + } +} + +export const deleteNamespacedPersistentVolumeClaimIfExists = async ( + namespace: string, + name: string +) => { + const kc = await getK8s(); + const k8sStorageApi = kc.makeApiClient(k8s.CoreV1Api); + + const pvcs = await k8sStorageApi.listNamespacedPersistentVolumeClaim({ + namespace, + limit: DEFAULT_LIMIT, + }); + + if (pvcs.items.map((pvc) => pvc.metadata?.name).includes(name)) { + await k8sStorageApi.deleteNamespacedPersistentVolumeClaim({ + namespace, + name, + }); + } +} + +export const getNamespacedDeployedHelmReleases = async (namespace: string) => { + const kc = await getK8s(); + const 
k8sAppsApi = kc.makeApiClient(k8s.AppsV1Api); + + const deployments = await k8sAppsApi.listNamespacedDeployment({ namespace, limit: DEFAULT_LIMIT }); + + const helmReleases = ( + deployments.items?.map( + (deployment) => + deployment.metadata?.annotations?.["meta.helm.sh/release-name"], + ) ?? [] + ).filter((x) => typeof x === "string"); + + return helmReleases; +} + +export const createNamespaceIfNotExists = async (name: string) => { + // TODO: find a better way to check if the namespace exists + try { + const kc = await getK8s(); + const k8sCoreApi = kc.makeApiClient(k8s.CoreV1Api); + await k8sCoreApi.createNamespace({ body: { metadata: { name } } }); + } catch {} +} diff --git a/packages/k8s/tsconfig.json b/packages/k8s/tsconfig.json new file mode 100644 index 00000000..9ed464ce --- /dev/null +++ b/packages/k8s/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "module": "Node16", + "outDir": "./dist", + "rootDir": "src", + "strict": true, + "target": "es2022", + "moduleResolution": "node16", + "resolveJsonModule": true, + "strictNullChecks": true, + + "declaration": true, + "declarationMap": true, + "sourceMap": true + + }, + "include": ["./src/**/*"], + "exclude": [ + "./dist/**/*" + ], + "ts-node": { + "esm": true + } +} diff --git a/packages/k8s/tsconfig.tsbuildinfo b/packages/k8s/tsconfig.tsbuildinfo new file mode 100644 index 00000000..f413986b --- /dev/null +++ b/packages/k8s/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["./src/index.ts","./src/utils.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/packages/logger/.eslintignore b/packages/logger/.eslintignore new file mode 100644 index 00000000..4b2c927a --- /dev/null +++ b/packages/logger/.eslintignore @@ -0,0 +1,3 @@ +submodules +dist +node_modules diff --git a/packages/logger/.eslintrc b/packages/logger/.eslintrc new file mode 100644 index 00000000..a0af25f7 --- /dev/null +++ b/packages/logger/.eslintrc @@ -0,0 +1,22 @@ +{ + "extends": ["oclif", "oclif-typescript", "prettier"], + 
"overrides": [ + { + "files": ["src/**/*.ts", "tests/**/*.ts"], + "rules": { + "no-console": "off", + "no-await-in-loop": "off", + "no-promise-executor-return": "off", + "new-cap": "off", + "@typescript-eslint/no-explicit-any": "off", + "no-return-await": "off", + "unicorn/no-array-reduce": "off", + "unicorn/text-encoding-identifier-case": "off", + "unicorn/consistent-destructuring": "off", + "max-params":"off", + "unicorn/no-array-for-each": "off", + "perfectionist/sort-objects": "off" + }, + }, + ], +} diff --git a/packages/logger/package.json b/packages/logger/package.json new file mode 100644 index 00000000..639bc650 --- /dev/null +++ b/packages/logger/package.json @@ -0,0 +1,33 @@ +{ + "name": "@devnet/logger", + "version": "1.0.0", + "type": "module", + "main": "dist/index.js", + "types": "src/index.ts", + "scripts": { + "build": "rm -rf dist && rm -rf tsconfig.tsbuildinfo && tsc -b", + "build:types": "tsc --build --emitDeclarationOnly", + "lint": "eslint . --ext .ts && tsc --noEmit" + }, + "exports": { + ".": "./dist/index.js" + }, + "dependencies": { + "@devnet/ui": "workspace:*", + "@oclif/core": "^4.0.37", + "@oclif/plugin-help": "^6.2.19", + "@types/node": "^22.10.5", + "typescript": "^5" + }, + "devDependencies": { + "eslint": "^8", + "eslint-config-oclif": "^5", + "eslint-config-oclif-typescript": "^3", + "eslint-config-prettier": "^9", + "eslint-plugin-prettier": "^5.2.2", + "prettier": "^3.4.2" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/command/src/logger.ts b/packages/logger/src/index.ts similarity index 96% rename from packages/command/src/logger.ts rename to packages/logger/src/index.ts index f91b0ac2..38a55cdb 100644 --- a/packages/command/src/logger.ts +++ b/packages/logger/src/index.ts @@ -1,7 +1,6 @@ +import { applyColor, getColorForText } from "@devnet/ui"; import chalk from "chalk"; -import { applyColor, getColorForText } from "./ui.js"; - export class DevNetLogger { color: string; commandName: string; @@ -70,3 +69,4 
@@ export class DevNetLogger { this.log(chalk.yellow(msg)); } } + diff --git a/packages/logger/tsconfig.json b/packages/logger/tsconfig.json new file mode 100644 index 00000000..9ed464ce --- /dev/null +++ b/packages/logger/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "module": "Node16", + "outDir": "./dist", + "rootDir": "src", + "strict": true, + "target": "es2022", + "moduleResolution": "node16", + "resolveJsonModule": true, + "strictNullChecks": true, + + "declaration": true, + "declarationMap": true, + "sourceMap": true + + }, + "include": ["./src/**/*"], + "exclude": [ + "./dist/**/*" + ], + "ts-node": { + "esm": true + } +} diff --git a/packages/logger/tsconfig.tsbuildinfo b/packages/logger/tsconfig.tsbuildinfo new file mode 100644 index 00000000..44940f36 --- /dev/null +++ b/packages/logger/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["./src/index.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/packages/services/package.json b/packages/services/package.json index c77a5744..da20c68e 100644 --- a/packages/services/package.json +++ b/packages/services/package.json @@ -13,12 +13,18 @@ ".": "./dist/index.js" }, "dependencies": { + "@devnet/docker": "workspace:*", + "@devnet/logger": "workspace:*", + "@devnet/ui": "workspace:*", + "@devnet/utils": "workspace:*", "@fastify/swagger": "^9.4.2", "@fastify/swagger-ui": "^5.2.1", "@oclif/core": "^4.0.37", "@oclif/plugin-help": "^6.2.19", "@types/node": "^22.10.5", - "typescript": "^5" + "execa": "^9.5.2", + "typescript": "^5", + "viem": "^2.36.0" }, "devDependencies": { "eslint": "^8", diff --git a/packages/services/src/constants.ts b/packages/services/src/constants.ts new file mode 100644 index 00000000..c36ab9cf --- /dev/null +++ b/packages/services/src/constants.ts @@ -0,0 +1,5 @@ +import { ArtifactRoot } from "@devnet/types"; +import path from "node:path"; + +// TODO what if process.cwd() is not the root of the project? 
+export const ARTIFACTS_ROOT: ArtifactRoot = ArtifactRoot.parse(path.join(process.cwd(), "artifacts")); diff --git a/packages/command/src/service/service-artifact.ts b/packages/services/src/devnet-service-artifact.ts similarity index 72% rename from packages/command/src/service/service-artifact.ts rename to packages/services/src/devnet-service-artifact.ts index e19dd635..97351440 100644 --- a/packages/command/src/service/service-artifact.ts +++ b/packages/services/src/devnet-service-artifact.ts @@ -1,33 +1,34 @@ /* eslint-disable valid-jsdoc */ -import { DevNetServiceConfig } from "@devnet/service"; +import { DevNetLogger } from "@devnet/logger"; +import { NetworkArtifactRoot, ServiceArtifactRoot } from "@devnet/types"; import { execa } from "execa"; import fs, { rm } from "node:fs/promises"; import path from "node:path"; -import { DevNetLogger } from "../logger.js"; +import { DevnetServiceConfig } from "./devnet-service-config.js"; -export class ServiceArtifact { - public config: DevNetServiceConfig; +export class DevnetServiceArtifact { + public config: DevnetServiceConfig; public emittedCommands: string[] = []; - public root: string; + public readonly root: ServiceArtifactRoot; private logger: DevNetLogger; - constructor( - artifactsRoot: string, - service: DevNetServiceConfig, + protected constructor( + networkArtifactRoot: NetworkArtifactRoot, + service: DevnetServiceConfig, logger: DevNetLogger, ) { - this.root = path.join(artifactsRoot, service.name); + this.root = ServiceArtifactRoot.parse(path.join(networkArtifactRoot, service.name)); this.config = service; this.logger = logger; } - static async getNew( - artifactsRoot: string, - service: DevNetServiceConfig, + static async create( + networkArtifactRoot: NetworkArtifactRoot, + serviceConfig: DevnetServiceConfig, logger: DevNetLogger, ) { - const artifact = new ServiceArtifact(artifactsRoot, service, logger); + const artifact = new DevnetServiceArtifact(networkArtifactRoot, serviceConfig, logger); // Check 
if the destination path already exists const destinationExists = await artifact.pathExists(artifact.root); @@ -35,13 +36,14 @@ export class ServiceArtifact { return artifact; } - if (artifact.config.hooks?.install) + if (artifact.config.hooks?.install) { artifact.emittedCommands.push(artifact.config.hooks?.install); + } - await artifact.gitInit(service); + await artifact.gitInit(serviceConfig); - if (service.workspace) { - await artifact.copyFilesFrom(service.workspace); + if (serviceConfig.workspace) { + await artifact.copyFilesFrom(serviceConfig.workspace); } return artifact; @@ -87,16 +89,16 @@ export class ServiceArtifact { } } - private async gitInit(service: DevNetServiceConfig): Promise { + private async gitInit(serviceConfig: DevnetServiceConfig): Promise { try { - if (!service.repository) { + if (!serviceConfig.repository) { return; } // Ensure the destination folder exists await fs.mkdir(this.root, { recursive: true }); - const { url, branch } = service.repository; + const { url, branch } = serviceConfig.repository; // TODO: move to git command and use it as hook await execa({ cwd: this.root, diff --git a/packages/services/src/service.ts b/packages/services/src/devnet-service-config.ts similarity index 63% rename from packages/services/src/service.ts rename to packages/services/src/devnet-service-config.ts index db970143..8d167d1d 100644 --- a/packages/services/src/service.ts +++ b/packages/services/src/devnet-service-config.ts @@ -1,19 +1,27 @@ -export class DevNetServiceConfig< - T = unknown, - L extends Record = Record, +import { ServiceGetter } from "./service-getter.js"; + + +export class DevnetServiceConfig< + CustomServiceGetters extends Record> = Record>, + Constants = unknown, + Labels extends Record = Record, > { - constants: T; + constants: Constants; env?: Record; exposedPorts?: number[]; + + getters: CustomServiceGetters; + git?: string; + hooks?: { build?: string; destroy?: string; install?: string; }; - labels: L; + labels: Labels; name: 
string; repository?: { branch: string, url: string }; @@ -22,6 +30,7 @@ export class DevNetServiceConfig< constructor({ workspace, env, + getters, hooks, name, repository, @@ -29,11 +38,12 @@ export class DevNetServiceConfig< labels, exposedPorts, }: { - constants: T; + constants: Constants; env?: Record; exposedPorts?: number[]; + getters: CustomServiceGetters; hooks?: { build?: string; destroy?: string; install?: string }; - labels: L; + labels: Labels; name: string; repository?: { branch: string, url: string }; workspace?: string; @@ -41,6 +51,7 @@ export class DevNetServiceConfig< this.workspace = workspace; this.env = env; this.hooks = hooks; + this.getters = getters; this.name = name; this.repository = repository; this.constants = constants; diff --git a/packages/command/src/service/service.ts b/packages/services/src/devnet-service.ts similarity index 52% rename from packages/command/src/service/service.ts rename to packages/services/src/devnet-service.ts index b2a378df..eb338a6e 100644 --- a/packages/command/src/service/service.ts +++ b/packages/services/src/devnet-service.ts @@ -1,6 +1,18 @@ -import { DevNetServiceConfig, services } from "@devnet/service"; -import chalk from "chalk"; -import { ExecaMethod, execa } from "execa"; +import { + ContainerInfo, + PublicPortInfo, + getContainersByServiceLabels, + getContainersByServiceLabelsOrNull, + getServiceInfo, + getServiceInfoByLabel, +} from "@devnet/docker"; +import { DevNetLogger } from "@devnet/logger"; +import { + Network, + NetworkArtifactRoot, + Path, +} from "@devnet/types"; +import { assert } from "@devnet/utils"; import { access, constants, @@ -11,66 +23,50 @@ import { import path from "node:path"; import * as YAML from "yaml"; -import { assert } from "../assert.js"; -import { - ContainerInfo, - PublicPortInfo, - getContainersByServiceLabels, - getContainersByServiceLabelsOrNull, - getServiceInfo, - getServiceInfoByLabel, -} from "../docker/index.js"; -import { DevNetLogger } from "../logger.js"; 
-import { - applyColor, - getCachedColor, - // getSeparator, - // transformCMDOutput, -} from "../ui.js"; -import { ServiceArtifact } from "./service-artifact.js"; - -type DevNetServices = typeof services; -export class DevNetService { - public artifact: ServiceArtifact; - public config: DevNetServiceConfig; - public sh!: ExecaMethod<{ - cwd: string; - env: Record | undefined; - stdio: "inherit"; - }>; +import { DevnetServiceArtifact } from "./devnet-service-artifact.js"; +import { serviceConfigs } from "./embedded/index.js"; +import { DevNetServicesConfigs } from "./services-configs.js"; +import { createShellWrapper } from "./shell-wrapper.js"; + + +export class DevNetService { + public artifact: DevnetServiceArtifact; + public config: DevNetServicesConfigs[Name]; + + public sh: ReturnType>; private commandName: string; private logger: DevNetLogger; + private network: Network; - private network: string; - constructor( + protected constructor( name: Name, - network: string, + network: Network, logger: DevNetLogger, commandName: string, - artifact: ServiceArtifact, + artifact: DevnetServiceArtifact, ) { - this.config = services[name]; + this.config = serviceConfigs[name]; this.artifact = artifact; this.network = network; this.commandName = commandName; this.logger = logger; - this.createShellWrapper(); + this.sh = createShellWrapper(this.config, this.artifact, network, commandName); } - static async getNew( - rootPath: string, - network: string, + public static async create( + networkArtifactRootPath: NetworkArtifactRoot, + network: Network, logger: DevNetLogger, commandName: string, name: Name, ): Promise> { - const artifact = await ServiceArtifact.getNew( - rootPath, - services[name], + const artifact = await DevnetServiceArtifact.create( + networkArtifactRootPath, + serviceConfigs[name], logger, ); const service = new DevNetService( @@ -100,7 +96,7 @@ export class DevNetService { ); } - public async fileExists(relativePath: string): Promise { + public async 
fileExists(relativePath: string | Path): Promise { const servicePath = this.artifact.root; const fullPath = path.join(servicePath, relativePath); @@ -116,25 +112,25 @@ export class DevNetService { must: M = true as M, ): Promise< M extends true - ? Record - : Record | null + ? Record + : Record | null > { const { labels } = this.config; - + // todo: make something with dora if (must) { - return await getContainersByServiceLabels( + return await getContainersByServiceLabels( labels, `kt-${this.network}`, ); } const result = await getContainersByServiceLabelsOrNull< - DevNetServices[Name]["labels"] + DevNetServicesConfigs[Name]["labels"] >(labels, `kt-${this.network}`); return result as M extends true - ? Record - : Record | null; + ? Record + : Record | null; } public async getDockerServiceInfoByLabel(labelKey: string, label: string) { @@ -197,7 +193,7 @@ export class DevNetService { } public async readYaml(relativePath: string) { - return YAML.parse(await this.readFile(relativePath)); + return YAML.parse(await this.readFile(relativePath), { intAsBigInt: true }); } public async writeENV(relativePath: string, env: Record) { @@ -233,80 +229,4 @@ export class DevNetService { public async writeYaml(relativePath: string, fileContent: unknown) { return await this.writeFile(relativePath, YAML.stringify(fileContent)); } - - private createShellWrapper() { - const { env } = this.config; - const { root: cwd } = this.artifact; - const { name } = this.config; - const { network } = this; - - const nestedCommandColor = getCachedColor(`${network}/${name}`); - const commandColor = DevNetLogger.getColor(network, this.commandName); - const sh = execa({ - cwd, - env, - shell: true, - stdout: [ - // async function* (chunk: any) { - // yield* transformCMDOutput(color, "||", chunk); - // }, - "inherit", - ], - stderr: [ - // async function* (chunk: any) { - // yield* transformCMDOutput(color, "||", chunk); - // }, - "inherit", - ], - stdin: "inherit", - - verbose(_: any, { 
...verboseObject }: any) { - if (verboseObject.type === "command") { - // console.log(`${getSeparator(commandColor, "||")}`); - console.log( - applyColor( - commandColor, - `\\\\ [${`${network}/${name}`}]: ${applyColor(nestedCommandColor, verboseObject.escapedCommand)}`, - ), - ); - - // return console.log(getSeparator(color, "||")); - return console.log(); - } - - if (verboseObject.type === "duration") { - console.log(); - // console.log(verboseObject.result.failed); - if (verboseObject.result.failed) { - // console.log(verboseObject.result) - // shortMessage - // const errorMessage = verboseObject.result.stderr.replaceAll(getSeparator(color, '||'), '') - // console.log(`${getSeparator(color, '||')}${chalk.red(errorMessage)}`) - // console.log( - // `${getSeparator(commandColor, "||")} ${chalk.red(verboseObject.result.shortMessage)}`, - // ); - console.log(`${chalk.red(verboseObject.result.shortMessage)}`); - console.log(); - } - - // console.log(getSeparator(color, "||")); - // console.log(); - - const ms = Math.floor(verboseObject.result.durationMs); - return console.log( - applyColor( - commandColor, - `// [${`${network}/${name}`}]: ${applyColor(nestedCommandColor, `${verboseObject.escapedCommand} finished in ${ms}ms`)}`, - ), - ); - } - }, - }); - - this.sh = sh as unknown as ExecaMethod<{ - cwd: string; - env: Record | undefined; - stdio: "inherit"; - }>; - } } diff --git a/packages/services/src/embedded/assertoor.ts b/packages/services/src/embedded/assertoor.ts new file mode 100644 index 00000000..80e25ea8 --- /dev/null +++ b/packages/services/src/embedded/assertoor.ts @@ -0,0 +1,9 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const assertoor = new DevnetServiceConfig({ + workspace: "workspaces/assertoor", + name: "assertoor" as const, + constants: {}, + labels: { api: "devnet_service_name=assertoorApi" }, + getters: {}, +}); diff --git a/packages/services/src/embedded/blockscout.ts 
b/packages/services/src/embedded/blockscout.ts new file mode 100644 index 00000000..51b14348 --- /dev/null +++ b/packages/services/src/embedded/blockscout.ts @@ -0,0 +1,9 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const blockscout = new DevnetServiceConfig({ + workspace: "workspaces/blockscout", + name: "blockscout" as const, + constants: {}, + labels: { blockscout: "devnet_service_name=blockscout" }, + getters: {}, +}); diff --git a/packages/services/src/embedded/council-daemon.ts b/packages/services/src/embedded/council-daemon.ts new file mode 100644 index 00000000..c4e66c3a --- /dev/null +++ b/packages/services/src/embedded/council-daemon.ts @@ -0,0 +1,13 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const council = new DevnetServiceConfig({ + repository: { + url: "https://github.com/lidofinance/lido-council-daemon.git", + branch: "feat/devnet", + }, + workspace: "workspaces/council", + name: "council" as const, + constants: {}, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/csm-prover-tool.ts b/packages/services/src/embedded/csm-prover-tool.ts new file mode 100644 index 00000000..06a98842 --- /dev/null +++ b/packages/services/src/embedded/csm-prover-tool.ts @@ -0,0 +1,17 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const csmProverTool = new DevnetServiceConfig({ + repository: { + url: "git@github.com:lidofinance/csm-prover-tool.git", + branch: "develop", + }, + workspace: "workspaces/csm-prover-tool", + name: "csmProverTool" as const, + exposedPorts: [9030], + constants: { + LOG_FORMAT: "simple", + LOG_LEVEL: "debug", + }, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/csm.ts b/packages/services/src/embedded/csm.ts new file mode 100644 index 00000000..5172fb38 --- /dev/null +++ b/packages/services/src/embedded/csm.ts @@ -0,0 +1,26 @@ +import { DevnetServiceConfig } from 
"../devnet-service-config.js"; + +export const csm = new DevnetServiceConfig({ + repository: { + url: "git@github.com:lidofinance/community-staking-module.git", + branch: "main", + }, + name: "csm" as const, + constants: { + FOUNDRY_PROFILE: "deploy", + DEPLOY_CONFIG: "artifacts/latest/deploy-local-devnet.json", + UPGRADE_CONFIG: "artifacts/latest/deploy-local-devnet.json", + VERIFIER_API_KEY: "local-testnet", + ARTIFACTS_DIR: "artifacts/latest/", + DEPLOYED_VERIFIER: "artifacts/latest/deploy-verifier-devnet.json", + CSM_STAKING_MODULE_ID: "3", + }, + env: { + CHAIN: "local-devnet", + }, + hooks: { + install: "csm:install", + }, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/data-bus.ts b/packages/services/src/embedded/data-bus.ts new file mode 100644 index 00000000..a2ddbb0b --- /dev/null +++ b/packages/services/src/embedded/data-bus.ts @@ -0,0 +1,14 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const dataBus = new DevnetServiceConfig({ + repository: { + url: "https://github.com/lidofinance/data-bus.git", + branch: "feat/devnet", + }, + name: "dataBus" as const, + constants: { + DEPLOYED_FILE: "deployed/local-devnet.json", + }, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/docker-registry.ts b/packages/services/src/embedded/docker-registry.ts new file mode 100644 index 00000000..6f9241e4 --- /dev/null +++ b/packages/services/src/embedded/docker-registry.ts @@ -0,0 +1,10 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + + +export const dockerRegistry = new DevnetServiceConfig({ + workspace: "workspaces/docker-registry", + name: "dockerRegistry" as const, + constants: {}, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/dsm-bots.ts b/packages/services/src/embedded/dsm-bots.ts new file mode 100644 index 00000000..e0f0ecb9 --- /dev/null +++ b/packages/services/src/embedded/dsm-bots.ts @@ -0,0 +1,13 @@ +import { 
DevnetServiceConfig } from "../devnet-service-config.js"; + +export const dsmBots = new DevnetServiceConfig({ + repository: { + url: "https://github.com/lidofinance/depositor-bot.git", + branch: "feat/devnet", + }, + workspace: "workspaces/dsm-bots", + name: "dsmBots" as const, + constants: {}, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/index.ts b/packages/services/src/embedded/index.ts new file mode 100644 index 00000000..b65e5998 --- /dev/null +++ b/packages/services/src/embedded/index.ts @@ -0,0 +1,41 @@ +import { assertoor } from "./assertoor.js"; +import { blockscout } from "./blockscout.js"; +import { council } from "./council-daemon.js"; +import { csm } from "./csm.js"; +import { csmProverTool } from "./csm-prover-tool.js"; +import { dataBus } from "./data-bus.js"; +import { dockerRegistry } from "./docker-registry.js"; +import { dsmBots } from "./dsm-bots.js"; +import { kapi } from "./kapi.js"; +import { kubo } from "./kubo.js"; +import { kurtosis } from "./kurtosis.js" +import { lateProverBot } from "./late-prover-bot.js"; +import { lidoCLI } from "./lido-cli.js"; +import { lidoCore } from "./lido-core.js"; +import { noWidget } from "./no-widget.js"; +import { noWidgetBackend } from "./no-widget-backend.js"; +import { oracle } from "./oracle.js"; +import { voting } from "./voting.js"; + +export const serviceConfigs = { + blockscout, + lateProverBot, + lidoCore, + lidoCLI, + kurtosis, + csm, + csmProverTool, + kapi, + oracle, + voting, + assertoor, + council, + dataBus, + dsmBots, + dockerRegistry, + kubo, + noWidgetBackend, + noWidget, +}; + +export type EmbeddedServicesConfigs = typeof serviceConfigs; diff --git a/packages/services/src/embedded/kapi.ts b/packages/services/src/embedded/kapi.ts new file mode 100644 index 00000000..539a6d00 --- /dev/null +++ b/packages/services/src/embedded/kapi.ts @@ -0,0 +1,28 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const kapi = new 
DevnetServiceConfig({ + repository: { + url: "https://github.com/lidofinance/lido-keys-api.git", + branch: "feat/devnet", + }, + workspace: "workspaces/kapi", + name: "kapi" as const, + exposedPorts: [9030], + constants: { + DB_HOST: "127.0.0.1", + DB_NAME: "node_operator_keys_service_db", + DB_PASSWORD: "postgres", + DB_PORT: "5432", + DB_USER: "postgres", + LOG_FORMAT: "simple", + LOG_LEVEL: "debug", + MIKRO_ORM_DISABLE_FOREIGN_KEYS: "false", + PORT: "9030", + PROVIDER_BATCH_AGGREGATION_WAIT_MS: "10", + PROVIDER_CONCURRENT_REQUESTS: "1", + PROVIDER_JSON_RPC_MAX_BATCH_SIZE: "100", + VALIDATOR_REGISTRY_ENABLE: "false", + }, + labels: { kapi: "devnet_service_name=kapi" }, + getters: {}, +}); diff --git a/packages/services/src/embedded/kubo.ts b/packages/services/src/embedded/kubo.ts new file mode 100644 index 00000000..8795fe9d --- /dev/null +++ b/packages/services/src/embedded/kubo.ts @@ -0,0 +1,9 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const kubo = new DevnetServiceConfig({ + workspace: "workspaces/kubo", + name: "kubo" as const, + constants: {}, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/kurtosis.ts b/packages/services/src/embedded/kurtosis.ts new file mode 100644 index 00000000..85d5e73d --- /dev/null +++ b/packages/services/src/embedded/kurtosis.ts @@ -0,0 +1,33 @@ +import { toHex, getAddress } from 'viem' + +import { DevnetServiceConfig } from "../devnet-service-config.js"; + + +export const kurtosis = new DevnetServiceConfig({ + workspace: "workspaces/kurtosis", + name: "kurtosis" as const, + constants: {}, + labels: { + dora: "service_name=dora", + el: "com.kurtosistech.custom.ethereum-package.client-type=execution", + cl: "com.kurtosistech.custom.ethereum-package.client-type=beacon", + vc: "com.kurtosistech.custom.ethereum-package.client-type=validator", + }, + getters: { + async DEPOSIT_CONTRACT_ADDRESS(service) { + const json = await service.readJson('network/genesis.json'); + + 
return getAddress(json?.config?.depositContractAddress ?? ''); + }, + async GENESIS_TIME(service) { + const json = await service.readJson('network/genesis.json'); + + return Number(json.timestamp as number); + }, + async GENESIS_FORK_VERSION(service) { + const yaml = await service.readYaml('network/config.yaml'); + + return toHex(yaml.GENESIS_FORK_VERSION as number); + } + } +}); diff --git a/packages/services/src/embedded/late-prover-bot.ts b/packages/services/src/embedded/late-prover-bot.ts new file mode 100644 index 00000000..03a43763 --- /dev/null +++ b/packages/services/src/embedded/late-prover-bot.ts @@ -0,0 +1,17 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const lateProverBot = new DevnetServiceConfig({ + repository: { + url: "git@github.com:lidofinance/late-prover-bot.git", + branch: "develop", + }, + workspace: "workspaces/late-prover-bot", + name: "lateProverBot" as const, + exposedPorts: [9030], + constants: { + LOG_FORMAT: "simple", + LOG_LEVEL: "debug", + }, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/lido-cli.ts b/packages/services/src/embedded/lido-cli.ts new file mode 100644 index 00000000..be6eefff --- /dev/null +++ b/packages/services/src/embedded/lido-cli.ts @@ -0,0 +1,24 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const lidoCLI = new DevnetServiceConfig({ + repository: { + url: "https://github.com/lidofinance/lido-cli.git", + branch: "fix/vroom-306-temp-fix-fusaka-1", + }, + name: "lidoCLI" as const, + constants: { + DEPLOYED_NETWORK_CONFIG_PATH: "configs/deployed-local-devnet.json", + DEPLOYED_NETWORK_CONFIG_NAME: "deployed-local-devnet.json", + DEPLOYED_NETWORK_CONFIG_EXTRA_PATH: + "configs/extra-deployed-local-devnet.json", + ENV_CONFIG_PATH: ".env", + }, + env: { + LIDO_CLI_NON_INTERACTIVE: "true", + }, + hooks: { + install: "lido-cli:install", + }, + labels: {}, + getters: {}, +}); diff --git 
a/packages/services/src/embedded/lido-core.ts b/packages/services/src/embedded/lido-core.ts new file mode 100644 index 00000000..9a0fbce6 --- /dev/null +++ b/packages/services/src/embedded/lido-core.ts @@ -0,0 +1,25 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const lidoCore = new DevnetServiceConfig({ + repository: { + url: "git@github.com:lidofinance/core.git", + branch: "develop", + }, + name: "lidoCore" as const, + constants: { + DEPLOYED: "deployed-local-devnet.json", + EL_NETWORK_NAME: "local-devnet", + GAS_MAX_FEE: "100", + GAS_PRIORITY_FEE: "1", + NETWORK: "local-devnet", + NETWORK_STATE_DEFAULTS_FILE: + "scripts/scratch/deployed-testnet-defaults.json", + NETWORK_STATE_FILE: `deployed-local-devnet.json`, + SLOTS_PER_EPOCH: "32", + }, + hooks: { + install: "lido-core:install", + }, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/no-widget-backend.ts b/packages/services/src/embedded/no-widget-backend.ts new file mode 100644 index 00000000..a4ac73d1 --- /dev/null +++ b/packages/services/src/embedded/no-widget-backend.ts @@ -0,0 +1,23 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const noWidgetBackend = new DevnetServiceConfig({ + repository: { + url: "git@github.com:lidofinance/node-operators-widget-backend-ts.git", + branch: "feat/fusaka-devnet", + }, + workspace: "workspaces/no-widget-backend", + name: "noWidgetBackend" as const, + constants: { + PORT: "3000", + NODE_ENV: "production", + LOG_FORMAT: "simple", + LOG_LEVEL: "debug", + CORS_WHITELIST_REGEXP: "", + GLOBAL_THROTTLE_TTL: "5", + GLOBAL_THROTTLE_LIMIT: "100", + GLOBAL_CACHE_TTL: "1", + SENTRY_DSN: "", + }, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/no-widget.ts b/packages/services/src/embedded/no-widget.ts new file mode 100644 index 00000000..778a9d54 --- /dev/null +++ b/packages/services/src/embedded/no-widget.ts @@ -0,0 +1,13 @@ +import { DevnetServiceConfig } 
from "../devnet-service-config.js"; + +export const noWidget = new DevnetServiceConfig({ + repository: { + url: "git@github.com:lidofinance/node-operators-widget.git", + branch: "feat/fusaka-devnet", + }, + workspace: "workspaces/no-widget", + name: "noWidget" as const, + constants: {}, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/oracle.ts b/packages/services/src/embedded/oracle.ts new file mode 100644 index 00000000..c6a5f566 --- /dev/null +++ b/packages/services/src/embedded/oracle.ts @@ -0,0 +1,17 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const oracle = new DevnetServiceConfig({ + repository: { + url: "https://github.com/lidofinance/lido-oracle.git", + branch: "fix/vroom-306-temp-fix-fusaka-1", + }, + workspace: "workspaces/oracle", + name: "oracle" as const, + constants: { + HASH_CONSENSUS_AO_EPOCHS_PER_FRAME: 8, + HASH_CONSENSUS_VEBO_EPOCHS_PER_FRAME: 8, + HASH_CONSENSUS_CSM_EPOCHS_PER_FRAME: 24 + }, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/embedded/voting.ts b/packages/services/src/embedded/voting.ts new file mode 100644 index 00000000..8cd0abf3 --- /dev/null +++ b/packages/services/src/embedded/voting.ts @@ -0,0 +1,12 @@ +import { DevnetServiceConfig } from "../devnet-service-config.js"; + +export const voting = new DevnetServiceConfig({ + repository: { + url: "https://github.com/lidofinance/scripts.git", + branch: "feat/pectra-devnet", + }, + name: "voting" as const, + constants: {}, + labels: {}, + getters: {}, +}); diff --git a/packages/services/src/index.ts b/packages/services/src/index.ts index e0aa6fc7..fd307746 100644 --- a/packages/services/src/index.ts +++ b/packages/services/src/index.ts @@ -1,199 +1,3 @@ -import { DevNetServiceConfig } from "./service.js"; - -const blockscout = new DevNetServiceConfig({ - workspace: "workspaces/blockscout", - name: "blockscout" as const, - exposedPorts: [80], - constants: {}, - labels: { blockscout: 
"devnet_service_name=blockscout" }, -}); - -const lidoCore = new DevNetServiceConfig({ - repository: { - url: "https://github.com/lidofinance/core.git", - branch: "develop", - }, - name: "lidoCore" as const, - constants: { - DEPLOYED: "deployed-local-devnet.json", - EL_NETWORK_NAME: "local-devnet", - DEPOSIT_CONTRACT: "0x00000000219ab540356cBB839Cbe05303d7705Fa", - GAS_MAX_FEE: "100", - GAS_PRIORITY_FEE: "1", - NETWORK: "local-devnet", - NETWORK_STATE_DEFAULTS_FILE: - "scripts/scratch/deployed-testnet-defaults.json", - NETWORK_STATE_FILE: `deployed-local-devnet.json`, - SLOTS_PER_EPOCH: "32", - }, - hooks: { - install: "lido-core:install", - }, - labels: {}, -}); - -const lidoCLI = new DevNetServiceConfig({ - repository: { - url: "https://github.com/lidofinance/lido-cli.git", - branch: "feature/devnet-command", - }, - name: "lidoCLI" as const, - constants: { - DEPLOYED_NETWORK_CONFIG_PATH: "configs/deployed-local-devnet.json", - DEPLOYED_NETWORK_CONFIG_NAME: "deployed-local-devnet.json", - DEPLOYED_NETWORK_CONFIG_EXTRA_PATH: - "configs/extra-deployed-local-devnet.json", - ENV_CONFIG_PATH: ".env", - }, - env: { - LIDO_CLI_NON_INTERACTIVE: "true", - }, - hooks: { - install: "lido-cli:install", - }, - labels: {}, -}); - -const kurtosis = new DevNetServiceConfig({ - workspace: "workspaces/kurtosis", - name: "kurtosis" as const, - constants: {}, - labels: { - dora: "service_name=dora", - el: "com.kurtosistech.custom.ethereum-package.client-type=execution", - cl: "com.kurtosistech.custom.ethereum-package.client-type=beacon", - vc: "com.kurtosistech.custom.ethereum-package.client-type=validator", - }, -}); - -const voting = new DevNetServiceConfig({ - repository: { - url: "https://github.com/lidofinance/scripts.git", - branch: "feat/pectra-devnet", - }, - name: "voting" as const, - constants: {}, - labels: {}, -}); - -const assertoor = new DevNetServiceConfig({ - workspace: "workspaces/assertoor", - name: "assertoor" as const, - constants: {}, - labels: { api: 
"devnet_service_name=assertoorApi" }, -}); - -const kapi = new DevNetServiceConfig({ - repository: { - url: "https://github.com/lidofinance/lido-keys-api.git", - branch: "feat/devnet", - }, - workspace: "workspaces/kapi", - name: "kapi" as const, - exposedPorts: [9030], - constants: { - DB_HOST: "127.0.0.1", - DB_NAME: "node_operator_keys_service_db", - DB_PASSWORD: "postgres", - DB_PORT: "5432", - DB_USER: "postgres", - LOG_FORMAT: "simple", - LOG_LEVEL: "debug", - MIKRO_ORM_DISABLE_FOREIGN_KEYS: "false", - PORT: "9030", - PROVIDER_BATCH_AGGREGATION_WAIT_MS: "10", - PROVIDER_CONCURRENT_REQUESTS: "1", - PROVIDER_JSON_RPC_MAX_BATCH_SIZE: "100", - VALIDATOR_REGISTRY_ENABLE: "false", - }, - labels: { kapi: "devnet_service_name=kapi" }, -}); - -const oracle = new DevNetServiceConfig({ - repository: { - url: "https://github.com/lidofinance/lido-oracle.git", - branch: "feat/oracle-v6", - }, - workspace: "workspaces/oracle-v6", - name: "oracle" as const, - constants: { - HASH_CONSENSUS_AO_EPOCHS_PER_FRAME: 8, - HASH_CONSENSUS_VEBO_EPOCHS_PER_FRAME: 8, - HASH_CONSENSUS_CSM_EPOCHS_PER_FRAME: 24 - }, - labels: {}, -}); - -const council = new DevNetServiceConfig({ - repository: { - url: "https://github.com/lidofinance/lido-council-daemon.git", - branch: "feat/devnet", - }, - workspace: "workspaces/council", - name: "council" as const, - constants: {}, - labels: {}, -}); - -const dataBus = new DevNetServiceConfig({ - repository: { - url: "https://github.com/lidofinance/data-bus.git", - branch: "feat/devnet", - }, - name: "dataBus" as const, - constants: { - DEPLOYED_FILE: "deployed/local-devnet.json", - }, - labels: {}, -}); - -const dsmBots = new DevNetServiceConfig({ - repository: { - url: "https://github.com/lidofinance/depositor-bot.git", - branch: "feat/devnet", - }, - workspace: "workspaces/dsm-bots", - name: "dsmBots" as const, - constants: {}, - labels: {}, -}); - -const csm = new DevNetServiceConfig({ - repository: { - url: 
"https://github.com/lidofinance/community-staking-module.git", - branch: "tags/v1", - }, - name: "csm" as const, - constants: { - FOUNDRY_PROFILE: "deploy", - DEPLOY_CONFIG: "artifacts/latest/deploy-local-devnet.json", - UPGRADE_CONFIG: "artifacts/latest/deploy-local-devnet.json", - VERIFIER_API_KEY: "local-testnet", - ARTIFACTS_DIR: "artifacts/latest/", - DEPLOYED_VERIFIER: "artifacts/latest/deploy-verifier-devnet.json", - }, - env: { - CHAIN: "local-devnet", - }, - hooks: { - install: "csm:install", - }, - labels: {}, -}); - -export const services = { - blockscout, - lidoCore, - lidoCLI, - kurtosis, - csm, - kapi, - oracle, - voting, - assertoor, - council, - dataBus, - dsmBots, -}; - -export { DevNetServiceConfig } from "./service.js"; +export { DevnetServiceConfig } from "./devnet-service-config.js"; +export * from './embedded/index.js'; +export { DevnetServiceRegistry } from "./service-registry.js"; diff --git a/packages/services/src/service-getter.ts b/packages/services/src/service-getter.ts new file mode 100644 index 00000000..8cdb19ce --- /dev/null +++ b/packages/services/src/service-getter.ts @@ -0,0 +1,4 @@ +import { DevNetService } from "./devnet-service.js"; + +export type ServiceGetter = + (dre: DevNetService) => (Promise | unknown) diff --git a/packages/services/src/service-registry.ts b/packages/services/src/service-registry.ts new file mode 100644 index 00000000..09dbecda --- /dev/null +++ b/packages/services/src/service-registry.ts @@ -0,0 +1,82 @@ +import { DevNetLogger } from "@devnet/logger"; +import { serviceConfigs } from "@devnet/service"; +import { Network, NetworkArtifactRoot } from "@devnet/types"; +import { mkdir, rm } from "node:fs/promises"; +import path from "node:path"; + +import { ARTIFACTS_ROOT } from "./constants.js"; +import { DevNetService } from "./devnet-service.js"; +import { DevNetServicesConfigs } from "./services-configs.js"; + + +export class DevnetServiceRegistry { + protected readonly network: Network; + public readonly 
root: NetworkArtifactRoot; + public readonly services: { [K in keyof DevNetServicesConfigs]: DevNetService }; + + protected constructor( + network: Network, + root: NetworkArtifactRoot, + services: { [K in keyof DevNetServicesConfigs]: DevNetService }, + ) { + this.root = root; + this.network = network; + this.services = services; + } + + public static async create( + network: Network, + commandName: string, + logger: DevNetLogger, + ): Promise { + await this.createRootDir(network); + const rootDir = this.getRoot(network); + + const servicesList = await Promise.all( + Object.entries(serviceConfigs).map(async ([key]) => [ + key, + await DevNetService.create( + rootDir, + network, + logger, + commandName, + key as keyof DevNetServicesConfigs, + ), + ]), + ); + + return new DevnetServiceRegistry( + network, + rootDir, + Object.fromEntries(servicesList) as { + [K in keyof DevNetServicesConfigs]: DevNetService; + }, + ); + } + + protected static async createRootDir(network: Network) { + await mkdir(this.getRoot(network), { recursive: true }); + } + + protected static getRoot(network: Network): NetworkArtifactRoot { + return NetworkArtifactRoot.parse(path.join(ARTIFACTS_ROOT, network)); + } + + public async clean() { + if (this.root === path.sep) { + return; + } + + await rm(this.root, { force: true, recursive: true }); + } + + public clone(commandName: string, logger: DevNetLogger) { + const clonedServices = Object.fromEntries( + Object.entries(this.services).map(([key, service]) => [ + key, + service.clone(commandName, logger), + ]) + ) as { [K in keyof DevNetServicesConfigs]: DevNetService }; + return new DevnetServiceRegistry(this.network, this.root, clonedServices); + } +} diff --git a/packages/services/src/services-configs.ts b/packages/services/src/services-configs.ts new file mode 100644 index 00000000..ed8c7691 --- /dev/null +++ b/packages/services/src/services-configs.ts @@ -0,0 +1,5 @@ +import { EmbeddedServicesConfigs } from "@devnet/service"; + +export 
interface DevNetServicesConfigs extends EmbeddedServicesConfigs { + // augmented somewhere +} diff --git a/packages/services/src/shell-wrapper.ts b/packages/services/src/shell-wrapper.ts new file mode 100644 index 00000000..da65503a --- /dev/null +++ b/packages/services/src/shell-wrapper.ts @@ -0,0 +1,89 @@ +import { DevNetLogger } from "@devnet/logger"; +import { Network } from "@devnet/types"; +import { applyColor, getCachedColor } from "@devnet/ui"; +import chalk from "chalk"; +import { ExecaMethod, execa } from "execa"; + +import { DevnetServiceArtifact } from "./devnet-service-artifact.js"; +import { DevNetServicesConfigs } from "./services-configs.js"; + +export const createShellWrapper = ( + serviceConfig: DevNetServicesConfigs[Name], + serviceArtifact: DevnetServiceArtifact, + network: Network, + commandName: string +) => { + const { env, name } = serviceConfig; + const { root: serviceArtifactRoot } = serviceArtifact; + + const nestedCommandColor = getCachedColor(`${network}/${name}`); + const commandColor = DevNetLogger.getColor(network, commandName); + + + const sh = execa({ + cwd: serviceArtifactRoot, + env, + shell: true, + stdout: [ + // async function* (chunk: any) { + // yield* transformCMDOutput(color, "||", chunk); + // }, + "inherit", + ], + stderr: [ + // async function* (chunk: any) { + // yield* transformCMDOutput(color, "||", chunk); + // }, + "inherit", + ], + stdin: "inherit", + + verbose(_: any, { type, ...verboseObject }: any) { + if (type === "command") { + // console.log(`${getSeparator(commandColor, "||")}`); + console.log( + applyColor( + commandColor, + `\\\\ [${`${network}/${name}`}]: ${applyColor(nestedCommandColor, verboseObject.escapedCommand)}`, + ), + ); + + // return console.log(getSeparator(color, "||")); + return console.log(); + } + + if (type === "duration") { + console.log(); + // console.log(verboseObject.result.failed); + if (verboseObject.result.failed) { + // console.log(verboseObject.result) + // shortMessage + // 
const errorMessage = verboseObject.result.stderr.replaceAll(getSeparator(color, '||'), '') + // console.log(`${getSeparator(color, '||')}${chalk.red(errorMessage)}`) + // console.log( + // `${getSeparator(commandColor, "||")} ${chalk.red(verboseObject.result.shortMessage)}`, + // ); + console.log(`${chalk.red(verboseObject.result.shortMessage)}`); + console.log(); + } + + // console.log(getSeparator(color, "||")); + // console.log(); + + const ms = Math.floor(verboseObject.result.durationMs); + return console.log( + applyColor( + commandColor, + `// [${`${network}/${name}`}]: ${applyColor(nestedCommandColor, `${verboseObject.escapedCommand} finished in ${ms}ms`)}`, + ), + ); + } + }, + }); + + return sh as unknown as ExecaMethod<{ + cwd: string; + env: Record | undefined; + stdio: "inherit"; + }>; +} diff --git a/packages/services/tsconfig.tsbuildinfo b/packages/services/tsconfig.tsbuildinfo index 9713ecb5..76b206fd 100644 --- a/packages/services/tsconfig.tsbuildinfo +++ b/packages/services/tsconfig.tsbuildinfo @@ -1 +1 @@ -{"root":["./src/index.ts","./src/service.ts"],"version":"5.7.3"} \ No newline at end of file +{"root":["./src/constants.ts","./src/devnet-service-artifact.ts","./src/devnet-service-config.ts","./src/devnet-service.ts","./src/index.ts","./src/service-getter.ts","./src/service-registry.ts","./src/services-configs.ts","./src/shell-wrapper.ts","./src/embedded/assertoor.ts","./src/embedded/blockscout.ts","./src/embedded/council-daemon.ts","./src/embedded/csm-prover-tool.ts","./src/embedded/csm.ts","./src/embedded/data-bus.ts","./src/embedded/docker-registry.ts","./src/embedded/dsm-bots.ts","./src/embedded/index.ts","./src/embedded/kapi.ts","./src/embedded/kubo.ts","./src/embedded/kurtosis.ts","./src/embedded/late-prover-bot.ts","./src/embedded/lido-cli.ts","./src/embedded/lido-core.ts","./src/embedded/no-widget-backend.ts","./src/embedded/no-widget.ts","./src/embedded/oracle.ts","./src/embedded/voting.ts"],"version":"5.7.3"} \ No newline at end of 
file diff --git a/packages/state/package.json b/packages/state/package.json index 0c8172c1..84cffb68 100644 --- a/packages/state/package.json +++ b/packages/state/package.json @@ -14,6 +14,8 @@ }, "dependencies": { "@devnet/keygen": "workspace:*", + "@devnet/types": "workspace:*", + "@devnet/utils": "workspace:*", "@types/node": "^22.10.5", "typescript": "^5", "zod": "^3.24.1" diff --git a/packages/state/src/base-state.ts b/packages/state/src/base-state.ts index 4d166283..6fa03c54 100644 --- a/packages/state/src/base-state.ts +++ b/packages/state/src/base-state.ts @@ -1,3 +1,4 @@ +import { ChainRoot, NetworkArtifactRoot } from "@devnet/types"; import path from "node:path"; import { ZodSchema, z } from "zod"; @@ -5,12 +6,12 @@ import { JsonDb } from "./json-db/index.js"; import { Config, ConfigValidator } from "./schemas.js"; export abstract class BaseState { - protected appState: JsonDb; - protected config: Config; + protected readonly config: Config; protected parsedConsensusGenesisState: JsonDb; protected validators: JsonDb; + private appState: JsonDb; - constructor(rawConfig: unknown, artifactsRoot: string, chainRoot: string) { + public constructor(rawConfig: unknown, artifactsRoot: NetworkArtifactRoot, chainRoot: ChainRoot) { this.config = ConfigValidator.validate(rawConfig); this.appState = new JsonDb(path.join(artifactsRoot, "state.json")); this.parsedConsensusGenesisState = new JsonDb( @@ -20,19 +21,23 @@ export abstract class BaseState { } protected async getProperties( - keys: { [K in keyof T]: string }, + keysOrRootKey: { [K in keyof T]: string } | string, group: keyof Config, schema: ZodSchema, must: M, ): Promise> { const reader = await this.appState.getReader(); - const result: Partial = {}; + let result: Partial = {}; const groupConfig = this.config[group] || {}; - for (const key in keys) { - if (Object.hasOwn(keys, key)) { - const dbPath = keys[key]; - result[key] = (groupConfig as any)[key] ?? 
reader.get(dbPath); + if (typeof keysOrRootKey === "string") { + result = reader.get(keysOrRootKey) + } else { + for (const key in keysOrRootKey) { + if (Object.hasOwn(keysOrRootKey, key)) { + const dbPath = keysOrRootKey[key]; + result[key] = (groupConfig as any)[key] ?? reader.get(dbPath); + } } } diff --git a/packages/state/src/constants.ts b/packages/state/src/constants.ts index c52eeaf0..8c6698a9 100644 --- a/packages/state/src/constants.ts +++ b/packages/state/src/constants.ts @@ -4,4 +4,4 @@ export const PARSED_CONSENSUS_GENESIS_FILE = export const VALIDATORS_STATE = "validators/state.json"; export const WALLET_KEYS_COUNT = 20; -export const KURTOSIS_DEFAULT_PRESET = "pectra-devnet4"; + diff --git a/packages/state/src/index.ts b/packages/state/src/index.ts index d23f9d3b..ed467892 100644 --- a/packages/state/src/index.ts +++ b/packages/state/src/index.ts @@ -1,79 +1,39 @@ import { DepositData, DepositDataResult, Keystores } from "@devnet/keygen"; +import { ChainRoot, NetworkArtifactRoot } from "@devnet/types"; +import { isEmptyObject } from "@devnet/utils"; import { BaseState } from "./base-state.js"; import { WALLET_KEYS_COUNT } from "./constants.js"; import { - BlockScoutSchema, - CSMConfigSchema, - CSMNewVerifierSchema, - ChainConfigSchema, + ChainState, DataBusConfigSchema, - KurtosisSchema, - LidoConfigSchema, - NodesChainConfigSchema, ParsedConsensusGenesisStateSchema, WalletSchema, } from "./schemas.js"; import { sharedWallet } from "./shared-wallet.js"; import { generateKeysFromMnemonicOnce } from "./wallet/index.js"; +export { Config } from './schemas.js'; + +export interface StateInterface extends State { + // augmented in user code +} + export class State extends BaseState { - async getBlockScout(must: M = true as M) { - return this.getProperties( - { url: "blockscout.url", api: "blockscout.api" }, - "blockscout", - BlockScoutSchema, - must, - ); + public constructor(rawConfig: unknown, networkArtifactsRoot: NetworkArtifactRoot, chainRoot: 
ChainRoot) { + super(rawConfig, networkArtifactsRoot, chainRoot); } - async getChain(must: M = true as M) { - return this.getProperties( - { - clPrivate: "chain.binding.clNodesPrivate.0", - // clWsPrivate1: "chain.binding.clWsPrivate.1", - // ... - clPublic: "chain.binding.clNodes.0", - elPrivate: "chain.binding.elNodesPrivate.0", - elPublic: "chain.binding.elNodes.0", - elWsPublic: "chain.binding.elWs.0", - elWsPrivate: "chain.binding.elWsPrivate.0", - validatorsApi: "chain.binding.validatorsApi.0", - validatorsApiPrivate: "chain.binding.validatorsApiPrivate.0", - }, - "chain", - ChainConfigSchema, - must, - ); + async isChainDeployed() { + const state = await this.getChain(false); + return state && !isEmptyObject(state); } - async getNodes(must: M = true as M) { + async getChain(must: M = true as M) { return this.getProperties( - { - clNodesSpecs: "chain.binding.clNodesSpecs", - }, "chain", - NodesChainConfigSchema, - must, - ); - } - - async getCSM(must: M = true as M) { - return this.getProperties( - { - accounting: "csm.CSAccounting", - earlyAdoption: "csm.CSEarlyAdoption", - feeDistributor: "csm.CSFeeDistributor", - feeOracle: "csm.CSFeeOracle", - gateSeal: "csm.GateSeal", - hashConsensus: "csm.HashConsensus", - lidoLocator: "csm.LidoLocator", - module: "csm.CSModule", - verifier: "csm.CSVerifier", - permissionlessGate: "csm.PermissionlessGate", - }, - "csm", - CSMConfigSchema, + "chain", + ChainState, must, ); } @@ -99,43 +59,6 @@ export class State extends BaseState { return currentState?.keystores as Keystores[]; } - async getKurtosis() { - const { kurtosis } = this.config; - const loadConfig = await KurtosisSchema.parseAsync(kurtosis); - - return loadConfig; - } - - async getLido(must: M = true as M) { - return this.getProperties( - { - accountingOracle: "lidoCore.accountingOracle.proxy.address", - agent: "lidoCore.app:aragon-agent.proxy.address", - locator: "lidoCore.lidoLocator.proxy.address", - sanityChecker: 
"lidoCore.oracleReportSanityChecker.address", - tokenManager: "lidoCore.app:aragon-token-manager.proxy.address", - validatorExitBus: "lidoCore.validatorsExitBusOracle.proxy.address", - voting: "lidoCore.app:aragon-voting.proxy.address", - treasury: - "lidoCore.withdrawalVault.implementation.constructorArgs.1", - - stakingRouter: "lidoCore.stakingRouter.proxy.address", - curatedModule: "lidoCore.app:node-operators-registry.proxy.address", - acl: "lidoCore.aragon-acl.proxy.address", - oracleDaemonConfig: "lidoCore.oracleDaemonConfig.address", - withdrawalVault: "lidoCore.withdrawalVault.proxy.address", - withdrawalQueue: "lidoCore.withdrawalQueueERC721.proxy.address", - withdrawalVaultImpl: "lidoCore.withdrawalVault.implementation.address", - validatorExitBusImpl: "lidoCore.validatorsExitBusOracle.implementation.address", - withdrawalQueueImpl: "lidoCore.withdrawalQueueERC721.implementation.address", - finance: "lidoCore.app:aragon-finance.proxy.address" - }, - "lido", - LidoConfigSchema, - must, - ); - } - async getNamedWallet() { const [ deployer, @@ -160,17 +83,6 @@ export class State extends BaseState { }; } - async getNewVerifier(must: M = true as M) { - return this.getProperties( - { - CSVerifier: "electraVerifier.CSVerifier", - }, - "csm", - CSMNewVerifierSchema, - must, - ); - } - async getParsedConsensusGenesisState( must: M = true as M, ) { @@ -197,16 +109,14 @@ export class State extends BaseState { return WalletSchema.parseAsync(wallet ?? 
sharedWallet); } - async updateBlockScout(jsonData: unknown) { - await this.updateProperties("blockscout", jsonData); - } - async updateChain(jsonData: unknown) { - await this.updateProperties("chain", jsonData); + + async removeChain() { + await this.updateProperties("chain", {}); } - async updateCSM(jsonData: unknown) { - await this.updateProperties("csm", jsonData); + async updateChain(state: ChainState) { + await this.updateProperties("chain", state); } async updateDataBus(jsonData: unknown) { @@ -222,13 +132,7 @@ export class State extends BaseState { await this.validators.update(updated); } - async updateElectraVerifier(jsonData: unknown) { - await this.appState.update({ electraVerifier: jsonData }); - } - async updateLido(jsonData: unknown) { - await this.updateProperties("lidoCore", jsonData); - } async updateValidatorsData(newData: DepositDataResult) { const currentState = await this.validators.read(); diff --git a/packages/state/src/schemas.ts b/packages/state/src/schemas.ts index d6645cb5..84503461 100644 --- a/packages/state/src/schemas.ts +++ b/packages/state/src/schemas.ts @@ -1,13 +1,5 @@ import { z } from "zod"; -import { KURTOSIS_DEFAULT_PRESET } from "./constants.js"; - -export const BlockScoutSchema = z.object({ - url: z.string().url(), - // http://localhost:3080/api - api: z.string().url(), -}); - export const PortSchema = z.object({ publicPort: z.number().optional(), privatePort: z.number().optional(), @@ -27,99 +19,55 @@ export const NodesChainConfigSchema = z.object({ clNodesSpecs: z.array(ContainerInfoSchema), }); -export const ChainConfigSchema = z.object({ +export const ChainState = z.object({ clPrivate: z.string().url(), clPublic: z.string().url(), + elClientType: z.string(), // geth | reth | ... 
elPrivate: z.string().url(), elPublic: z.string().url(), elWsPublic: z.string().url(), elWsPrivate: z.string().url(), - validatorsApi: z.string().url(), + validatorsApiPublic: z.string().url(), validatorsApiPrivate: z.string().url(), }); +export type ChainState = z.infer; + export const ParsedConsensusGenesisStateSchema = z.object({ genesisValidatorsRoot: z.string(), genesisTime: z.string(), }); -export const LidoConfigSchema = z.object({ - accountingOracle: z.string(), - agent: z.string(), - locator: z.string(), - sanityChecker: z.string(), - tokenManager: z.string(), - validatorExitBus: z.string(), - voting: z.string(), - treasury: z.string(), - withdrawalVault: z.string(), - stakingRouter: z.string(), - curatedModule: z.string(), - acl: z.string(), - oracleDaemonConfig: z.string(), - withdrawalQueue: z.string(), - finance: z.string(), - - withdrawalVaultImpl: z.string(), - withdrawalQueueImpl: z.string(), - validatorExitBusImpl: z.string(), -}); - -export const CSMConfigSchema = z.object({ - accounting: z.string(), - earlyAdoption: z.string(), - feeDistributor: z.string(), - feeOracle: z.string(), - gateSeal: z.string(), - hashConsensus: z.string(), - lidoLocator: z.string(), - module: z.string(), - verifier: z.string(), - permissionlessGate: z.string(), -}); - export const DataBusConfigSchema = z.object({ address: z.string(), }); -export const CSMNewVerifierSchema = z.object({ - CSVerifier: z.string(), -}); + export const WalletSchema = z .array(z.object({ privateKey: z.string(), publicKey: z.string() })) .min(20, { message: "Wallet must have at least 20 items" }); -export const KurtosisSchema = z - .object({ preset: z.string() }) - .default({ preset: KURTOSIS_DEFAULT_PRESET }); - export const WalletMnemonic = z.string(); const ConfigSchema = z.object({ - chain: ChainConfigSchema.partial().optional(), - csm: CSMConfigSchema.partial().optional(), - lido: LidoConfigSchema.partial().optional(), + chain: ChainState.partial().optional(), wallet: 
WalletSchema.optional(), walletMnemonic: WalletMnemonic.optional(), parsedConsensusGenesisState: ParsedConsensusGenesisStateSchema.partial().optional(), - kurtosis: KurtosisSchema.optional(), - blockscout: BlockScoutSchema.optional(), dataBus: DataBusConfigSchema.optional(), }); -export type BlockScoutConfig = z.infer; -export type ChainConfig = z.infer; -export type LidoConfig = z.infer; +export type ChainConfig = z.infer; export type ParsedConsensusGenesisState = z.infer< typeof ParsedConsensusGenesisStateSchema >; -export type CSMConfig = z.infer; export type WalletConfig = z.infer; -export type KurtosisConfig = z.infer; -export type Config = z.infer; +export interface Config extends z.infer { + +} export const ConfigValidator = { validate(config: unknown): Config { diff --git a/packages/types/.eslintignore b/packages/types/.eslintignore new file mode 100644 index 00000000..4b2c927a --- /dev/null +++ b/packages/types/.eslintignore @@ -0,0 +1,3 @@ +submodules +dist +node_modules diff --git a/packages/types/.eslintrc b/packages/types/.eslintrc new file mode 100644 index 00000000..a0af25f7 --- /dev/null +++ b/packages/types/.eslintrc @@ -0,0 +1,22 @@ +{ + "extends": ["oclif", "oclif-typescript", "prettier"], + "overrides": [ + { + "files": ["src/**/*.ts", "tests/**/*.ts"], + "rules": { + "no-console": "off", + "no-await-in-loop": "off", + "no-promise-executor-return": "off", + "new-cap": "off", + "@typescript-eslint/no-explicit-any": "off", + "no-return-await": "off", + "unicorn/no-array-reduce": "off", + "unicorn/text-encoding-identifier-case": "off", + "unicorn/consistent-destructuring": "off", + "max-params":"off", + "unicorn/no-array-for-each": "off", + "perfectionist/sort-objects": "off" + }, + }, + ], +} diff --git a/packages/types/package.json b/packages/types/package.json new file mode 100644 index 00000000..80faf2e9 --- /dev/null +++ b/packages/types/package.json @@ -0,0 +1,33 @@ +{ + "name": "@devnet/types", + "version": "1.0.0", + "type": "module", + 
"main": "dist/index.js", + "types": "src/index.ts", + "scripts": { + "build": "rm -rf dist && rm -rf tsconfig.tsbuildinfo && tsc -b", + "build:types": "tsc --build --emitDeclarationOnly", + "lint": "eslint . --ext .ts && tsc --noEmit" + }, + "exports": { + ".": "./dist/index.js" + }, + "dependencies": { + "@oclif/core": "^4.0.37", + "@oclif/plugin-help": "^6.2.19", + "@types/node": "^22.10.5", + "dotenv": "^17.2.1", + "typescript": "^5" + }, + "devDependencies": { + "eslint": "^8", + "eslint-config-oclif": "^5", + "eslint-config-oclif-typescript": "^3", + "eslint-config-prettier": "^9", + "eslint-plugin-prettier": "^5.2.2", + "prettier": "^3.4.2" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/types/src/artifact-root.ts b/packages/types/src/artifact-root.ts new file mode 100644 index 00000000..dffe1340 --- /dev/null +++ b/packages/types/src/artifact-root.ts @@ -0,0 +1,27 @@ +import { z } from "zod"; + +import { Path } from "./path.js"; + +/** + * ArtifactRoot = /artifacts/ + * NetworkArtifactRoot = // + * ServiceArtifactRoot = // + * ChainRoot = /// + */ + +export const ArtifactRootBrand = Symbol('ArtifactRoot'); +export const ChainRootBrand = Symbol('ChainRootBrand'); +export const NetworkArtifactRootBrand = Symbol('NetworkArtifactRootBrand'); +export const ServiceArtifactRootBrand = Symbol('ServiceArtifactRootBrand'); + +export const ArtifactRoot = Path.brand(ArtifactRootBrand); +export type ArtifactRoot = z.infer; + +export const NetworkArtifactRoot = Path.brand(NetworkArtifactRootBrand); +export type NetworkArtifactRoot = z.infer; + +export const ServiceArtifactRoot = Path.brand(ServiceArtifactRootBrand); +export type ServiceArtifactRoot = z.infer; + +export const ChainRoot = Path.brand(ChainRootBrand); +export type ChainRoot = z.infer; diff --git a/packages/types/src/index.ts b/packages/types/src/index.ts new file mode 100644 index 00000000..92b619f1 --- /dev/null +++ b/packages/types/src/index.ts @@ -0,0 +1,6 @@ +import * as dotenv from 
"dotenv"; +dotenv.config({ path: '.env' }); + +export * from './artifact-root.js'; +export * from './network.js'; +export * from './path.js'; diff --git a/packages/types/src/network.ts b/packages/types/src/network.ts new file mode 100644 index 00000000..ffd03f33 --- /dev/null +++ b/packages/types/src/network.ts @@ -0,0 +1,13 @@ +import { z } from "zod"; + +export const NetworkBrand = Symbol('NetworkBrand'); + +export const DEFAULT_NETWORK_NAME = process.env.DEVNET_NAME ?? 'my-devnet'; +export const NETWORK_NAME_SUBSTITUTION = '$(DEVNET_NAME)'; + +/** + * Unique name of the devnet network + * @see DEFAULT_NETWORK_NAME + */ +export const Network = z.string().brand(NetworkBrand); +export type Network = z.infer; diff --git a/packages/types/src/path.ts b/packages/types/src/path.ts new file mode 100644 index 00000000..953bc0f3 --- /dev/null +++ b/packages/types/src/path.ts @@ -0,0 +1,6 @@ +import { z } from "zod"; + +export const PathBrand = Symbol('PathBrand'); + +export const Path = z.string().brand(PathBrand); +export type Path = z.infer; diff --git a/packages/types/tsconfig.json b/packages/types/tsconfig.json new file mode 100644 index 00000000..9ed464ce --- /dev/null +++ b/packages/types/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "module": "Node16", + "outDir": "./dist", + "rootDir": "src", + "strict": true, + "target": "es2022", + "moduleResolution": "node16", + "resolveJsonModule": true, + "strictNullChecks": true, + + "declaration": true, + "declarationMap": true, + "sourceMap": true + + }, + "include": ["./src/**/*"], + "exclude": [ + "./dist/**/*" + ], + "ts-node": { + "esm": true + } +} diff --git a/packages/types/tsconfig.tsbuildinfo b/packages/types/tsconfig.tsbuildinfo new file mode 100644 index 00000000..3b5b324e --- /dev/null +++ b/packages/types/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["./src/artifact-root.ts","./src/index.ts","./src/network.ts","./src/path.ts"],"version":"5.7.3"} \ No newline at end of file diff --git 
a/packages/ui/.eslintignore b/packages/ui/.eslintignore new file mode 100644 index 00000000..4b2c927a --- /dev/null +++ b/packages/ui/.eslintignore @@ -0,0 +1,3 @@ +submodules +dist +node_modules diff --git a/packages/ui/.eslintrc b/packages/ui/.eslintrc new file mode 100644 index 00000000..a0af25f7 --- /dev/null +++ b/packages/ui/.eslintrc @@ -0,0 +1,22 @@ +{ + "extends": ["oclif", "oclif-typescript", "prettier"], + "overrides": [ + { + "files": ["src/**/*.ts", "tests/**/*.ts"], + "rules": { + "no-console": "off", + "no-await-in-loop": "off", + "no-promise-executor-return": "off", + "new-cap": "off", + "@typescript-eslint/no-explicit-any": "off", + "no-return-await": "off", + "unicorn/no-array-reduce": "off", + "unicorn/text-encoding-identifier-case": "off", + "unicorn/consistent-destructuring": "off", + "max-params":"off", + "unicorn/no-array-for-each": "off", + "perfectionist/sort-objects": "off" + }, + }, + ], +} diff --git a/packages/ui/package.json b/packages/ui/package.json new file mode 100644 index 00000000..d619d1f4 --- /dev/null +++ b/packages/ui/package.json @@ -0,0 +1,32 @@ +{ + "name": "@devnet/ui", + "version": "1.0.0", + "type": "module", + "main": "dist/index.js", + "types": "src/index.ts", + "scripts": { + "build": "rm -rf dist && rm -rf tsconfig.tsbuildinfo && tsc -b", + "build:types": "tsc --build --emitDeclarationOnly", + "lint": "eslint . 
--ext .ts && tsc --noEmit" + }, + "exports": { + ".": "./dist/index.js" + }, + "dependencies": { + "@oclif/core": "^4.0.37", + "@oclif/plugin-help": "^6.2.19", + "@types/node": "^22.10.5", + "typescript": "^5" + }, + "devDependencies": { + "eslint": "^8", + "eslint-config-oclif": "^5", + "eslint-config-oclif-typescript": "^3", + "eslint-config-prettier": "^9", + "eslint-plugin-prettier": "^5.2.2", + "prettier": "^3.4.2" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/command/src/ui.ts b/packages/ui/src/index.ts similarity index 99% rename from packages/command/src/ui.ts rename to packages/ui/src/index.ts index da7a52d6..7215b091 100644 --- a/packages/command/src/ui.ts +++ b/packages/ui/src/index.ts @@ -1,7 +1,9 @@ /* eslint-disable no-bitwise */ import chalk from "chalk"; + export const applyColor = (color: string, text: string) => chalk.hex(color)(text); + export const getColorForText = (text: string): string => { let hash = 0; for (let i = 0; i < text.length; i++) { diff --git a/packages/ui/tsconfig.json b/packages/ui/tsconfig.json new file mode 100644 index 00000000..9ed464ce --- /dev/null +++ b/packages/ui/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "module": "Node16", + "outDir": "./dist", + "rootDir": "src", + "strict": true, + "target": "es2022", + "moduleResolution": "node16", + "resolveJsonModule": true, + "strictNullChecks": true, + + "declaration": true, + "declarationMap": true, + "sourceMap": true + + }, + "include": ["./src/**/*"], + "exclude": [ + "./dist/**/*" + ], + "ts-node": { + "esm": true + } +} diff --git a/packages/ui/tsconfig.tsbuildinfo b/packages/ui/tsconfig.tsbuildinfo new file mode 100644 index 00000000..44940f36 --- /dev/null +++ b/packages/ui/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["./src/index.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/packages/utils/.eslintignore b/packages/utils/.eslintignore new file mode 100644 index 00000000..4b2c927a --- /dev/null +++ 
b/packages/utils/.eslintignore @@ -0,0 +1,3 @@ +submodules +dist +node_modules diff --git a/packages/utils/.eslintrc b/packages/utils/.eslintrc new file mode 100644 index 00000000..a0af25f7 --- /dev/null +++ b/packages/utils/.eslintrc @@ -0,0 +1,22 @@ +{ + "extends": ["oclif", "oclif-typescript", "prettier"], + "overrides": [ + { + "files": ["src/**/*.ts", "tests/**/*.ts"], + "rules": { + "no-console": "off", + "no-await-in-loop": "off", + "no-promise-executor-return": "off", + "new-cap": "off", + "@typescript-eslint/no-explicit-any": "off", + "no-return-await": "off", + "unicorn/no-array-reduce": "off", + "unicorn/text-encoding-identifier-case": "off", + "unicorn/consistent-destructuring": "off", + "max-params":"off", + "unicorn/no-array-for-each": "off", + "perfectionist/sort-objects": "off" + }, + }, + ], +} diff --git a/packages/utils/package.json b/packages/utils/package.json new file mode 100644 index 00000000..4725f2c4 --- /dev/null +++ b/packages/utils/package.json @@ -0,0 +1,33 @@ +{ + "name": "@devnet/utils", + "version": "1.0.0", + "type": "module", + "main": "dist/index.js", + "types": "src/index.ts", + "scripts": { + "build": "rm -rf dist && rm -rf tsconfig.tsbuildinfo && tsc -b", + "build:types": "tsc --build --emitDeclarationOnly", + "lint": "eslint . 
--ext .ts && tsc --noEmit" + }, + "exports": { + ".": "./dist/index.js" + }, + "dependencies": { + "@oclif/core": "^4.0.37", + "@oclif/plugin-help": "^6.2.19", + "@types/node": "^22.10.5", + "dotenv": "^17.2.1", + "typescript": "^5" + }, + "devDependencies": { + "eslint": "^8", + "eslint-config-oclif": "^5", + "eslint-config-oclif-typescript": "^3", + "eslint-config-prettier": "^9", + "eslint-plugin-prettier": "^5.2.2", + "prettier": "^3.4.2" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/utils/src/array.ts b/packages/utils/src/array.ts new file mode 100644 index 00000000..b6c2248b --- /dev/null +++ b/packages/utils/src/array.ts @@ -0,0 +1,20 @@ +import { AssertionError } from "node:assert"; + +export type NonEmptyArray = [T, ...T[]]; + +export const isNonEmptyArray = (arr: T[]): arr is NonEmptyArray => arr.length > 0; + +export const assertNonEmpty = ( + array: T[], + onEmpty?: (array: T[]) => Error +): NonEmptyArray => { + const onEmptyHandler = (array: T[]): never => { + if (onEmpty) { + throw onEmpty(array); + } + + throw new AssertionError({ message: "Array is empty", expected: "Non empty array", actual: array }) + }; + + return isNonEmptyArray(array) ? 
array : onEmptyHandler(array); +}; diff --git a/packages/command/src/assert.ts b/packages/utils/src/assert.ts similarity index 100% rename from packages/command/src/assert.ts rename to packages/utils/src/assert.ts diff --git a/packages/utils/src/error.ts b/packages/utils/src/error.ts new file mode 100644 index 00000000..9444b264 --- /dev/null +++ b/packages/utils/src/error.ts @@ -0,0 +1,11 @@ +export class DevNetError extends Error { + public constructor(message?: string, public readonly cause?: Error) { + super(message || `DevNetError`); + Object.setPrototypeOf(this, new.target.prototype); + } +} + + +export const throwError = (error: T) => { + throw error; +} diff --git a/packages/utils/src/index.ts b/packages/utils/src/index.ts new file mode 100644 index 00000000..d5975041 --- /dev/null +++ b/packages/utils/src/index.ts @@ -0,0 +1,7 @@ +export * from './array.js'; +export * from './assert.js'; +export * from './error.js'; +export * from './object.js'; +export * from './predicate.js'; +export * from './sleep.js'; + diff --git a/packages/utils/src/object.ts b/packages/utils/src/object.ts new file mode 100644 index 00000000..25349714 --- /dev/null +++ b/packages/utils/src/object.ts @@ -0,0 +1,17 @@ +export const isEmptyObject = (obj: object | unknown): obj is Record => { + if (typeof obj === 'object' && obj === null) { + return true; + } + + if (obj === undefined) { + return true; + } + + for (const prop in obj) { + if (Object.hasOwn(obj, prop)) { + return false; + } + } + + return true; +} diff --git a/packages/utils/src/predicate.ts b/packages/utils/src/predicate.ts new file mode 100644 index 00000000..5cfb01ee --- /dev/null +++ b/packages/utils/src/predicate.ts @@ -0,0 +1,44 @@ +export type Constructor = new (...args: any[]) => T; + +/** + * Composable type guard to check that the value is not type B + * Examples: + * * `not(isInstance(SomeClass))` + * * `not(isNumber())` + * * `not((value: unknown): value is string => (typeof value === 'string'))` + */ +export 
const not = ( + guard: + ((bValue: A | B) => bValue is B) +): (value: A | B) => value is Exclude => + (value): value is Exclude => !guard(value); + + +export const isInstance = (clazz: Constructor): (value: A | B) => value is B => + (v): v is B => v instanceof clazz; + + +export function isNumber(value: unknown): value is number; +export function isNumber(): (value: unknown) => value is number; +export function isNumber(value?: unknown): any { + return arguments.length === 0 + ? (v: unknown): v is number => typeof v === "number" + : typeof value === "number"; +} + + +export function isString(value: unknown): value is string; +export function isString(): (value: unknown) => value is string; +export function isString(value?: unknown): any { + return arguments.length === 0 + ? (v: unknown): v is string => typeof v === "string" + : typeof value === "string"; +} + +export function isNull(value: unknown): value is null; +export function isNull(): (value: unknown) => value is null; +export function isNull(value?: unknown): any { + return arguments.length === 0 + ? 
(v: unknown): v is null => v === null + : value === null; +} diff --git a/packages/utils/src/sleep.ts b/packages/utils/src/sleep.ts new file mode 100644 index 00000000..0db69f51 --- /dev/null +++ b/packages/utils/src/sleep.ts @@ -0,0 +1,3 @@ +export const sleep = (timeoutMs: number) => { + return new Promise((resolve) => setTimeout(resolve, timeoutMs)); +}; diff --git a/packages/utils/tsconfig.json b/packages/utils/tsconfig.json new file mode 100644 index 00000000..9ed464ce --- /dev/null +++ b/packages/utils/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "module": "Node16", + "outDir": "./dist", + "rootDir": "src", + "strict": true, + "target": "es2022", + "moduleResolution": "node16", + "resolveJsonModule": true, + "strictNullChecks": true, + + "declaration": true, + "declarationMap": true, + "sourceMap": true + + }, + "include": ["./src/**/*"], + "exclude": [ + "./dist/**/*" + ], + "ts-node": { + "esm": true + } +} diff --git a/packages/utils/tsconfig.tsbuildinfo b/packages/utils/tsconfig.tsbuildinfo new file mode 100644 index 00000000..8667de36 --- /dev/null +++ b/packages/utils/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["./src/array.ts","./src/assert.ts","./src/error.ts","./src/index.ts","./src/object.ts","./src/predicate.ts","./src/sleep.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/src/commands/blockscout/constants/blockscout.constants.ts b/src/commands/blockscout/constants/blockscout.constants.ts new file mode 100644 index 00000000..146133fb --- /dev/null +++ b/src/commands/blockscout/constants/blockscout.constants.ts @@ -0,0 +1,4 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-blockscout`; diff --git a/src/commands/blockscout/down.ts b/src/commands/blockscout/down.ts index e8aa7b97..a929e9e8 100644 --- a/src/commands/blockscout/down.ts +++ b/src/commands/blockscout/down.ts @@ -1,30 +1,85 @@ -import { 
command } from "@devnet/command"; +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + deleteNamespacedPersistentVolumeClaimIfExists, + getK8s, + k8s, +} from "@devnet/k8s"; +import path from "node:path"; + +import { NAMESPACE } from "./constants/blockscout.constants.js"; export const BlockscoutDown = command.cli({ - description: "Down Blockscout", - params: {}, - async handler({ dre, dre: { logger } }) { + description: "Down Blockscout in k8s", + params: { + force: Params.boolean({ + description: "Do not check that the registry was already stopped", + default: false, + required: false, + }), + }, + async handler({ dre, dre: { logger }, params }) { const { state, - network, services: { blockscout }, } = dre; - const { elPrivate, elWsPrivate} = await state.getChain(); + if (!(await dre.state.isBlockscoutDeployed()) && !(params.force)) { + logger.log("Blockscout already stopped."); + return; + } + + + const blockScoutPostgresqlSh = blockscout.sh({ + cwd: path.join(blockscout.artifact.root, 'blockscout-postgresql'), + env: { + NAMESPACE: NAMESPACE(dre), + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await blockScoutPostgresqlSh`make debug`; + await blockScoutPostgresqlSh`make lint`; + await blockScoutPostgresqlSh`make uninstall`; - const blockScoutSh = blockscout.sh({ + // TODO remove postgressql persistent volumes + + const blockScoutVerificationSh = blockscout.sh({ + cwd: path.join(blockscout.artifact.root, 'verification'), + env: { + NAMESPACE: NAMESPACE(dre), + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await blockScoutVerificationSh`make debug`; + await blockScoutVerificationSh`make lint`; + await blockScoutVerificationSh`make uninstall`; + + const blockScoutStackSh = blockscout.sh({ + cwd: path.join(blockscout.artifact.root, 'blockscout-stack'), env: { - BLOCKSCOUT_RPC_URL: elPrivate, - BLOCKSCOUT_WS_RPC_URL: elWsPrivate, - 
DOCKER_NETWORK_NAME: `kt-${network.name}`, - COMPOSE_PROJECT_NAME: `blockscout-${network.name}`, + NAMESPACE: NAMESPACE(dre), + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, }, }); - await blockScoutSh`docker compose -f geth.yml down -v`; + await blockScoutStackSh`make debug`; + await blockScoutStackSh`make lint`; + await blockScoutStackSh`make uninstall`; + + // removing postgress persistent volume claim + logger.log("Removing persistent volume claim for postgress"); + await deleteNamespacedPersistentVolumeClaimIfExists( + NAMESPACE(dre), + 'data-postgresql-0', // hardcoded for now + ); + + logger.log("Blockscout stopped."); - logger.log("Blockscout stopped successfully."); + // await deleteNamespace(NAMESPACE(dre)); - await state.updateBlockScout({}); + await state.removeBlockscout(); }, }); diff --git a/src/commands/blockscout/extensions/blockscout.extension.ts b/src/commands/blockscout/extensions/blockscout.extension.ts new file mode 100644 index 00000000..8d955551 --- /dev/null +++ b/src/commands/blockscout/extensions/blockscout.extension.ts @@ -0,0 +1,59 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { z } from "zod"; + +const isEmpty = (obj: object): obj is Record => { + for (const prop in obj) { + if (Object.hasOwn(obj, prop)) { + return false; + } + } + + return true; +} + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getBlockscout(must?: M,): Promise>; + isBlockscoutDeployed(): Promise; + removeBlockscout(): Promise; + updateBlockscout(state: BlockscoutState): Promise; + } + + export interface Config { + blockscout: BlockscoutState; + } +} + +export const BlockscoutState = z.object({ + url: z.string().url(), + api: z.string().url(), +}); + +export type BlockscoutState = z.infer; + +export const blockscoutExtension = (dre: 
DevNetRuntimeEnvironmentInterface) => { + dre.state.updateBlockscout = (async function (state: BlockscoutState) { + await dre.state.updateProperties("blockscout", state); + }); + + dre.state.removeBlockscout = (async function () { + await dre.state.updateProperties("blockscout", {}); + }); + + dre.state.isBlockscoutDeployed = (async function () { + const state = await dre.state.getBlockscout(false); + return state && !isEmpty(state); + }) + + dre.state.getBlockscout = (async function (must: M = true as M) { + return dre.state.getProperties( + "blockscout", + "blockscout", + BlockscoutState, + must, + ); + }); +}; diff --git a/src/commands/blockscout/info.ts b/src/commands/blockscout/info.ts index 99264098..3444ec78 100644 --- a/src/commands/blockscout/info.ts +++ b/src/commands/blockscout/info.ts @@ -5,14 +5,15 @@ export const BlockscoutGetInfo = command.isomorphic({ "Retrieves and displays information about the blockscout service.", params: {}, async handler({ dre: { logger, state } }) { - const blockscoutInfo = await state.getBlockScout(false); - if (!blockscoutInfo) { - logger.log(`Blockscout service is not enabled`); + if (!(await state.isBlockscoutDeployed())) { + logger.log(`Blockscout is not enabled`); return; } logger.log(""); + const blockscoutInfo = await state.getBlockscout(); + logger.table( ["Service", "URL"], [ diff --git a/src/commands/blockscout/restart.ts b/src/commands/blockscout/restart.ts index 33cc338c..ec255cdf 100644 --- a/src/commands/blockscout/restart.ts +++ b/src/commands/blockscout/restart.ts @@ -9,7 +9,7 @@ export const RestartNodes = command.isomorphic({ async handler({ dre, dre: { logger } }) { logger.log("Restarting the blockscout..."); - await dre.runCommand(BlockscoutDown, {}); + await dre.runCommand(BlockscoutDown, { force: true }); logger.log("blockscout successfully stopped."); await dre.runCommand(BlockscoutUp, {}); diff --git a/src/commands/blockscout/up.ts b/src/commands/blockscout/up.ts index 46fcbfa7..0a1d8509 100644 --- 
a/src/commands/blockscout/up.ts +++ b/src/commands/blockscout/up.ts @@ -1,40 +1,102 @@ -import { command } from "@devnet/command"; +import { + command, + DEFAULT_NETWORK_NAME, + NETWORK_NAME_SUBSTITUTION, +} from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { addPrefixToIngressHostname } from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; +import path from "node:path"; + +import { NAMESPACE } from "./constants/blockscout.constants.js"; +import { blockscoutExtension } from "./extensions/blockscout.extension.js"; export const BlockscoutUp = command.cli({ - description: "Start Blockscout", + description: "Start Blockscout in k8s", params: {}, - async handler({ dre, dre: { logger } }) { - const { - state, - network, - services: { blockscout }, - } = dre; + extensions: [blockscoutExtension], + async handler({ dre, dre: { logger, state, services: { blockscout } } }) { + + if (await dre.state.isBlockscoutDeployed()) { + logger.log("Blockscout already deployed."); + return; + } + + const blockscoutIngressHostname = process.env.BLOCKSCOUT_BACKEND_INGRESS_HOSTNAME?. + replace(NETWORK_NAME_SUBSTITUTION, DEFAULT_NETWORK_NAME); + + if (!blockscoutIngressHostname) { + throw new DevNetError(`BLOCKSCOUT_BACKEND_INGRESS_HOSTNAME env variable is not set`); + } - const { elPrivate, elWsPrivate } = await state.getChain(); + const blockscoutFrontendIngressHostname = process.env.BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME?. 
+ replace(NETWORK_NAME_SUBSTITUTION, DEFAULT_NETWORK_NAME); - const blockScoutSh = blockscout.sh({ + if (!blockscoutFrontendIngressHostname) { + throw new DevNetError(`BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME env variable is not set`); + } + + const BLOCKSCOUT_BACKEND_INGRESS_HOSTNAME = addPrefixToIngressHostname( + blockscoutIngressHostname + ); + const BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME = addPrefixToIngressHostname( + blockscoutFrontendIngressHostname + ); + + const { elPrivate, elWsPrivate, elClientType } = await state.getChain(); + + const blockScoutPostgresqlSh = blockscout.sh({ + cwd: path.join(blockscout.artifact.root, 'blockscout-postgresql'), env: { - BLOCKSCOUT_RPC_URL: elPrivate, - BLOCKSCOUT_WS_RPC_URL: elWsPrivate, - DOCKER_NETWORK_NAME: `kt-${network.name}`, - COMPOSE_PROJECT_NAME: `blockscout-${network.name}`, + NAMESPACE: NAMESPACE(dre), + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, }, }); - await blockScoutSh`docker compose -f ./geth.yml up -d`; + await blockScoutPostgresqlSh`make debug`; + await blockScoutPostgresqlSh`make lint`; + await blockScoutPostgresqlSh`make install`; - const [info] = await blockscout.getExposedPorts(); - const apiHost = `localhost:${info.publicPort}`; - const publicUrl = `http://${apiHost}`; + // blockscout verification + const blockScoutVerificationSh = blockscout.sh({ + cwd: path.join(blockscout.artifact.root, 'verification'), + env: { + NAMESPACE: NAMESPACE(dre), + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + // Makefile-related ENV vars for Helm charts overrides + // see workspaces/blockscout/blockscout-*/Makefile + }, + }); + + await blockScoutVerificationSh`make debug`; + await blockScoutVerificationSh`make lint`; + await blockScoutVerificationSh`make install`; + + const blockScoutStackSh = blockscout.sh({ + cwd: path.join(blockscout.artifact.root, 'blockscout-stack'), + env: { + NAMESPACE: NAMESPACE(dre), + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + // Makefile-related ENV vars for Helm 
charts overrides + // see workspaces/blockscout/blockscout-*/Makefile + BLOCKSCOUT_ETHEREUM_JSONRPC_VARIANT: elClientType, + BLOCKSCOUT_ETHEREUM_JSONRPC_WS_URL: elWsPrivate, + BLOCKSCOUT_ETHEREUM_JSONRPC_TRACE_URL: elPrivate, + BLOCKSCOUT_ETHEREUM_JSONRPC_HTTP_URL: elPrivate, + BLOCKSCOUT_BACKEND_INGRESS_HOSTNAME, + BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME, + }, + }); - logger.log("Restart the frontend instance to pass the actual public url"); + await blockScoutStackSh`make debug`; + await blockScoutStackSh`make lint`; + await blockScoutStackSh`make install`; - await blockScoutSh({ - env: { NEXT_PUBLIC_API_HOST: apiHost, NEXT_PUBLIC_APP_HOST: apiHost }, - })`docker compose -f geth.yml up -d frontend`; + const publicUrl = `http://${BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME}`; + const publicBackendUrl = `http://${BLOCKSCOUT_BACKEND_INGRESS_HOSTNAME}`; - logger.log(`Blockscout started successfully on URL: ${publicUrl}`); + logger.log(`Blockscout started on URL: ${BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME}`); - await state.updateBlockScout({ url: publicUrl, api: `${publicUrl}/api` }); + await state.updateBlockscout({ url: publicUrl, api: `${publicBackendUrl}/api` }); }, }); diff --git a/src/commands/chain/artifacts.ts b/src/commands/chain/artifacts.ts deleted file mode 100644 index dda56772..00000000 --- a/src/commands/chain/artifacts.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { command } from "@devnet/command"; - -export const DownloadKurtosisArtifacts = command.cli({ - description: - "Downloads the genesis data for EL and CL nodes from the Kurtosis enclave.", - params: {}, - async handler({ dre, dre: { logger } }) { - - const { - services: { kurtosis }, - network, - } = dre; - - await kurtosis.sh`rm -rf network` - - await kurtosis.sh`kurtosis files download ${network.name} el_cl_genesis_data network`; - - logger.log("Genesis data downloaded successfully."); - }, -}); diff --git a/src/commands/chain/chain-sync-nodes-state-from-k8s.ts 
b/src/commands/chain/chain-sync-nodes-state-from-k8s.ts new file mode 100644 index 00000000..815fdb11 --- /dev/null +++ b/src/commands/chain/chain-sync-nodes-state-from-k8s.ts @@ -0,0 +1,150 @@ +import { command } from "@devnet/command"; +import { getK8sService } from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; +import { + assertNonEmpty, + isInstance, isNonEmptyArray, + not, + throwError +} from "@devnet/utils"; + +import { nodesExtension } from "./extensions/nodes.extension.js"; + +const SUPPORTED_CL = ["lighthouse", "teku", "prysm"]; +const SUPPORTED_EL = ["geth", "reth", "lodestar", "erigon"]; +const RPC_PORTS = new Set(['rpc', 'ws-rpc']); +const WS_PORTS = new Set(['ws-rpc', 'ws']); +const CL_HTTP_PORTS = ['http']; // TODO add support +const VC_PORTS = ['http-validator']; + +const elRegex = new RegExp(`^el-[1-9]-(${SUPPORTED_EL.join('|')})-(${SUPPORTED_CL.join('|')})$`); +const clRegex = new RegExp(`^cl-[1-9]-(${SUPPORTED_CL.join("|")})-(${SUPPORTED_EL.join('|')})$`); +const vcRegex = new RegExp(`^vc-[1-9]-(${SUPPORTED_EL.join("|")})-(${SUPPORTED_CL.join('|')})$`); + +const getClientTypeFromK8sServiceName = (k8sServiceName: string, regex: RegExp) => { + const results = k8sServiceName.match(regex); + return results?.[1]; +} + +const getVcClientTypeFromK8sServiceName = (k8sServiceName: string, regex: RegExp) => { + const results = k8sServiceName.match(regex); + return results?.[2]; +} + +export const ChainSyncNodesStateFromK8s = command.cli({ + description: + "Get nodes state from K8s and save it to JSON state", + params: {}, + extensions: [nodesExtension], + async handler({ dre, dre: { logger } }) { + logger.log("Getting nodes state from K8s..."); + + const elServices = await getK8sService(dre, { name: elRegex }); + + // execution + const elNodes = elServices.map((elService) => { + logger.log(`Found execution node service: [${elService.metadata?.name}]`); + + const servicePorts = elService.spec?.ports; + + const elRpcPort = servicePorts?.find(p 
=> RPC_PORTS.has(p.name ?? ''))?.port; + const elWsPort = servicePorts?.find(p => WS_PORTS.has(p.name ?? ''))?.port; + + if (!elRpcPort) { + logger.warn(`Execution node service [rpc] port not found [${String(servicePorts?.map(p => p.name).join(','))}]`); + return new DevNetError("❌ Execution node service [rpc] port not found"); + } + + if (!elWsPort) { + logger.warn(`Execution node service [ws] port not found [${String(servicePorts?.map(p => p.name).join(','))}]`); + return new DevNetError("❌ Execution node service [ws] port not found"); + } + + const elK8sServiceName = elService?.metadata?.name; + if (!elK8sServiceName) { + return new DevNetError("❌ Execution node k8s service name not found or empty"); + } + + const clientType = getClientTypeFromK8sServiceName(elK8sServiceName, elRegex); + + if (!clientType) { + return new DevNetError("❌ Execution node client type not found or empty"); + } + + return { clientType, rpcPort: elRpcPort, wsPort: elWsPort, k8sService: elK8sServiceName }; + }); + + // consensus + const clServices = await getK8sService(dre, { name: clRegex }); + const clNodes = clServices.map((clService) => { + logger.log(`Found consensus node service: [${clService.metadata?.name}]`); + + const clHttpPort = clService + .spec?.ports?.find((p) => p.name === 'http')?.port; + + if (!clHttpPort) { + return new DevNetError("❌ Consensus node service [http] port not found"); + } + + const clK8sServiceName = clService?.metadata?.name; + if (!clK8sServiceName) { + return new DevNetError("❌ Consensus node k8s service name not found or empty"); + } + + const clientType = getClientTypeFromK8sServiceName(clK8sServiceName, clRegex); + + if (!clientType) { + return new DevNetError("❌ Consensus node client type not found or empty"); + } + + return { clientType, httpPort: clHttpPort, k8sService: clK8sServiceName }; + }); + + // validator client + const vcServices = await getK8sService(dre, { name: vcRegex }); + const vcNodes = vcServices.map((vlService) => { + 
logger.log(`Found validator client node service: [${vlService.metadata?.name}]`); + + const vcHttpValidatorPort = vlService + .spec?.ports?.find((p) => p.name === 'http-validator')?.port; + + if (!vcHttpValidatorPort) { + return new DevNetError("❌ Validator client node service [http-validator] port not found"); + } + + const k8sServiceName = vlService?.metadata?.name; + if (!k8sServiceName) { + return new DevNetError("❌ Validator client nodes k8s service name not found or empty"); + } + + const clientType = getVcClientTypeFromK8sServiceName(k8sServiceName, vcRegex); + + if (!clientType) { + return new DevNetError("❌ Validator client node client type not found or empty"); + } + + return { clientType, httpValidatorPort: vcHttpValidatorPort, k8sService: k8sServiceName }; + }); + + if (elNodes.every(isInstance(DevNetError)) && isNonEmptyArray(elNodes)) { + return throwError(elNodes[0]); + } + + if (clNodes.every(isInstance(DevNetError)) && isNonEmptyArray(clNodes)) { + return throwError(clNodes[0]); + } + + if (vcNodes.every(isInstance(DevNetError)) && isNonEmptyArray(vcNodes)) { + return throwError(vcNodes[0]); + } + + await dre.state.updateNodes({ + el: assertNonEmpty(elNodes.filter(not(isInstance(DevNetError))), + () => new DevNetError('No execution nodes found')), + cl: assertNonEmpty(clNodes.filter(not(isInstance(DevNetError))), + () => new DevNetError('No consensus nodes found')), + vc: assertNonEmpty(vcNodes.filter(not(isInstance(DevNetError))), + () => new DevNetError('No validator client nodes found')), + }) + }, +}); diff --git a/src/commands/chain/chain-sync-state.ts b/src/commands/chain/chain-sync-state.ts new file mode 100644 index 00000000..ddd56db3 --- /dev/null +++ b/src/commands/chain/chain-sync-state.ts @@ -0,0 +1,31 @@ +import {command} from "@devnet/command"; + +export const ChainSyncState = command.isomorphic({ + description: + "Sync Chain state and place it in the state. 
Should be run after chain is up and nodes state synced", + params: {}, + async handler({ dre: { logger, state , network} }) { + logger.log( + "Syncing chain nodes state form k8s", + ); + // TODO check that devnet is in k8s or in docker + + const nodes = await state.getNodes(); + const nodesIngress = await state.getNodesIngress(); + + await state.updateChain({ + clPrivate: `http://${nodes.cl[0].k8sService}.kt-${network.name}.svc.cluster.local:${nodes.cl[0].httpPort}`, + clPublic: nodesIngress.cl[0].publicIngressUrl, + + elClientType: nodes.el[0].clientType, + elPrivate: `http://${nodes.el[0].k8sService}.kt-${network.name}.svc.cluster.local:${nodes.el[0].rpcPort}`, + elPublic: nodesIngress.el[0].publicIngressUrl, + + elWsPrivate: `http://${nodes.el[0].k8sService}.kt-${network.name}.svc.cluster.local:${nodes.el[0].wsPort}`, + elWsPublic: nodesIngress.el[0].publicIngressUrl, + + validatorsApiPublic: nodesIngress.vc[0].publicIngressUrl, + validatorsApiPrivate: `http://${nodes.vc[0].k8sService}.kt-${network.name}.svc.cluster.local:${nodes.vc[0].httpValidatorPort}`, + }); + }, +}); diff --git a/src/commands/chain/constants/nodes-ingress.constants.ts b/src/commands/chain/constants/nodes-ingress.constants.ts new file mode 100644 index 00000000..02e35958 --- /dev/null +++ b/src/commands/chain/constants/nodes-ingress.constants.ts @@ -0,0 +1,5 @@ +export const EXECUTION_INGRESS_LABEL = { 'com.lido.devnet.eth-node.execution': 'ingress' }; +export const VALIDATOR_INGRESS_LABEL = { 'com.lido.devnet.eth-node.validator': 'ingress' }; +export const CONSENSUS_INGRESS_LABEL = { 'com.lido.devnet.eth-node.consensus': 'ingress' }; +export const ETH_NODE_INGRESS_LABEL = { 'com.lido.devnet.eth-node': 'ingress' }; + diff --git a/src/commands/chain/down.ts b/src/commands/chain/down.ts index 45d58c9e..0f849bea 100644 --- a/src/commands/chain/down.ts +++ b/src/commands/chain/down.ts @@ -1,25 +1,33 @@ import { command } from "@devnet/command"; -export const KurtosisCleanUp = 
command.isomorphic({ +import { BlockscoutDown } from "../blockscout/down.js"; +import { KurtosisDoraK8sIngressDown } from "../kurtosis/dora/down.js"; +import { KurtosisK8sNodesIngressDown } from "../kurtosis/nodes/ingress-down.js"; +import { KurtosisStopPackage } from "../kurtosis/stop-package.js"; + +export const ChainDown = command.isomorphic({ description: - "Destroys the Kurtosis enclave, cleans the JSON database, and removes network artifacts.", + "Destroys the chain, cleans resources, and removes network artifacts.", params: {}, async handler({ + dre, dre: { logger, - services: { kurtosis }, - network, + state, }, }) { - logger.log("Destroying Kurtosis enclave..."); - logger.log("Removing network artifacts..."); + await dre.runCommand(KurtosisDoraK8sIngressDown, {}); + await dre.runCommand(KurtosisK8sNodesIngressDown, {}); + await dre.runCommand(BlockscoutDown, { force: false }); + + await state.removeNodes(); + await state.removeChain(); - await kurtosis.sh`kurtosis enclave rm -f ${network.name}`.catch((error) => - logger.error(error.message), - ); + await dre.runCommand(KurtosisStopPackage, {}); - await kurtosis.artifact.clean(); logger.log("Cleanup completed successfully."); + + }, }); diff --git a/src/commands/chain/extensions/nodes-ingress.extension.ts b/src/commands/chain/extensions/nodes-ingress.extension.ts new file mode 100644 index 00000000..c4cf09c6 --- /dev/null +++ b/src/commands/chain/extensions/nodes-ingress.extension.ts @@ -0,0 +1,46 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getNodesIngress(must?: M,): Promise>; + updateNodesIngress(options: NodesIngressState): Promise; + } + + export interface Config { + nodesIngress: NodesIngressState; + } +} + +export const 
NodesIngressState = z.object({ + el: z.array(z.object({ + publicIngressUrl: z.string().url(), + })).nonempty(), + cl: z.array(z.object({ + publicIngressUrl: z.string().url(), + })).nonempty(), + vc: z.array(z.object({ + publicIngressUrl: z.string().url(), + })).nonempty(), +}); + +export type NodesIngressState = z.infer; + +export const nodesIngressExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateNodesIngress = (async function (jsonData: NodesIngressState) { + await dre.state.updateProperties("nodesIngress", jsonData); + }); + + dre.state.getNodesIngress = (async function (must: M = true as M) { + return dre.state.getProperties( + "nodesIngress", + "nodesIngress", + NodesIngressState, + must, + ); + }); +}; diff --git a/src/commands/chain/extensions/nodes.extension.ts b/src/commands/chain/extensions/nodes.extension.ts new file mode 100644 index 00000000..50cd954b --- /dev/null +++ b/src/commands/chain/extensions/nodes.extension.ts @@ -0,0 +1,71 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getNodes(must?: M): Promise>; + isNodesDeployed(): Promise; + removeNodes(): Promise; + updateNodes(options: NodesState): Promise; + } + + export interface Config { + nodes: NodesState; + } +} + +export const NodesState = z.object({ + el: z.array( + z.object({ + clientType: z.string(), + k8sService: z.string(), + rpcPort: z.number(), + wsPort: z.number(), + }) + ).nonempty(), + cl: z.array( + z.object({ + clientType: z.string(), + k8sService: z.string(), + httpPort: z.number(), + }), + ).nonempty(), + vc: z.array( + z.object({ + clientType: z.string(), + k8sService: z.string(), + httpValidatorPort: z.number(), + }), + 
).nonempty(), +}); + +export type NodesState = z.infer; + +export const nodesExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateNodes = (async function (state: NodesState) { + await dre.state.updateProperties("nodes", state); + }); + + dre.state.removeNodes = (async function () { + await dre.state.updateProperties("nodes", {}); + }); + + dre.state.isNodesDeployed = (async function () { + const state = await dre.state.getNodes(false); + return state && !isEmptyObject(state); + }); + + dre.state.getNodes = (async function (must: M = true as M) { + return dre.state.getProperties( + "nodes", + "nodes", + NodesState, + must, + ); + }); +}; diff --git a/src/commands/chain/fork.ts b/src/commands/chain/fork.ts index 8ab74b79..f3200a0d 100644 --- a/src/commands/chain/fork.ts +++ b/src/commands/chain/fork.ts @@ -1,7 +1,7 @@ import { command } from "@devnet/command"; import { execa } from "execa"; -export const StartAnvil = command.cli({ +export const ChainStartAnvilFork = command.cli({ description: "Start Anvil in fork mode connected to a specified Ethereum node", params: {}, diff --git a/src/commands/chain/info.ts b/src/commands/chain/info.ts index 523ed5cf..88424efd 100644 --- a/src/commands/chain/info.ts +++ b/src/commands/chain/info.ts @@ -1,21 +1,12 @@ import { command } from "@devnet/command"; -export const KurtosisGetInfo = command.cli({ - description: "Retrieves and displays information about the Kurtosis enclave.", - params: {}, - async handler({ - dre: { - logger, - state, - services: { kurtosis }, - }, - }) { - const kurtosisInfo = await kurtosis.getDockerInfo(false); - if (!kurtosisInfo) { - logger.log(`Kurtosis service is not enabled`); - return; - } +import { BlockscoutGetInfo } from "../blockscout/info.js"; +import { KurtosisDoraK8sInfo } from "../kurtosis/dora/info.js"; +export const ChainGetInfo = command.cli({ + description: "Retrieves and displays information about the chain.", + params: {}, + async handler({ dre, dre: { logger, 
state }}) { logger.log(""); const chainServices = Object.entries(await state.getChain()).filter( ([k]) => !k.endsWith("Private"), @@ -24,8 +15,10 @@ export const KurtosisGetInfo = command.cli({ ["Service", "URL"], [ ...chainServices, - ["dora", kurtosisInfo.dora[0].ports[0].publicUrl!], ], ); + + await dre.runCommand(BlockscoutGetInfo, {}); + await dre.runCommand(KurtosisDoraK8sInfo, {}); }, }); diff --git a/src/commands/chain/up.ts b/src/commands/chain/up.ts index 993f9195..7e474abc 100644 --- a/src/commands/chain/up.ts +++ b/src/commands/chain/up.ts @@ -1,40 +1,40 @@ -import { DevNetError, Params, command } from "@devnet/command"; - -import { DownloadKurtosisArtifacts } from "./artifacts.js"; -import { KurtosisUpdate } from "./update.js"; - -export const KurtosisUp = command.isomorphic({ +import { Params, command } from "@devnet/command"; +import { DevNetError, sleep } from "@devnet/utils"; + +import { BlockscoutUp } from "../blockscout/up.js"; +import { K8sPing } from "../k8s/ping.js"; +import { K8sSetDefaultContext } from "../k8s/set-default-context.js"; +import { KurtosisDoraK8sIngressUp } from "../kurtosis/dora/up.js"; +import { KurtosisDownloadArtifacts } from "../kurtosis/download-artifacts.js"; +import { KurtosisK8sNodesIngressUp } from "../kurtosis/nodes/ingress-up.js"; +import { KurtosisRunPackage } from "../kurtosis/run-package.js"; +import { ChainSyncNodesStateFromK8s } from "./chain-sync-nodes-state-from-k8s.js"; +import { ChainSyncState } from "./chain-sync-state.js"; + +export const ChainUp = command.isomorphic({ description: - "Runs a specific Ethereum package in Kurtosis and updates local JSON database with the network information.", + "Starts the chain", params: { preset: Params.string({ description: "Kurtosis config name." 
}) }, - async handler({ dre, dre: { logger }, params: { preset } }) { - logger.log("Running Ethereum package in Kurtosis..."); - const { name } = dre.network; - const { - state, - services: { kurtosis }, - } = dre; - - const { preset: configPreset } = await state.getKurtosis(); - const configFileName = `${preset ?? configPreset}.yml`; - - const file = await kurtosis.readYaml(configFileName).catch((error: any) => { - logger.warn( - `There was an error in the process of connecting the config, most likely you specified the wrong file name, check the "workspaces/kurtosis" folder`, - ); - - throw new DevNetError(error.message); - }); - - logger.log(`Resolved kurtosis config: ${configFileName}`); - logger.logJson(file); - - await kurtosis.sh`kurtosis run - --enclave ${name} - github.com/ethpandaops/ethereum-package - --args-file ${configFileName}`; - - await dre.runCommand(KurtosisUpdate, {}); - await dre.runCommand(DownloadKurtosisArtifacts, {}); + async handler({ dre, dre: { logger }, params: { preset, } }) { + + const defaultContext = process.env.K8S_KUBECTL_DEFAULT_CONTEXT; + + if (!defaultContext) { + throw new DevNetError('K8S_KUBECTL_DEFAULT_CONTEXT env variable not set'); + } + + await dre.runCommand(K8sSetDefaultContext, { context: defaultContext }); + await dre.runCommand(K8sPing, { context: defaultContext }); + await dre.runCommand(KurtosisRunPackage, { preset: preset ?? 
'' }); + + await sleep(5000); + + await dre.runCommand(ChainSyncNodesStateFromK8s, {}); + await dre.runCommand(KurtosisK8sNodesIngressUp, {}); + await dre.runCommand(ChainSyncState, {}); + + await dre.runCommand(KurtosisDownloadArtifacts, {}); + await dre.runCommand(KurtosisDoraK8sIngressUp, {}); + await dre.runCommand(BlockscoutUp, {}); }, }); diff --git a/src/commands/chain/update.ts b/src/commands/chain/update.ts deleted file mode 100644 index b36615bb..00000000 --- a/src/commands/chain/update.ts +++ /dev/null @@ -1,105 +0,0 @@ -import {assert, command} from "@devnet/command"; - -const ALLOWED_CLS = new Set(["lighthouse", "teku", "prysm"]); - -export const KurtosisUpdate = command.isomorphic({ - description: - "Updates the network configuration using a specific Ethereum package in Kurtosis and stores the configuration in the local JSON database.", - params: {}, - async handler({ - dre: { - logger, - state, - network, - services: {kurtosis}, - }, - }) { - logger.log( - "Updating network configuration using Ethereum package in Kurtosis...", - ); - - const {cl, el, vc} = await kurtosis.getDockerInfo(); - const RPC_PORT_NUM = 8545; - const WS_PORT_NUM = 8546; - - const VC_API_PORT_NUM = 5056; - - const CL_PRYSM_API_PORT_NUM = 3500; - const CL_API_PORT_NUM = 4000; - - const elPorts = el.map((n) => - n.ports.find((p) => p.privatePort === RPC_PORT_NUM), - ); - - assert(elPorts !== undefined, "EL services not found in Kurtosis"); - - const wsElPorts = el - .map((n) => - // RPC_PORT_NUM - erigon - // WS_PORT_NUM - geth - n.ports.find((p) => p?.publicUrl && (p.privatePort === WS_PORT_NUM || p.privatePort === RPC_PORT_NUM)) - ) - - assert(wsElPorts !== undefined, "wsEl services not found in Kurtosis"); - - const clPorts = cl.map((n) => - n.ports.find( - (p) => - p.privatePort === CL_PRYSM_API_PORT_NUM || - p.privatePort === CL_API_PORT_NUM, - ), - ); - - assert(clPorts !== undefined, "cl services not found in Kurtosis"); - - const validVC = vc.filter(v => 
v.name.includes('teku')) - // in kurtosis api configuration the keys are stored differently, some validators use the default key, some use a generated key, but they are stored in different places. - // TODO: In the future, we need to either improve etherium-package or write a parser. - // https://github.com/search?q=repo%3Aethpandaops%2Fethereum-package+keymanager&type=code&p=2 - // lighthouse "/validator-keys/keys/api-token.txt", - assert(validVC.length > 0, "Teku validator was not found in the running configuration. At least one teku client must be running to work correctly.") - - const vcPorts = validVC.map((n) => - n.ports.find((p) => p.privatePort === VC_API_PORT_NUM), - ); - - assert(vcPorts !== undefined, "vc services not found in Kurtosis"); - - const clNodesSpecs = cl - .filter((c) => ALLOWED_CLS.has(c.client)) - .map((c) => { - const port = c.ports.find( - (p) => { - if (c.client === "prysm") { - return p.privatePort === CL_PRYSM_API_PORT_NUM - } - return p.privatePort === CL_API_PORT_NUM - } - ); - - return { - ...c, - ports: port ? 
[port] : [], - }; - }); - - - const binding = { - clNodes: clPorts.map((n) => n!.publicUrl), - clNodesPrivate: clPorts.map((n) => n!.privateUrl), - elNodes: elPorts.map((n) => n!.publicUrl), - elNodesPrivate: elPorts.map((n) => n!.privateUrl), - validatorsApi: vcPorts.map((n) => n!.publicUrl), - validatorsApiPrivate: vcPorts.map((n) => n!.privateUrl), - - elWs: wsElPorts.map((n) => n!.publicUrl), - elWsPrivate: wsElPorts.map((n) => n!.privateUrl), - clNodesSpecs: clNodesSpecs, - }; - - await state.updateChain({ - binding, - name: network.name, - }); - }, -}); diff --git a/src/commands/config.ts b/src/commands/config.ts index db3bdabb..60f6f53d 100644 --- a/src/commands/config.ts +++ b/src/commands/config.ts @@ -1,13 +1,13 @@ import { command } from "@devnet/command"; import { BlockscoutGetInfo } from "./blockscout/info.js"; -import { KurtosisGetInfo } from "./chain/info.js"; +import { ChainGetInfo } from "./chain/info.js"; export const ConfigCommand = command.cli({ description: "Print public DevNet config", params: {}, async handler({ dre }) { await dre.runCommand(BlockscoutGetInfo, {}) - await dre.runCommand(KurtosisGetInfo, {}) + await dre.runCommand(ChainGetInfo, {}) }, }); diff --git a/src/commands/council-k8s/build.ts b/src/commands/council-k8s/build.ts new file mode 100644 index 00000000..bcbefa60 --- /dev/null +++ b/src/commands/council-k8s/build.ts @@ -0,0 +1,38 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +import { GitCheckout } from "../git/checkout.js"; + +export const CouncilK8sBuild = command.cli({ + description: "Build Council and push to Docker registry", + params: {}, + async handler({ dre, dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const TAG = `kt-${network.name}`; + const IMAGE = 'lido/council'; + + await dre.runCommand(GitCheckout, { + service: "council", + ref: "feat/devnet", // TODO make configurable from global yaml 
config + }); + + await buildAndPushDockerImage({ + cwd: services.council.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: '.', + imageName: IMAGE, + tag: TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 'changeme', + }); + + logger.log(`Council image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateCouncilK8sImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }) + }, +}); diff --git a/src/commands/council-k8s/constants/council-k8s.constants.ts b/src/commands/council-k8s/constants/council-k8s.constants.ts new file mode 100644 index 00000000..309f2c4b --- /dev/null +++ b/src/commands/council-k8s/constants/council-k8s.constants.ts @@ -0,0 +1,4 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-council`; diff --git a/src/commands/council-k8s/down.ts b/src/commands/council-k8s/down.ts new file mode 100644 index 00000000..501206dc --- /dev/null +++ b/src/commands/council-k8s/down.ts @@ -0,0 +1,51 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; + +import { NAMESPACE } from "./constants/council-k8s.constants.js"; + +export const CouncilK8sDown = command.cli({ + description: "Stop Council(s) in K8s", + params: { + force: Params.boolean({ + description: "Do not check that the Council(s) was already stopped", + default: false, + required: false, + }), + }, + async handler({ dre, dre: { logger, services: { council }, state }, params }) { + if (!(await state.isCouncilK8sRunning()) && !(params.force)) { + logger.log("Council not running. 
Skipping"); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No Council releases found in namespace [${NAMESPACE(dre)}]. Skipping...`); + return; + } + + for (const release of releases) { + const helmLidoCouncilSh = council.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE: release, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmLidoCouncilSh`make debug`; + await helmLidoCouncilSh`make lint`; + await helmLidoCouncilSh`make uninstall`; + logger.log(`Council [${release}] stopped.`); + } + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeCouncilK8s(); + } +}); diff --git a/src/commands/council-k8s/extensions/council-k8s.extension.ts b/src/commands/council-k8s/extensions/council-k8s.extension.ts new file mode 100644 index 00000000..12367df1 --- /dev/null +++ b/src/commands/council-k8s/extensions/council-k8s.extension.ts @@ -0,0 +1,113 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getCouncilK8sImage(must?: M,): Promise>; + getCouncilK8sRunning(must?: M,): Promise>; + getCouncilK8sState(must?: M,): Promise>; + + isCouncilK8sImageReady(): Promise; + isCouncilK8sRunning(): Promise; + + removeCouncilK8s(): Promise; + + updateCouncilK8sImage(state: CouncilK8sStateImage): Promise; + updateCouncilK8sRunning(state: CouncilK8sStateRunning): Promise; + } + + export interface Config { + councilK8s: CouncilK8sState; + } +} + +export const CouncilK8sStateImage = z.object({ + image: z.string(), + tag: z.string(), + registryHostname: z.string() +}); + +export type CouncilK8sStateImage = z.infer; + +export const 
CouncilK8sStateRunning = z.object({ + helmReleases: z.array(z.string()), +}); + +export type CouncilK8sStateRunning = z.infer; + +export const CouncilK8sState = z.object({ + image: CouncilK8sStateImage.optional(), + running: CouncilK8sStateRunning.optional(), +}); + +export type CouncilK8sState = z.infer; + +export const councilK8sExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateCouncilK8sImage = (async function (stateImage: CouncilK8sStateImage) { + const state = await dre.state.getCouncilK8sState(false); + await dre.state.updateProperties("councilK8s", { ...state, image: stateImage }); + }); + + dre.state.updateCouncilK8sRunning = (async function (stateRunning: CouncilK8sStateRunning) { + const state = await dre.state.getCouncilK8sState(false); + await dre.state.updateProperties("councilK8s", { ...state, running: stateRunning }); + }); + + dre.state.removeCouncilK8s = (async function () { + await dre.state.updateProperties("councilK8s", {}); + }); + + dre.state.isCouncilK8sImageReady = (async function () { + const state = await dre.state.getCouncilK8sImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isCouncilK8sRunning = (async function () { + const state = await dre.state.getCouncilK8sRunning(false); + return state && !isEmptyObject(state) && (state.helmReleases !== undefined) && state.helmReleases.length > 0; + }); + + dre.state.getCouncilK8sImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { + image: "councilK8s.image.image", + tag: "councilK8s.image.tag", + registryHostname: "councilK8s.image.registryHostname", + }, + "councilK8s", + CouncilK8sStateImage, + must, + ); + }); + + dre.state.getCouncilK8sRunning = (async function (must: M = true as M) { + return dre.state.getProperties( + { + helmReleases: "councilK8s.running.helmReleases", + }, + "councilK8s", + CouncilK8sStateRunning, + must, + ); + }); + + dre.state.getCouncilK8sState = (async 
function (must: M = true as M) { + return dre.state.getProperties( + 'councilK8s', + "councilK8s", + CouncilK8sState, + must, + ); + }); +}; diff --git a/src/commands/council-k8s/up.ts b/src/commands/council-k8s/up.ts new file mode 100644 index 00000000..5426a6c4 --- /dev/null +++ b/src/commands/council-k8s/up.ts @@ -0,0 +1,106 @@ +import { command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + createNamespaceIfNotExists, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { CouncilK8sBuild } from "./build.js"; +import { NAMESPACE } from "./constants/council-k8s.constants.js"; +import { councilK8sExtension } from "./extensions/council-k8s.extension.js"; + +export const CouncilK8sUp = command.cli({ + description: "Start Council(s) in K8s", + params: {}, + extensions: [councilK8sExtension], + async handler({ dre, dre: { logger, services, state } }) { + const { council } = services; + + if (!(await state.isChainDeployed())) { + throw new DevNetError("Chain is not deployed"); + } + + if (!(await state.isLidoDeployed())) { + throw new DevNetError("Lido is not deployed"); + } + + if (!(await state.isCSMDeployed())) { + throw new DevNetError("CSM is not deployed"); + } + + if (!(await state.isKapiK8sRunning())) { + throw new DevNetError("KAPI is not deployed"); + } + + await dre.runCommand(CouncilK8sBuild, {}); + + const { council1, council2 } = await state.getNamedWallet(); + const { elPrivate } = await state.getChain(); + const { locator } = await state.getLido(); + const { privateUrl } = await state.getKapiK8sRunning(); + const { image, tag, registryHostname } = await state.getCouncilK8sImage(); + + const { address: dataBusAddress } = await state.getDataBus(); + + const env: Record = { + PORT: "9040", + LOG_LEVEL: "debug", + LOG_FORMAT: "json", + RPC_URL: 
elPrivate, + KEYS_API_HOST: privateUrl.replace(":3000", ""), // TODO make more beautiful + KEYS_API_PORT: "3000", + PUBSUB_SERVICE: "evm-chain", + EVM_CHAIN_DATA_BUS_ADDRESS: dataBusAddress, + EVM_CHAIN_DATA_BUS_PROVIDER_URL: elPrivate, + LOCATOR_DEVNET_ADDRESS: locator, + }; + + const helmReleases = [ + { HELM_RELEASE: 'lido-council-1', privateKey: council1.privateKey }, + { HELM_RELEASE: 'lido-council-2', privateKey: council2.privateKey }, + ]; + + for (const release of helmReleases) { + const { HELM_RELEASE, privateKey } = release; + + const alreadyDeployedHelmReleases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + if (alreadyDeployedHelmReleases?.includes(HELM_RELEASE)) { + logger.log(`Council release ${HELM_RELEASE} already running`); + continue; + } + + const helmLidoCouncilSh = council.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + WALLET_PRIVATE_KEY: privateKey, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmLidoCouncilSh`make debug`; + await helmLidoCouncilSh`make lint`; + + try { + await helmLidoCouncilSh`make install`; + } catch { + // rollback changes + await helmLidoCouncilSh`make uninstall`; + } + } + + await state.updateCouncilK8sRunning({ + helmReleases: ['active'], + }); + }, +}); diff --git a/src/commands/council/down.ts b/src/commands/council/down.ts deleted file mode 100644 index 3c98efd9..00000000 --- a/src/commands/council/down.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { command } from "@devnet/command"; - -export const CouncilDown = command.cli({ - description: "Stop Council", - params: {}, - async handler({ dre: { logger, services } }) { - const { council } = services; - - logger.log("Stopping Council..."); - - await council.sh`docker compose -f 
docker-compose.devnet.yml down -v`; - logger.log("Council stopped successfully."); - }, -}); diff --git a/src/commands/council/logs.ts b/src/commands/council/logs.ts deleted file mode 100644 index efc668cc..00000000 --- a/src/commands/council/logs.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { command } from "@devnet/command"; - -export const CouncilLogs = command.cli({ - description: "Show Council logs", - params: {}, - async handler({ dre: { services } }) { - const { council } = services; - - await council.sh`docker compose -f docker-compose.devnet.yml logs -f council_daemon`; - }, -}); diff --git a/src/commands/council/up.ts b/src/commands/council/up.ts deleted file mode 100644 index da53ca6f..00000000 --- a/src/commands/council/up.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { command } from "@devnet/command"; - -export const CouncilUp = command.cli({ - description: "Start Council", - params: {}, - async handler({ dre: { logger, services, state, network } }) { - const { council } = services; - - const { council1, council2 } = await state.getNamedWallet(); - const { elPrivate } = await state.getChain(); - const { locator } = await state.getLido(); - - const { address: dataBusAddress } = await state.getDataBus(); - - const env = { - PORT_1: "9040", - PORT_2: "9041", - LOG_LEVEL: "debug", - LOG_FORMAT: "json", - RPC_URL: elPrivate, - WALLET_PRIVATE_KEY_1: council1.privateKey, - WALLET_PRIVATE_KEY_2: council2.privateKey, - KEYS_API_HOST: "http://keys_api", - KEYS_API_PORT: "9030", - PUBSUB_SERVICE: "evm-chain", - EVM_CHAIN_DATA_BUS_ADDRESS: dataBusAddress, - EVM_CHAIN_DATA_BUS_PROVIDER_URL: elPrivate, - RABBITMQ_URL: "ws://dsm_rabbit:15674/ws", - RABBITMQ_LOGIN: "guest", - RABBITMQ_PASSCODE: "guest", - LOCATOR_DEVNET_ADDRESS: `"${locator}"`, - DOCKER_NETWORK_NAME: `kt-${network.name}`, - COMPOSE_PROJECT_NAME: `council-${network.name}`, - }; - - await council.writeENV(".env", env); - - await council.sh`docker compose -f docker-compose.devnet.yml up --build -d`; - - 
logger.log("Council started successfully."); - }, -}); diff --git a/src/commands/csm-prover-tool-k8s/build.ts b/src/commands/csm-prover-tool-k8s/build.ts new file mode 100644 index 00000000..4db605dd --- /dev/null +++ b/src/commands/csm-prover-tool-k8s/build.ts @@ -0,0 +1,31 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +export const CSMProverToolK8sBuild = command.cli({ + description: "Build CSM Prover Tool and push to Docker registry", + params: {}, + async handler({ dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const TAG = `kt-${network.name}`; + const IMAGE = `lido/csm-prover-tool`; + + await buildAndPushDockerImage({ + cwd: services.csmProverTool.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: '.', + imageName: IMAGE, + tag: TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 
'changeme', + }); + + logger.log(`csm-prover-tool image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateCSMProverToolK8sImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }) + }, +}); diff --git a/src/commands/csm-prover-tool-k8s/constants/csm-prover-tool-k8s.constants.ts b/src/commands/csm-prover-tool-k8s/constants/csm-prover-tool-k8s.constants.ts new file mode 100644 index 00000000..c4dddbad --- /dev/null +++ b/src/commands/csm-prover-tool-k8s/constants/csm-prover-tool-k8s.constants.ts @@ -0,0 +1,6 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-csm-prover-tool`; + +export const SERVICE_NAME = "CSM Prover Tool"; diff --git a/src/commands/csm-prover-tool-k8s/down.ts b/src/commands/csm-prover-tool-k8s/down.ts new file mode 100644 index 00000000..f0d2c4a2 --- /dev/null +++ b/src/commands/csm-prover-tool-k8s/down.ts @@ -0,0 +1,51 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; + +import { NAMESPACE, SERVICE_NAME } from "./constants/csm-prover-tool-k8s.constants.js"; + +export const CSMProverToolK8sDown = command.cli({ + description: `Stop ${SERVICE_NAME} in K8s with Helm`, + params: { + force: Params.boolean({ + description: `Do not check that the ${SERVICE_NAME} was already stopped`, + default: false, + required: false, + }), + }, + async handler({ dre, dre: { services: { csmProverTool }, logger, state }, params }) { + if (!(await state.isCSMProverToolK8sRunning()) && !(params.force)) { + logger.log(`${SERVICE_NAME} not running. 
Skipping`); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No ${SERVICE_NAME} releases found in namespace [${NAMESPACE(dre)}]. Skipping...`); + return; + } + + const HELM_RELEASE = releases[0]; + const helmSh = csmProverTool.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make uninstall`; + + logger.log(`${SERVICE_NAME} stopped.`); + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeCSMProverToolK8sState(); + }, +}); diff --git a/src/commands/csm-prover-tool-k8s/extensions/csm-prover-tool-k8s.extension.ts b/src/commands/csm-prover-tool-k8s/extensions/csm-prover-tool-k8s.extension.ts new file mode 100644 index 00000000..eca21bff --- /dev/null +++ b/src/commands/csm-prover-tool-k8s/extensions/csm-prover-tool-k8s.extension.ts @@ -0,0 +1,113 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getCSMProverToolK8sImage(must?: M,): Promise>; + getCSMProverToolK8sRunning(must?: M,): Promise>; + getCSMProverToolK8sState(must?: M,): Promise>; + + isCSMProverToolK8sImageReady(): Promise; + isCSMProverToolK8sRunning(): Promise; + + removeCSMProverToolK8sState(): Promise; + + updateCSMProverToolK8sImage(state: CSMProverToolK8sStateImage): Promise; + updateCSMProverToolK8sRunning(state: CSMProverToolK8sStateRunning): Promise; + } + + export interface Config { + CSMProverToolK8s: CSMProverToolK8sState; + } +} + +export const CSMProverToolK8sStateImage = z.object({ + image: z.string(), + tag: z.string(), + 
registryHostname: z.string() +}); + +export type CSMProverToolK8sStateImage = z.infer; + +export const CSMProverToolK8sStateRunning = z.object({ + helmRelease: z.string(), +}); + +export type CSMProverToolK8sStateRunning = z.infer; + +export const CSMProverToolK8sState = z.object({ + image: CSMProverToolK8sStateImage.optional(), + running: CSMProverToolK8sStateRunning.optional(), +}); + +export type CSMProverToolK8sState = z.infer; + +export const CSMProverToolK8sExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateCSMProverToolK8sImage = (async function (stateImage: CSMProverToolK8sStateImage) { + const state = await dre.state.getCSMProverToolK8sState(false); + await dre.state.updateProperties("CSMProverToolK8s", { ...state, image: stateImage }); + }); + + dre.state.updateCSMProverToolK8sRunning = (async function (stateRunning: CSMProverToolK8sStateRunning) { + const state = await dre.state.getCSMProverToolK8sState(false); + await dre.state.updateProperties("CSMProverToolK8s", { ...state, running: stateRunning }); + }); + + dre.state.removeCSMProverToolK8sState = (async function () { + await dre.state.updateProperties("CSMProverToolK8s", {}); + }); + + dre.state.isCSMProverToolK8sImageReady = (async function () { + const state = await dre.state.getCSMProverToolK8sImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isCSMProverToolK8sRunning = (async function () { + const state = await dre.state.getCSMProverToolK8sRunning(false); + return state && !isEmptyObject(state) && (state.helmRelease !== undefined); + }); + + dre.state.getCSMProverToolK8sImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { + image: "CSMProverToolK8s.image.image", + tag: "CSMProverToolK8s.image.tag", + registryHostname: "CSMProverToolK8s.image.registryHostname", + }, + "CSMProverToolK8s", + CSMProverToolK8sStateImage, + must, + ); + }); + + dre.state.getCSMProverToolK8sRunning = (async 
function (must: M = true as M) { + return dre.state.getProperties( + { + helmRelease: "CSMProverToolK8s.running.helmRelease", + }, + "CSMProverToolK8s", + CSMProverToolK8sStateRunning, + must, + ); + }); + + dre.state.getCSMProverToolK8sState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'CSMProverToolK8s', + "CSMProverToolK8s", + CSMProverToolK8sState, + must, + ); + }); +}; diff --git a/src/commands/csm-prover-tool-k8s/up.ts b/src/commands/csm-prover-tool-k8s/up.ts new file mode 100644 index 00000000..54a4cdef --- /dev/null +++ b/src/commands/csm-prover-tool-k8s/up.ts @@ -0,0 +1,87 @@ +import { command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { createNamespaceIfNotExists } from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { CSMProverToolK8sBuild } from "./build.js"; +import { NAMESPACE, SERVICE_NAME } from "./constants/csm-prover-tool-k8s.constants.js"; +import { CSMProverToolK8sExtension } from "./extensions/csm-prover-tool-k8s.extension.js"; + +export const CSMProverToolK8sUp = command.cli({ + description: `Start ${SERVICE_NAME} on K8s with Helm`, + params: {}, + extensions: [CSMProverToolK8sExtension], + async handler({ dre, dre: { state, services: { csmProverTool }, logger } }) { + if (await state.isCSMProverToolK8sRunning()) { + logger.log(`${SERVICE_NAME} already running`); + return; + } + + if (!(await state.isChainDeployed())) { + throw new DevNetError("Chain is not deployed"); + } + + if (!(await state.isLidoDeployed())) { + throw new DevNetError("Lido is not deployed"); + } + + if (!(await state.isKapiK8sRunning())) { + throw new DevNetError("KAPI is not running"); + } + + if (!(await state.isCSMDeployed())) { + throw new DevNetError("CSM is not deployed"); + } + + await dre.runCommand(CSMProverToolK8sBuild, {}); + + if (!(await 
state.isCSMProverToolK8sImageReady())) { + throw new DevNetError(`${SERVICE_NAME} image is not ready`); + } + + const { elPrivate, clPrivate } = await state.getChain(); + const { verifier: csVerifier, module: csModule } = await state.getCSM(); + const { privateUrl: kapiPrivateUrl } = await state.getKapiK8sRunning(); + const { deployer } = await state.getNamedWallet(); + const { image, tag, registryHostname } = await state.getCSMProverToolK8sImage(); + const env: Record = { + ...csmProverTool.config.constants, + + CHAIN_ID: "32382", + EL_RPC_URLS: elPrivate, + CL_API_URLS: clPrivate, + KEYSAPI_API_URLS: kapiPrivateUrl, + CSM_ADDRESS: csModule, + VERIFIER_ADDRESS: csVerifier, + TX_SIGNER_PRIVATE_KEY: deployer.privateKey, + }; + + const HELM_RELEASE = 'lido-csm-prover-tool'; + const helmSh = csmProverTool.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make install`; + + await state.updateCSMProverToolK8sRunning({ + helmRelease: HELM_RELEASE, + }); + + logger.log(`${SERVICE_NAME} started.`); + }, +}); diff --git a/src/commands/csm/activate.ts b/src/commands/csm/activate.ts index 4632ee27..b202b03c 100644 --- a/src/commands/csm/activate.ts +++ b/src/commands/csm/activate.ts @@ -44,11 +44,18 @@ export const ActivateCSM = command.cli({ const csmState = await dre.state.getCSM(); const clClient = await network.getCLClient(); + if (await state.isCSMActivated()) { + logger.log("CSM already activated"); + return; + } + await dre.network.waitEL(); const { HASH_CONSENSUS_CSM_EPOCHS_PER_FRAME } = oracle.config.constants; - const currentEpoch = await clClient.getHeadEpoch(); + let currentEpoch = await 
clClient.getHeadEpoch(); + // Ensure a minimum epoch for having non-zero block roots from CL state on initial epoch. + currentEpoch = Math.max(currentEpoch, 256); // SLOTS_PER_HISTORICAL_ROOT / SLOTS_PER_EPOCH const initialEpoch = HASH_CONSENSUS_CSM_EPOCHS_PER_FRAME + currentEpoch + 2; const env: CSMActivateENV = { @@ -72,5 +79,7 @@ export const ActivateCSM = command.cli({ logger.log("Deploying and configuring csm components..."); await lidoCLI.sh({ env })`./run.sh omnibus script devnetCSMStart`; + + await state.updateCSMActivated({ active: true }); }, }); diff --git a/src/commands/csm/add-operator.ts b/src/commands/csm/add-operator.ts index 7150ecc0..184801bb 100644 --- a/src/commands/csm/add-operator.ts +++ b/src/commands/csm/add-operator.ts @@ -14,6 +14,8 @@ export const LidoAddCSMOperatorWithKeys = command.cli({ const { services } = dre; const { lidoCLI } = services; + // TODO check if operator already exists + await dre.network.waitEL(); await lidoCLI.sh`./run.sh csm add-operator-with-keys-from-file generated-keys/${params.name}.json`; diff --git a/src/commands/csm/add-verifier.ts b/src/commands/csm/add-verifier.ts index ca28b5e7..9835d065 100644 --- a/src/commands/csm/add-verifier.ts +++ b/src/commands/csm/add-verifier.ts @@ -30,7 +30,7 @@ export const DeployCSVerifier = command.cli({ data: { ELECTRA_FORK_EPOCH, SLOTS_PER_EPOCH }, } = await clClient.getConfig(); - const blockscoutConfig = await state.getBlockScout(); + const blockscoutConfig = await state.getBlockscout(); const env = { ARTIFACTS_DIR: constants.ARTIFACTS_DIR, diff --git a/src/commands/csm/deploy.ts b/src/commands/csm/deploy.ts index 939f4b51..d095f0a7 100644 --- a/src/commands/csm/deploy.ts +++ b/src/commands/csm/deploy.ts @@ -2,6 +2,7 @@ import { Params, command } from "@devnet/command"; import { CSMInstall } from "./install.js"; import { CSMUpdateState } from "./update-state.js"; +import { csmExtension } from "./extensions/csm.extension.js"; type CSMENVConfig = { FOUNDRY_PROFILE: string; @@ 
-16,10 +17,12 @@ type CSMENVConfig = { CSM_ORACLE_2_ADDRESS: string; CSM_ORACLE_3_ADDRESS: string; CSM_SECOND_ADMIN_ADDRESS: string; + CSM_STAKING_MODULE_ID: string; DEPLOY_CONFIG: string; DEPLOYER_PRIVATE_KEY: string; DEVNET_CHAIN_ID: string; DEVNET_ELECTRA_EPOCH: string; + DEVNET_CAPELLA_EPOCH: string; DEVNET_GENESIS_TIME: string; DEVNET_SLOTS_PER_EPOCH: string; EVM_SCRIPT_EXECUTOR_ADDRESS: string; @@ -27,6 +30,7 @@ type CSMENVConfig = { UPGRADE_CONFIG: string; VERIFIER_API_KEY: string; VERIFIER_URL: string; + FOUNDRY_BLOCK_GAS_LIMIT: string; }; export const DeployCSMContracts = command.cli({ @@ -37,6 +41,7 @@ export const DeployCSMContracts = command.cli({ description: "Verify smart contracts", }), }, + extensions: [csmExtension], async handler({ params, dre, dre: { logger } }) { const { state, services, network } = dre; const { csm, oracle } = services; @@ -44,6 +49,11 @@ export const DeployCSMContracts = command.cli({ config: { constants }, } = csm; + if (await state.isCSMDeployed()) { + logger.log("CSM contracts are already deployed."); + return; + } + await dre.network.waitEL(); const { agent, locator, treasury } = await state.getLido(); @@ -51,6 +61,7 @@ export const DeployCSMContracts = command.cli({ const { deployer, secondDeployer, oracle1, oracle2, oracle3 } = await state.getNamedWallet(); + await network.waitCL(); const clClient = await network.getCLClient(); const { @@ -58,10 +69,10 @@ export const DeployCSMContracts = command.cli({ } = await clClient.getGenesis(); const { - data: { ELECTRA_FORK_EPOCH, SLOTS_PER_EPOCH }, + data: { ELECTRA_FORK_EPOCH, SLOTS_PER_EPOCH, CAPELLA_FORK_EPOCH }, } = await clClient.getConfig(); - const blockscoutConfig = await state.getBlockScout(); + const blockscoutConfig = await state.getBlockscout(); const env: CSMENVConfig = { FOUNDRY_PROFILE: constants.FOUNDRY_PROFILE, @@ -77,6 +88,8 @@ export const DeployCSMContracts = command.cli({ CSM_ORACLE_3_ADDRESS: oracle3.publicKey, CSM_SECOND_ADMIN_ADDRESS: 
secondDeployer.publicKey, + CSM_STAKING_MODULE_ID: constants.CSM_STAKING_MODULE_ID, + DEVNET_CAPELLA_EPOCH: CAPELLA_FORK_EPOCH, DEPLOY_CONFIG: constants.DEPLOY_CONFIG, DEPLOYER_PRIVATE_KEY: deployer.privateKey, DEVNET_CHAIN_ID: "32382", @@ -90,6 +103,7 @@ export const DeployCSMContracts = command.cli({ VERIFIER_API_KEY: constants.VERIFIER_API_KEY, VERIFIER_URL: blockscoutConfig.api, + FOUNDRY_BLOCK_GAS_LIMIT: "1000000000" }; logger.logJson(env); diff --git a/src/commands/csm/extensions/csm.extension.ts b/src/commands/csm/extensions/csm.extension.ts new file mode 100644 index 00000000..a8b8d90b --- /dev/null +++ b/src/commands/csm/extensions/csm.extension.ts @@ -0,0 +1,124 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getCSM( + must?: M, + ): Promise>; + getCSMActivated( + must?: M + ): Promise>; + getElectraVerifier( + must?: M, + ): Promise>; + + isCSMActivated(): Promise; + isCSMDeployed(): Promise; + updateCSM(state: CSMState): Promise; + updateCSMActivated(state: CSMActiveState): Promise; + updateElectraVerifier(state: CSMNewVerifierState): Promise; + } + + export interface Config { + csm: CSMState; + csmActive: CSMActiveState; + csmNewVerifier: CSMNewVerifierState; + } +} + +export const CSMState = z.object({ + accounting: z.string(), + earlyAdoption: z.string(), + feeDistributor: z.string(), + feeOracle: z.string(), + gateSeal: z.string(), + hashConsensus: z.string(), + lidoLocator: z.string(), + module: z.string(), + verifier: z.string(), + permissionlessGate: z.string(), +}); + +export type CSMState = z.infer; + +export const CSMActiveState = z.object({ + active: z.boolean(), +}); + +export type CSMActiveState = z.infer; + +export 
const CSMNewVerifierState = z.object({ + CSVerifier: z.string(), +}); + +export type CSMNewVerifierState = z.infer; + +export const csmExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateCSM = (async function (state: CSMState) { + await this.updateProperties("csm", state); + }); + + dre.state.updateCSMActivated = (async function (state: CSMActiveState) { + await this.updateProperties("csmActive", state); + }); + + dre.state.getCSM = (async function (must: M = true as M) { + return this.getProperties( + { + accounting: "csm.CSAccounting", + earlyAdoption: "csm.CSEarlyAdoption", + feeDistributor: "csm.CSFeeDistributor", + feeOracle: "csm.CSFeeOracle", + gateSeal: "csm.GateSeal", + hashConsensus: "csm.HashConsensus", + lidoLocator: "csm.LidoLocator", + module: "csm.CSModule", + verifier: "csm.CSVerifier", + permissionlessGate: "csm.PermissionlessGate", + }, + "csm", + CSMState, + must, + ); + }); + + dre.state.isCSMDeployed = (async function () { + const state = await this.getCSM(false); + return !isEmptyObject(state) && state.module !== undefined; + }); + + dre.state.isCSMActivated = (async function () { + const state = await this.getCSMActivated(false); + return !isEmptyObject(state) && state.active === true; + }); + + dre.state.getElectraVerifier = (async function(must: M = true as M) { + return this.getProperties( + { + CSVerifier: "electraVerifier.CSVerifier", + }, + "csmNewVerifier", + CSMNewVerifierState, + must, + ); + }); + + dre.state.updateElectraVerifier = (async function(state: CSMNewVerifierState) { + await this.updateProperties("csmNewVerifier", { electraVerifier: state }); + }); + + dre.state.getCSMActivated = (async function(must: M = true as M) { + return this.getProperties( + "csmActive", + "csmActive", + CSMActiveState, + must, + ); + }); +}; diff --git a/src/commands/data-bus/deploy.ts b/src/commands/data-bus/deploy.ts index 63dba7ba..29aa410e 100644 --- a/src/commands/data-bus/deploy.ts +++ 
b/src/commands/data-bus/deploy.ts @@ -11,7 +11,7 @@ export const DataBusDeploy = command.cli({ const { deployer } = await state.getNamedWallet(); const { elPublic } = await state.getChain(); - const { api, url } = await state.getBlockScout(); + const { api, url } = await state.getBlockscout(); await dre.runCommand(DataBusInstall, {}); diff --git a/src/commands/docker-registry/constants/docker-registry.constants.ts b/src/commands/docker-registry/constants/docker-registry.constants.ts new file mode 100644 index 00000000..396e3947 --- /dev/null +++ b/src/commands/docker-registry/constants/docker-registry.constants.ts @@ -0,0 +1,5 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `lido-devnet-docker-registry`; diff --git a/src/commands/docker-registry/down.ts b/src/commands/docker-registry/down.ts new file mode 100644 index 00000000..143e5acc --- /dev/null +++ b/src/commands/docker-registry/down.ts @@ -0,0 +1,39 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { deleteNamespace } from "@devnet/k8s"; + +import { NAMESPACE } from "./constants/docker-registry.constants.js"; + +export const DockerRegistryDown = command.cli({ + description: "Down Docker registry in k8s", + params: { + force: Params.boolean({ + description: "Do not check that the registry was already stopped", + default: false, + required: false, + }), + }, + async handler({ dre, dre: { logger }, params }) { + const { + state, + services: { dockerRegistry }, + } = dre; + + const dockerRegistrySh = dockerRegistry.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await dockerRegistrySh`make debug`; + await dockerRegistrySh`make lint`; + await dockerRegistrySh`make uninstall`; + + logger.log("Docker registry 
stopped."); + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeDockerRegistry(); + }, +}); diff --git a/src/commands/docker-registry/extensions/docker-registry.extension.ts b/src/commands/docker-registry/extensions/docker-registry.extension.ts new file mode 100644 index 00000000..9689d106 --- /dev/null +++ b/src/commands/docker-registry/extensions/docker-registry.extension.ts @@ -0,0 +1,86 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +import { getK8s, k8s } from "@devnet/k8s"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { DevNetError } from "@devnet/utils"; +import { z } from "zod"; + +const isEmpty = (obj: object): obj is Record => { + for (const prop in obj) { + if (Object.hasOwn(obj, prop)) { + return false; + } + } + + return true; +} + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getDockerRegistry(must?: M,): Promise>; + getDockerRegistryType(): Promise<'external' | 'local'>; + isDockerRegistryAvailable(): Promise; + removeDockerRegistry(): Promise; + updateDockerRegistry(state: DockerRegistryState): Promise; + } + + export interface Config { + dockerRegistry: DockerRegistryState; + } +} + +export const DockerRegistryState = z.object({ + uiUrl: z.string().url(), + registryHostname: z.string(), + registryUrl: z.string().url(), +}); + +export type DockerRegistryState = z.infer; + +export const dockerRegistryExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateDockerRegistry = (async function (state: DockerRegistryState) { + await dre.state.updateProperties("dockerRegistry", state); + }); + + dre.state.removeDockerRegistry = (async function () { + await dre.state.updateProperties("dockerRegistry", {}); + }); + + dre.state.isDockerRegistryAvailable = (async function () { + const state = await dre.state.getDockerRegistry(false); + return state && 
!isEmpty(state); + }); + + dre.state.getDockerRegistryType = (async function () { + const registryType = process.env.DOCKER_REGISTRY_TYPE ?? 'local'; + + return registryType === 'local' ? 'local' : 'external'; + }); + + dre.state.getDockerRegistry = (async function (must: M = true as M) { + + const registryType = await dre.state.getDockerRegistryType(); + + if (registryType === 'local') { + return await dre.state.getProperties( + "dockerRegistry", + "dockerRegistry", + DockerRegistryState, + must, + ); + } + + const registryHostname = process.env.DOCKER_REGISTRY_EXTERNAL_HOSTNAME; + + if (!registryHostname) { + throw new DevNetError(`DOCKER_REGISTRY_EXTERNAL_HOSTNAME env variable is not set`); + } + + return { + registryHostname, + registryUrl: `https://${registryHostname}`, + uiUrl: `https://${process.env.DOCKER_REGISTRY_EXTERNAL_UI_HOSTNAME}`, + }; + }); +}; diff --git a/src/commands/docker-registry/info.ts b/src/commands/docker-registry/info.ts new file mode 100644 index 00000000..90824388 --- /dev/null +++ b/src/commands/docker-registry/info.ts @@ -0,0 +1,26 @@ +import { command } from "@devnet/command"; + +export const DockerRegistryGetInfo = command.isomorphic({ + description: + "Retrieves and displays information about the docker registry service.", + params: {}, + async handler({ dre: { logger, state } }) { + const dockerRegistryDeployed = await state.isDockerRegistryAvailable(); + if (!dockerRegistryDeployed) { + logger.log(`Docker registry service is not enabled`); + return; + } + + logger.log(""); + + const dockerRegistryInfo = await state.getDockerRegistry(); + + logger.table( + ["Service", "URL"], + [ + ["docker registry ui", dockerRegistryInfo.uiUrl!], + ["docker registry", dockerRegistryInfo.registryUrl!], + ], + ); + }, +}); diff --git a/src/commands/docker-registry/push-pull-secret-to-k8s.ts b/src/commands/docker-registry/push-pull-secret-to-k8s.ts new file mode 100644 index 00000000..58d02dd1 --- /dev/null +++ 
b/src/commands/docker-registry/push-pull-secret-to-k8s.ts @@ -0,0 +1,40 @@ +import { Params, command } from "@devnet/command"; +import { getK8s, k8s } from "@devnet/k8s"; + +import { NAMESPACE } from "./constants/docker-registry.constants.js"; +import { registryPullSecretTmpl } from "./templates/registry-pull-secret.template.js"; + +export const DockerRegistryPushPullSecretToK8s = command.cli({ + description: "Push pull secret to k8s", + params: { + namespace: Params.string({ + description: "Namespace to use", + default: '', + required: false, + }), + }, + async handler({ dre, dre: { logger }, params }) { + if (!(await dre.state.isDockerRegistryAvailable())) { + logger.log("Docker registry already stopped."); + return; + } + + const CUSTOM_NAMESPACE = params.namespace && params.namespace !== '' + ? params.namespace + : NAMESPACE(dre); + + logger.log("Creating registry pull secret..."); + const kc = await getK8s(); + + const k8sCoreApi = kc.makeApiClient(k8s.CoreV1Api); + const pullSecret = await registryPullSecretTmpl(dre, CUSTOM_NAMESPACE); + + try { + // Secret doesn't exist, create it + await k8sCoreApi.createNamespacedSecret({ namespace: pullSecret.metadata.namespace, body: pullSecret }); + logger.log(`Successfully created registry authentication pull secret: ${pullSecret.metadata.name}`); + } catch { + logger.log(`Pull secret ${pullSecret.metadata.name} already exists in namespace [${CUSTOM_NAMESPACE}]. 
Skipping creation.`); + } + }, +}); diff --git a/src/commands/docker-registry/templates/registry-auth-secret.template.ts b/src/commands/docker-registry/templates/registry-auth-secret.template.ts new file mode 100644 index 00000000..129037d6 --- /dev/null +++ b/src/commands/docker-registry/templates/registry-auth-secret.template.ts @@ -0,0 +1,37 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +import * as k8s from "@kubernetes/client-node"; +import bcrypt from "bcryptjs"; +import { NAMESPACE } from "../constants/docker-registry.constants.js"; + +export const registryAuthSecretTmpl = async ( + dre: DevNetRuntimeEnvironmentInterface +) => { + const username = process.env.DOCKER_REGISTRY_USERNAME; + const password = process.env.DOCKER_REGISTRY_PASSWORD; + + if (!username || !password) { + throw new Error("DOCKER_REGISTRY_USERNAME and DOCKER_REGISTRY_PASSWORD environment variables are required"); + } + + // Generate htpasswd entry using system htpasswd command + const saltRounds = 12; + const hashedPassword = await bcrypt.hash(password, saltRounds); + const htpasswdEntry = `${username}:${hashedPassword}`; + + return { + apiVersion: "v1", + kind: "Secret", + metadata: { + name: "registry-auth-secret", + namespace: NAMESPACE(dre), + labels: { + "com.lido.devnet": "true", + "com.lido.devnet.docker-registry": "secret", + }, + }, + type: "Opaque", + data: { + htpasswd: Buffer.from(htpasswdEntry).toString('base64'), + }, + } satisfies k8s.V1Secret; +}; diff --git a/src/commands/docker-registry/templates/registry-pull-secret.template.ts b/src/commands/docker-registry/templates/registry-pull-secret.template.ts new file mode 100644 index 00000000..a1f79fe2 --- /dev/null +++ b/src/commands/docker-registry/templates/registry-pull-secret.template.ts @@ -0,0 +1,42 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +import * as k8s from "@kubernetes/client-node"; + +import { NAMESPACE } from "../constants/docker-registry.constants.js"; + 
+export const registryPullSecretTmpl = async ( + dre: DevNetRuntimeEnvironmentInterface, + namespace: string = NAMESPACE(dre), +) => { + const username = process.env.DOCKER_REGISTRY_USERNAME; + const password = process.env.DOCKER_REGISTRY_PASSWORD; + + if (!username || !password) { + throw new Error("DOCKER_REGISTRY_USERNAME and DOCKER_REGISTRY_PASSWORD environment variables are required"); + } + + const registry = await dre.state.getDockerRegistry(); + + return { + apiVersion: "v1", + kind: "Secret", + metadata: { + name: "registry-pull-secret", + namespace: `${namespace}`, + labels: { + "com.lido.devnet": "true", + "com.lido.devnet.docker-registry": "pull-secret", + }, + }, + type: "kubernetes.io/dockerconfigjson", + stringData: { + ".dockerconfigjson": `{ + "auths": { + "${registry.registryHostname}": { + "username": "${username}", + "password": "${password}" + } + } + }`, + }, + } satisfies k8s.V1Secret; +}; diff --git a/src/commands/docker-registry/up.ts b/src/commands/docker-registry/up.ts new file mode 100644 index 00000000..39b08530 --- /dev/null +++ b/src/commands/docker-registry/up.ts @@ -0,0 +1,90 @@ +import { command, Params } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { addPrefixToIngressHostname, getK8s, k8s } from "@devnet/k8s"; + +import { NAMESPACE } from "./constants/docker-registry.constants.js"; +import { dockerRegistryExtension } from "./extensions/docker-registry.extension.js"; +import { registryAuthSecretTmpl } from "./templates/registry-auth-secret.template.js"; + +export const DockerRegistryUp = command.cli({ + description: "Start Docker registry in k8s", + params: { + force: Params.boolean({ + description: "Do not check that the registry was already deployed", + default: false, + required: false, + }), + }, + extensions: [dockerRegistryExtension], + async handler({ dre, dre: { logger }, params }) { + const { + state, + services: { dockerRegistry }, + } = dre; + + if (await 
dre.state.isDockerRegistryAvailable() && !(params.force)) { + logger.log("Docker registry already deployed."); + return; + } + + if ((await dre.state.getDockerRegistryType()) === 'external' && !(params.force)) { + logger.log("Docker registry is external. Skipping deployment."); + } + + + const DOCKER_REGISTRY_INGRESS_HOSTNAME = + process.env.DOCKER_REGISTRY_LOCAL_INGRESS_HOSTNAME; + const DOCKER_REGISTRY_UI_INGRESS_HOSTNAME = + process.env.DOCKER_REGISTRY_LOCAL_INGRESS_UI_HOSTNAME; + + if (!DOCKER_REGISTRY_INGRESS_HOSTNAME) { + throw new Error(`DOCKER_REGISTRY_LOCAL_INGRESS_HOSTNAME env variable is not set`); + } + + if (!DOCKER_REGISTRY_UI_INGRESS_HOSTNAME) { + throw new Error(`DOCKER_REGISTRY_LOCAL_INGRESS_UI_HOSTNAME env variable is not set`); + } + + // Create and deploy registry authentication secret + logger.log("Creating registry authentication secret..."); + const kc = await getK8s(); + const k8sCoreApi = kc.makeApiClient(k8s.CoreV1Api); + const authSecret = await registryAuthSecretTmpl(dre); + + try { + await k8sCoreApi.createNamespace({ body: { metadata: { name: authSecret.metadata.namespace } } }); + } catch {} + + try { + // Secret doesn't exist, create it + await k8sCoreApi.createNamespacedSecret({ namespace: authSecret.metadata.namespace, body: authSecret }); + logger.log(`Successfully created registry authentication secret: ${authSecret.metadata.name}`); + } catch (error: unknown) { + logger.log(`Secret ${authSecret.metadata.name} already exists. Skipping creation. 
[${String(error)}]`); + } + + const dockerRegistrySh = dockerRegistry.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + // Makefile-related ENV vars for Helm charts overrides + // see workspaces/dockerRegistry/Makefile + DOCKER_REGISTRY_INGRESS_HOSTNAME, + DOCKER_REGISTRY_UI_INGRESS_HOSTNAME, + }, + }); + + await dockerRegistrySh`make debug`; + await dockerRegistrySh`make lint`; + await dockerRegistrySh`make install`; + + const registryHostname = DOCKER_REGISTRY_INGRESS_HOSTNAME; + const registryUrl = `http://${DOCKER_REGISTRY_INGRESS_HOSTNAME}`; + const uiUrl = `http://${DOCKER_REGISTRY_UI_INGRESS_HOSTNAME}`; + + logger.log(`Docker registry UI started on URL: ${uiUrl}`); + logger.log(`Docker registry started on URL: ${registryUrl}`); + + await state.updateDockerRegistry({ registryUrl, uiUrl, registryHostname }); + }, +}); diff --git a/src/commands/dora/README.md b/src/commands/dora/README.md new file mode 100644 index 00000000..903555f3 --- /dev/null +++ b/src/commands/dora/README.md @@ -0,0 +1,7 @@ +# Dora + +## Kurtosis +Kurtosis Dora commands located in `src/commands/kurtosis/dora` folder. 
+ +## Native Dora +TODO diff --git a/src/commands/dora/extensions/dora.extension.ts b/src/commands/dora/extensions/dora.extension.ts new file mode 100644 index 00000000..623c0add --- /dev/null +++ b/src/commands/dora/extensions/dora.extension.ts @@ -0,0 +1,45 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getDora( + must?: M, + ): Promise>; + removeDora(): Promise; + updateDora(state: DoraState): Promise; + } + + export interface Config { + dora: DoraState; + } +} + +export const DoraState = z.object({ + publicUrl: z.string().url() +}); + +export type DoraState = z.infer; + +export const doraExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateDora = (async function (state: DoraState) { + await dre.state.updateProperties("dora", state); + }); + + dre.state.removeDora = (async function () { + await dre.state.updateProperties("dora", {}); + }); + + dre.state.getDora = (async function (must: M = true as M) { + return dre.state.getProperties( + "dora", + "dora", + DoraState, + must, + ); + }); +}; diff --git a/src/commands/down-offchain.ts b/src/commands/down-offchain.ts new file mode 100644 index 00000000..b3970516 --- /dev/null +++ b/src/commands/down-offchain.ts @@ -0,0 +1,42 @@ +import { Params, command } from "@devnet/command"; + +import { CouncilK8sDown } from "./council-k8s/down.js"; +import { DSMBotsK8sDown } from "./dsm-bots-k8s/down.js"; +import { K8sPing } from "./k8s/ping.js"; +import { KapiK8sDown } from "./kapi-k8s/down.js"; +import { OracleK8sDown } from "./oracles-k8s/down.js"; + +export const DevNetStopOffchain = command.cli({ + description: "Stop offchain apps in DevNet", + params: { + force: Params.boolean({ + description: "Do not check that services 
were already stopped", + default: false, + required: false, + }), + silent: Params.boolean({ + description: "Do not stop on errors", + default: false, + required: false, + }), + }, + async handler({ dre, dre: { logger }, params }) { + logger.log("Stopping DevNet offchain services..."); + + await dre.runCommand(K8sPing, {}); + + const downFns = [ + () => dre.runCommand(KapiK8sDown, { force: params.force }), + () => dre.runCommand(OracleK8sDown, { force: params.force }), + () => dre.runCommand(CouncilK8sDown, { force: params.force }), + () => dre.runCommand(DSMBotsK8sDown, { force: params.force }), + ]; + + for (const fn of downFns) { + await (params.silent ? fn() : fn().catch((error) => logger.warn(error.message))); + } + + + logger.log("DevNet offchain services stopped successfully."); + }, +}); diff --git a/src/commands/down.ts b/src/commands/down.ts index 2bbfbde5..448b9926 100644 --- a/src/commands/down.ts +++ b/src/commands/down.ts @@ -1,37 +1,46 @@ -import { command } from "@devnet/command"; +import { Params, command } from "@devnet/command"; -import { BlockscoutDown } from "./blockscout/down.js"; -import { KurtosisCleanUp } from "./chain/down.js"; -import { CouncilDown } from "./council/down.js"; -import { DSMBotsDown } from "./dsm-bots/down.js"; -import { KapiDown } from "./kapi/down.js"; -import { OracleDown } from "./oracles/down.js"; +import { ChainDown } from "./chain/down.js"; +import { CouncilK8sDown } from "./council-k8s/down.js"; +import { DSMBotsK8sDown } from "./dsm-bots-k8s/down.js"; +import { K8sPing } from "./k8s/ping.js"; +import { KapiK8sDown } from "./kapi-k8s/down.js"; +import { NoWidgetDown } from "./no-widget/down.js"; +import { NoWidgetBackendDown } from "./no-widget-backend/down.js"; +import { OracleK8sDown } from "./oracles-k8s/down.js"; export const DevNetStop = command.cli({ description: "Stop full DevNet", - params: {}, - async handler({ dre, dre: { logger } }) { + params: { + force: Params.boolean({ + description: "Do not check that 
services were already stopped", + default: false, + required: false, + }), + silent: Params.boolean({ + description: "Do not stop on errors", + default: false, + required: false, + }), + }, + async handler({ dre, dre: { logger }, params }) { logger.log("Stopping DevNet..."); - await dre - .runCommand(BlockscoutDown, {}) - .catch((error) => logger.warn(error.message)); - await dre - .runCommand(KapiDown, {}) - .catch((error) => logger.warn(error.message)); - await dre - .runCommand(OracleDown, {}) - .catch((error) => logger.warn(error.message)); - await dre - .runCommand(CouncilDown, {}) - .catch((error) => logger.warn(error.message)); - await dre - .runCommand(DSMBotsDown, {}) - .catch((error) => logger.warn(error.message)); + await dre.runCommand(K8sPing, {}); + + const downFns = [ + () => dre.runCommand(NoWidgetBackendDown, { force: params.force }), + () => dre.runCommand(NoWidgetDown, { force: params.force }), + () => dre.runCommand(KapiK8sDown, { force: params.force }), + () => dre.runCommand(OracleK8sDown, { force: params.force }), + () => dre.runCommand(CouncilK8sDown, { force: params.force }), + () => dre.runCommand(DSMBotsK8sDown, { force: params.force }), + () => dre.runCommand(ChainDown, {}) + ]; - await dre - .runCommand(KurtosisCleanUp, {}) - .catch((error) => logger.warn(error.message)); + for (const fn of downFns) { + await (params.silent ? 
fn() : fn().catch((error) => logger.warn(error.message))); + } await dre.clean(); diff --git a/src/commands/dsm-bots-k8s/build.ts b/src/commands/dsm-bots-k8s/build.ts new file mode 100644 index 00000000..3bcd3760 --- /dev/null +++ b/src/commands/dsm-bots-k8s/build.ts @@ -0,0 +1,38 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +import { GitCheckout } from "../git/checkout.js"; + +export const DSMBotsK8sBuild = command.cli({ + description: "Build DSM Bot and push to Docker registry", + params: {}, + async handler({ dre, dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const TAG = `kt-${network.name}`; + const IMAGE = `lido/dsm-bot`; + + await dre.runCommand(GitCheckout, { + service: "dsmBots", + ref: "feat/devnet", // TODO make configurable from global yaml config + }); + + await buildAndPushDockerImage({ + cwd: services.dsmBots.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: '.', + imageName: IMAGE, + tag: TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 
'changeme', + }); + + logger.log(`DSM Bot image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateDsmBotsK8sImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }) + }, +}); diff --git a/src/commands/dsm-bots-k8s/constants/dsm-bots-k8s.constants.ts b/src/commands/dsm-bots-k8s/constants/dsm-bots-k8s.constants.ts new file mode 100644 index 00000000..d2fe80d4 --- /dev/null +++ b/src/commands/dsm-bots-k8s/constants/dsm-bots-k8s.constants.ts @@ -0,0 +1,4 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-dsm-bots`; diff --git a/src/commands/dsm-bots-k8s/down.ts b/src/commands/dsm-bots-k8s/down.ts new file mode 100644 index 00000000..1556631d --- /dev/null +++ b/src/commands/dsm-bots-k8s/down.ts @@ -0,0 +1,52 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; + +import { NAMESPACE } from "./constants/dsm-bots-k8s.constants.js"; + +export const DSMBotsK8sDown = command.cli({ + description: "Stop DSM-bots", + params: { + force: Params.boolean({ + description: "Do not check that the DSM Bots were already stopped", + default: false, + required: false, + }), + }, + async handler({ dre, dre: { services: { dsmBots }, state, logger }, params }) { + + if (!(await state.isDsmBotsK8sRunning()) && !(params.force)) { + logger.log("DSM Bots are not running. Skipping"); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No DSM Bots releases found in namespace [${NAMESPACE(dre)}]. 
Skipping...`); + return; + } + + for (const release of releases) { + const helmLidoDsmBotSh = dsmBots.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE: release, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmLidoDsmBotSh`make debug`; + await helmLidoDsmBotSh`make lint`; + await helmLidoDsmBotSh`make uninstall`; + logger.log(`DSM Bot [${release}] stopped.`); + } + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeDsmBotsK8sState(); + }, +}); diff --git a/src/commands/dsm-bots-k8s/extensions/dsm-bots-k8s.extension.ts b/src/commands/dsm-bots-k8s/extensions/dsm-bots-k8s.extension.ts new file mode 100644 index 00000000..ae0f3153 --- /dev/null +++ b/src/commands/dsm-bots-k8s/extensions/dsm-bots-k8s.extension.ts @@ -0,0 +1,113 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getDsmBotsK8sImage(must?: M,): Promise>; + getDsmBotsK8sRunning(must?: M,): Promise>; + getDsmBotsK8sState(must?: M,): Promise>; + + isDsmBotsK8sImageReady(): Promise; + isDsmBotsK8sRunning(): Promise; + + removeDsmBotsK8sState(): Promise; + + updateDsmBotsK8sImage(state: DsmBotsK8sStateImage): Promise; + updateDsmBotsK8sRunning(state: DsmBotsK8sStateRunning): Promise; + } + + export interface Config { + dsmBotsK8s: DsmBotsK8sState; + } +} + +export const DsmBotsK8sStateImage = z.object({ + image: z.string(), + tag: z.string(), + registryHostname: z.string() +}); + +export type DsmBotsK8sStateImage = z.infer; + +export const DsmBotsK8sStateRunning = z.object({ + helmReleases: z.array(z.string()), +}); + +export type DsmBotsK8sStateRunning = z.infer; + +export const DsmBotsK8sState = z.object({ + image: 
DsmBotsK8sStateImage.optional(), + running: DsmBotsK8sStateRunning.optional(), +}); + +export type DsmBotsK8sState = z.infer; + +export const dsmBotsK8sExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateDsmBotsK8sImage = (async function (stateImage: DsmBotsK8sStateImage) { + const state = await dre.state.getDsmBotsK8sState(false); + await dre.state.updateProperties("dsmBotsK8s", { ...state, image: stateImage }); + }); + + dre.state.updateDsmBotsK8sRunning = (async function (stateRunning: DsmBotsK8sStateRunning) { + const state = await dre.state.getDsmBotsK8sState(false); + await dre.state.updateProperties("dsmBotsK8s", { ...state, running: stateRunning }); + }); + + dre.state.removeDsmBotsK8sState = (async function () { + await dre.state.updateProperties("dsmBotsK8s", {}); + }); + + dre.state.isDsmBotsK8sImageReady = (async function () { + const state = await dre.state.getDsmBotsK8sImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isDsmBotsK8sRunning = (async function () { + const state = await dre.state.getDsmBotsK8sRunning(false); + return state && !isEmptyObject(state) && (state.helmReleases !== undefined) && state.helmReleases.length > 0; + }); + + dre.state.getDsmBotsK8sImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { + image: "dsmBotsK8s.image.image", + tag: "dsmBotsK8s.image.tag", + registryHostname: "dsmBotsK8s.image.registryHostname", + }, + "dsmBotsK8s", + DsmBotsK8sStateImage, + must, + ); + }); + + dre.state.getDsmBotsK8sRunning = (async function (must: M = true as M) { + return dre.state.getProperties( + { + helmReleases: "dsmBotsK8s.running.helmReleases", + }, + "dsmBotsK8s", + DsmBotsK8sStateRunning, + must, + ); + }); + + dre.state.getDsmBotsK8sState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'dsmBotsK8s', + "dsmBotsK8s", + DsmBotsK8sState, + must, + ); + }); +}; diff --git 
a/src/commands/dsm-bots-k8s/up.ts b/src/commands/dsm-bots-k8s/up.ts new file mode 100644 index 00000000..1866f1fa --- /dev/null +++ b/src/commands/dsm-bots-k8s/up.ts @@ -0,0 +1,109 @@ +import { command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + createNamespaceIfNotExists, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { DSMBotsK8sBuild } from "./build.js"; +import { NAMESPACE } from "./constants/dsm-bots-k8s.constants.js"; +import { dsmBotsK8sExtension } from "./extensions/dsm-bots-k8s.extension.js"; + + +export const DSMBotsK8sUp = command.cli({ + description: "Start DSM bots in K8s", + params: {}, + extensions: [dsmBotsK8sExtension], + async handler({ dre, dre: { services, state, network, logger } }) { + const { dsmBots } = services; + + if (!(await state.isChainDeployed())) { + throw new DevNetError("Chain is not deployed"); + } + + if (!(await state.isLidoDeployed())) { + throw new DevNetError("Lido is not deployed"); + } + + if (!(await state.isCSMDeployed())) { + throw new DevNetError("CSM is not deployed"); + } + + await dre.runCommand(DSMBotsK8sBuild, {}); + + const { elPrivate } = await state.getChain(); + const { locator } = await state.getLido(); + const { deployer } = await state.getNamedWallet(); + const { image, tag, registryHostname } = await state.getDsmBotsK8sImage(); + + const { address: dataBusAddress } = await state.getDataBus(); + + const DEPOSIT_CONTRACT_ADDRESS = await dre.services.kurtosis.config.getters.DEPOSIT_CONTRACT_ADDRESS(dre.services.kurtosis); + + const env: Record = { + WEB3_RPC_ENDPOINTS: elPrivate, + WALLET_PRIVATE_KEY: deployer.privateKey, + LIDO_LOCATOR: locator, + DEPOSIT_CONTRACT: DEPOSIT_CONTRACT_ADDRESS, + MESSAGE_TRANSPORTS: "onchain_transport", + ONCHAIN_TRANSPORT_ADDRESS: dataBusAddress, + 
ONCHAIN_TRANSPORT_RPC_ENDPOINTS: elPrivate, + RABBIT_MQ_URL: "ws://dsm_rabbit:15674/ws", + RABBIT_MQ_USERNAME: "guest", + RABBIT_MQ_PASSWORD: "guest", + CREATE_TRANSACTIONS: "true", + DEPOSIT_MODULES_WHITELIST: "1\\,2\\,3", // necessary wrapping for helm + PROMETHEUS_PREFIX: "depositor_bot", + }; + + const helmReleases = [ + { HELM_RELEASE: 'depositor-bot', command: 'depositor', privateKey: deployer.privateKey, }, + { HELM_RELEASE: 'pause-bot', command: 'pauser', privateKey: deployer.privateKey, }, + { HELM_RELEASE: 'unvetter-bot', command: 'unvetter', privateKey: deployer.privateKey, }, + ]; + + for (const release of helmReleases) { + const { HELM_RELEASE, command, privateKey } = release; + + const alreadyDeployedHelmReleases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + if (alreadyDeployedHelmReleases?.includes(HELM_RELEASE)) { + logger.log(`DSM Bot release ${HELM_RELEASE} already running`); + continue; + } + + const helmLidoDsmBotSh = dsmBots.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + WALLET_PRIVATE_KEY: privateKey, + COMMAND: command, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmLidoDsmBotSh`make debug`; + await helmLidoDsmBotSh`make lint`; + + try { + await helmLidoDsmBotSh`make install`; + } catch { + // rollback changes + await helmLidoDsmBotSh`make uninstall`; + } + } + + await state.updateDsmBotsK8sRunning({ + helmReleases: ['active'], + }); + }, +}); diff --git a/src/commands/dsm-bots/down.ts b/src/commands/dsm-bots/down.ts deleted file mode 100644 index 96d0b023..00000000 --- a/src/commands/dsm-bots/down.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { command } from "@devnet/command"; - -export const DSMBotsDown = command.cli({ - description: "Stop DSM-bots", - params: {}, 
- async handler({ dre: { services } }) { - const { dsmBots } = services; - - await dsmBots.sh`docker compose -f docker-compose.devnet.yml down -v`; - }, -}); diff --git a/src/commands/dsm-bots/logs.ts b/src/commands/dsm-bots/logs.ts deleted file mode 100644 index c4a10835..00000000 --- a/src/commands/dsm-bots/logs.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { command } from "@devnet/command"; - -export const DSMBotsLogs = command.cli({ - description: "Show DSM-bots logs", - params: {}, - async handler({ dre: { services } }) { - const { dsmBots } = services; - - await dsmBots.sh`docker compose -f docker-compose.devnet.yml logs -f`; - }, -}); diff --git a/src/commands/dsm-bots/up.ts b/src/commands/dsm-bots/up.ts deleted file mode 100644 index 94c0e28f..00000000 --- a/src/commands/dsm-bots/up.ts +++ /dev/null @@ -1,36 +0,0 @@ -import { command } from "@devnet/command"; - -export const DSMBotsUp = command.cli({ - description: "Start DSM-bots", - params: {}, - async handler({ dre: { services, state, network } }) { - const { dsmBots } = services; - - const { elPrivate } = await state.getChain(); - const { locator } = await state.getLido(); - const { deployer } = await state.getNamedWallet(); - const { address: dataBusAddress } = await state.getDataBus(); - - const env = { - WEB3_RPC_ENDPOINTS: elPrivate, - WALLET_PRIVATE_KEY: deployer.privateKey, - LIDO_LOCATOR: locator, - DEPOSIT_CONTRACT: "0x00000000219ab540356cBB839Cbe05303d7705Fa", - MESSAGE_TRANSPORTS: "onchain_transport", - ONCHAIN_TRANSPORT_ADDRESS: dataBusAddress, - ONCHAIN_TRANSPORT_RPC_ENDPOINTS: elPrivate, - RABBIT_MQ_URL: "ws://dsm_rabbit:15674/ws", - RABBIT_MQ_USERNAME: "guest", - RABBIT_MQ_PASSWORD: "guest", - CREATE_TRANSACTIONS: "true", - DEPOSIT_MODULES_WHITELIST: "1,2,3", - PROMETHEUS_PREFIX: "depositor_bot", - DOCKER_NETWORK_NAME: `kt-${network.name}`, - COMPOSE_PROJECT_NAME: `dsm-bots-${network.name}`, - }; - - await dsmBots.writeENV(".env", env); - - await dsmBots.sh`docker compose -f 
docker-compose.devnet.yml up --build -d`; - }, -}); diff --git a/src/commands/git/checkout.ts b/src/commands/git/checkout.ts index e3eaf524..75da48b2 100644 --- a/src/commands/git/checkout.ts +++ b/src/commands/git/checkout.ts @@ -1,12 +1,13 @@ -import { DevNetError, Params, command } from "@devnet/command"; -import { services } from "@devnet/service"; +import { Params, command } from "@devnet/command"; +import { serviceConfigs } from "@devnet/service"; +import { DevNetError } from "@devnet/utils"; export const GitCheckout = command.cli({ description: "Switching the Git branch in the specified service", params: { service: Params.option({ description: "Name of one of the existing services.", - options: Object.keys(services) as (keyof typeof services)[], + options: Object.keys(serviceConfigs) as (keyof typeof serviceConfigs)[], required: true, })(), ref: Params.string({ @@ -48,6 +49,10 @@ export const GitCheckout = command.cli({ await sh`git reset --hard && git clean -fd`; } + // fetching all branches + await sh`git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"`; + await sh`git fetch origin --prune`; + // Check if the branch exists locally const localBranchExists = await sh`git rev-parse --verify refs/heads/${branch}` diff --git a/src/commands/git/pull.ts b/src/commands/git/pull.ts index 626a917d..f198b7e4 100644 --- a/src/commands/git/pull.ts +++ b/src/commands/git/pull.ts @@ -1,12 +1,12 @@ import { Params, command } from "@devnet/command"; -import { services } from "@devnet/service"; +import { serviceConfigs } from "@devnet/service"; export const GitCheckout = command.cli({ description: "Retrieve changes from a Git branch in a specified service.", params: { service: Params.option({ description: "Name of one of the existing services.", - options: Object.keys(services) as (keyof typeof services)[], + options: Object.keys(serviceConfigs) as (keyof typeof serviceConfigs)[], required: true, })(), branch: Params.string({ diff --git 
a/src/commands/k8s/extensions/k8s.extension.ts b/src/commands/k8s/extensions/k8s.extension.ts new file mode 100644 index 00000000..d7745efd --- /dev/null +++ b/src/commands/k8s/extensions/k8s.extension.ts @@ -0,0 +1,57 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; +import { LidoCoreState } from "../../lido-core/extensions/lido-core.extension.js"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getK8sState(must?: M,): Promise>; + + isK8sRunning(): Promise; + + removeK8sState(): Promise; + + updateK8sState(state: K8sState): Promise; + } + + export interface Config { + k8s: K8sState; + } +} + +export const K8sState = z.object({ + context: z.string(), +}); + +export type K8sState = z.infer; + +export const k8sExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.removeK8sState = (async function () { + await dre.state.updateProperties("k8s", {}); + }); + + dre.state.updateK8sState = (async function (state: K8sState) { + await this.updateProperties("k8s", state); + }); + + dre.state.isK8sRunning = (async function () { + const state = await dre.state.getK8sState(false); + return state && !isEmptyObject(state) && (state.context !== undefined); + }); + + + dre.state.getK8sState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'k8s', + "k8s", + K8sState, + must, + ); + }); +}; diff --git a/src/commands/k8s/ping.ts b/src/commands/k8s/ping.ts new file mode 100644 index 00000000..34b93b34 --- /dev/null +++ b/src/commands/k8s/ping.ts @@ -0,0 +1,14 @@ +import { command } from "@devnet/command"; +import { pingCluster } from "@devnet/k8s"; + +export const K8sPing = command.isomorphic({ + description: + "Checks connectivity to the k8s cluster", + params: {}, + 
async handler({ dre: { logger } }) { + + await pingCluster(); + + logger.log(`K8s cluster pinged successfully.`); + }, +}); diff --git a/src/commands/k8s/set-default-context.ts b/src/commands/k8s/set-default-context.ts new file mode 100644 index 00000000..1bcf3689 --- /dev/null +++ b/src/commands/k8s/set-default-context.ts @@ -0,0 +1,29 @@ +import { Params, command } from "@devnet/command"; +import { DevNetError } from "@devnet/utils"; +import { execa } from "execa"; + +import { k8sExtension } from "./extensions/k8s.extension.js"; + +export const K8sSetDefaultContext = command.isomorphic({ + description: + "Set k8s default cluster context", + params: { + context: Params.string({ + description: "K8s context", + required: true, + }), + }, + extensions: [k8sExtension], + async handler({ dre: { logger, state }, params }) { + + if (!params.context) { + throw new DevNetError('Context is required'); + } + + execa('kubectl', ['config', 'use-context', params.context], { stdio: 'inherit' }); + + await state.updateK8sState({ context: params.context }); + + logger.log(`K8s cluster context set to [${params.context}]`); + }, +}); diff --git a/src/commands/kapi-k8s/build.ts b/src/commands/kapi-k8s/build.ts new file mode 100644 index 00000000..0f3a97a2 --- /dev/null +++ b/src/commands/kapi-k8s/build.ts @@ -0,0 +1,33 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +import { SERVICE_NAME } from "./constants/kapi-k8s.constants.js"; + +export const KapiK8sBuild = command.cli({ + description: `Build ${SERVICE_NAME} and push to Docker registry`, + params: {}, + async handler({ dre, dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const TAG = `kt-${network.name}`; + const IMAGE = 'lido/keys-api'; + + await buildAndPushDockerImage({ + cwd: services.kapi.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: '.', + imageName: IMAGE, + tag: 
TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 'changeme', + }); + + logger.log(`${SERVICE_NAME} image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateKapiK8sImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }) + }, +}); diff --git a/src/commands/kapi-k8s/constants/kapi-k8s.constants.ts b/src/commands/kapi-k8s/constants/kapi-k8s.constants.ts new file mode 100644 index 00000000..ba5d33c9 --- /dev/null +++ b/src/commands/kapi-k8s/constants/kapi-k8s.constants.ts @@ -0,0 +1,6 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-kapi`; + +export const SERVICE_NAME = "Keys API"; diff --git a/src/commands/kapi-k8s/down.ts b/src/commands/kapi-k8s/down.ts new file mode 100644 index 00000000..258cd609 --- /dev/null +++ b/src/commands/kapi-k8s/down.ts @@ -0,0 +1,60 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + deleteNamespacedPersistentVolumeClaimIfExists, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; + +import { NAMESPACE, SERVICE_NAME } from "./constants/kapi-k8s.constants.js"; + +export const KapiK8sDown = command.cli({ + description: `Stop ${SERVICE_NAME} in K8s with Helm`, + params: { + force: Params.boolean({ + description: `Do not check that the ${SERVICE_NAME} was already stopped`, + default: false, + required: false, + }), + }, + async handler({ dre, dre: { services: { kapi }, logger, state }, params }) { + if (!(await state.isKapiK8sRunning()) && !(params.force)) { + logger.log(`${SERVICE_NAME} not running. 
Skipping`); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No ${SERVICE_NAME} releases found in namespace [${NAMESPACE(dre)}]. Skipping...`); + return; + } + + const HELM_RELEASE = releases[0]; + const helmSh = kapi.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make uninstall`; + + // removing postgress persistent volume claim + // TODO delegate to helm + logger.log("Removing persistent volume claim for postgress"); + await deleteNamespacedPersistentVolumeClaimIfExists( + NAMESPACE(dre), + 'data-lido-kapi-postgresql-0', // hardcoded for now + ); + + logger.log(`${SERVICE_NAME} stopped.`); + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeKapiK8sState(); + }, +}); diff --git a/src/commands/kapi-k8s/extensions/kapi-k8s.extension.ts b/src/commands/kapi-k8s/extensions/kapi-k8s.extension.ts new file mode 100644 index 00000000..18736294 --- /dev/null +++ b/src/commands/kapi-k8s/extensions/kapi-k8s.extension.ts @@ -0,0 +1,117 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getKapiK8sImage(must?: M,): Promise>; + getKapiK8sRunning(must?: M,): Promise>; + getKapiK8sState(must?: M,): Promise>; + + isKapiK8sImageReady(): Promise; + isKapiK8sRunning(): Promise; + + removeKapiK8sState(): Promise; + + updateKapiK8sImage(state: KapiK8sStateImage): Promise; + updateKapiK8sRunning(state: KapiK8sStateRunning): Promise; + } + + export interface Config { + kapiK8s: KapiK8sState; + } +} + +export const 
KapiK8sStateImage = z.object({ + image: z.string(), + tag: z.string(), + registryHostname: z.string() +}); + +export type KapiK8sStateImage = z.infer; + +export const KapiK8sStateRunning = z.object({ + publicUrl: z.string().url(), + privateUrl: z.string().url(), + helmRelease: z.string(), +}); + +export type KapiK8sStateRunning = z.infer; + +export const KapiK8sState = z.object({ + image: KapiK8sStateImage.optional(), + running: KapiK8sStateRunning.optional(), +}); + +export type KapiK8sState = z.infer; + +export const kapiK8sExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateKapiK8sImage = (async function (stateImage: KapiK8sStateImage) { + const state = await dre.state.getKapiK8sState(false); + await dre.state.updateProperties("kapiK8s", { ...state, image: stateImage }); + }); + + dre.state.updateKapiK8sRunning = (async function (stateRunning: KapiK8sStateRunning) { + const state = await dre.state.getKapiK8sState(false); + await dre.state.updateProperties("kapiK8s", { ...state, running: stateRunning }); + }); + + dre.state.removeKapiK8sState = (async function () { + await dre.state.updateProperties("kapiK8s", {}); + }); + + dre.state.isKapiK8sImageReady = (async function () { + const state = await dre.state.getKapiK8sImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isKapiK8sRunning = (async function () { + const state = await dre.state.getKapiK8sRunning(false); + return state && !isEmptyObject(state) && (state.privateUrl !== undefined); + }); + + dre.state.getKapiK8sImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { + image: "kapiK8s.image.image", + tag: "kapiK8s.image.tag", + registryHostname: "kapiK8s.image.registryHostname", + }, + "kapiK8s", + KapiK8sStateImage, + must, + ); + }); + + dre.state.getKapiK8sRunning = (async function (must: M = true as M) { + return dre.state.getProperties( + { + publicUrl: "kapiK8s.running.publicUrl", + 
privateUrl: "kapiK8s.running.privateUrl", + helmRelease: "kapiK8s.running.helmRelease", + }, + "kapiK8s", + KapiK8sStateRunning, + must, + ); + }); + + dre.state.getKapiK8sState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'kapiK8s', + "kapiK8s", + KapiK8sState, + must, + ); + }); +}; diff --git a/src/commands/kapi-k8s/up.ts b/src/commands/kapi-k8s/up.ts new file mode 100644 index 00000000..b553f824 --- /dev/null +++ b/src/commands/kapi-k8s/up.ts @@ -0,0 +1,105 @@ +import { + DEFAULT_NETWORK_NAME, + NETWORK_NAME_SUBSTITUTION, + command, +} from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + addPrefixToIngressHostname, + createNamespaceIfNotExists, +} from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { KapiK8sBuild } from "./build.js"; +import { NAMESPACE, SERVICE_NAME } from "./constants/kapi-k8s.constants.js"; +import { kapiK8sExtension } from "./extensions/kapi-k8s.extension.js"; + +export const KapiK8sUp = command.cli({ + description: `Start ${SERVICE_NAME} on K8s with Helm`, + params: {}, + extensions: [kapiK8sExtension], + async handler({ dre, dre: { state, services: { kapi }, logger } }) { + if (await state.isKapiK8sRunning()) { + logger.log(`${SERVICE_NAME} already running`); + return; + } + + if (!(await state.isChainDeployed())) { + throw new DevNetError("Chain is not deployed"); + } + + if (!(await state.isLidoDeployed())) { + throw new DevNetError("Lido is not deployed"); + } + + if (!(await state.isCSMDeployed())) { + throw new DevNetError("CSM is not deployed"); + } + + await dre.runCommand(KapiK8sBuild, {}); + + if (!(await state.isKapiK8sImageReady())) { + throw new DevNetError(`${SERVICE_NAME} image is not ready`); + } + + const { elPrivate, clPrivate } = await state.getChain(); + + const { locator, stakingRouter, curatedModule } = await 
state.getLido(); + const { module: csmModule } = await state.getCSM(); + const { image, tag, registryHostname } = await state.getKapiK8sImage(); + + const env: Record = { + ...kapi.config.constants, + + IS_DEVNET_MODE: "1", + CHAIN_ID: "32382", + CSM_MODULE_DEVNET_ADDRESS: csmModule, + CURATED_MODULE_DEVNET_ADDRESS: curatedModule, + LIDO_LOCATOR_DEVNET_ADDRESS: locator, + PROVIDERS_URLS: elPrivate, + CL_API_URLS: clPrivate, + STAKING_ROUTER_DEVNET_ADDRESS: stakingRouter, + }; + + const hostname = process.env.KAPI_INGRESS_HOSTNAME?. + replace(NETWORK_NAME_SUBSTITUTION, DEFAULT_NETWORK_NAME); + + if (!hostname) { + throw new DevNetError(`KAPI_INGRESS_HOSTNAME env variable is not set`); + } + + const INGRESS_HOSTNAME = addPrefixToIngressHostname(hostname); + + const HELM_RELEASE = 'lido-kapi-1'; + const helmSh = kapi.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + INGRESS_HOSTNAME, + DB_HOST: `${HELM_RELEASE}-postgresql`, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make install`; + + await state.updateKapiK8sRunning({ + helmRelease: HELM_RELEASE, + publicUrl: `http://${INGRESS_HOSTNAME}`, + privateUrl: `http://${HELM_RELEASE}.${NAMESPACE(dre)}.svc.cluster.local:3000` + }); + + logger.log(`${SERVICE_NAME} started.`); + }, +}); diff --git a/src/commands/kapi/down.ts b/src/commands/kapi/down.ts deleted file mode 100644 index f47e5e2e..00000000 --- a/src/commands/kapi/down.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { command } from "@devnet/command"; - -export const KapiDown = command.cli({ - description: "Stop Kapi", - params: {}, - async handler({ - dre: { - services: { kapi }, - }, - }) { - await kapi.sh`docker compose -f docker-compose.devnet.yml 
down -v`; - }, -}); diff --git a/src/commands/kapi/logs.ts b/src/commands/kapi/logs.ts deleted file mode 100644 index 4cb2d371..00000000 --- a/src/commands/kapi/logs.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { command } from "@devnet/command"; - -export const KapiLogs = command.cli({ - description: "Show Kapi logs", - params: {}, - async handler({ - dre: { - services: { kapi }, - }, - }) { - await kapi.sh`docker compose -f docker-compose.devnet.yml logs -f`; - }, -}); diff --git a/src/commands/kapi/up.ts b/src/commands/kapi/up.ts deleted file mode 100644 index 5a40f209..00000000 --- a/src/commands/kapi/up.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { command } from "@devnet/command"; - -export const KapiUp = command.cli({ - description: "Start Kapi", - params: {}, - async handler({ dre: { state, network, services } }) { - const { elPrivate } = await state.getChain(); - - const { kapi } = services; - - const { locator, stakingRouter, curatedModule } = await state.getLido(); - const { module: csmModule } = await state.getCSM(); - - const env = { - ...kapi.config.constants, - - CHAIN_ID: "32382", - CSM_MODULE_DEVNET_ADDRESS: csmModule, - CURATED_MODULE_DEVNET_ADDRESS: curatedModule, - DOCKER_NETWORK_NAME: `kt-${network.name}`, - LIDO_LOCATOR_DEVNET_ADDRESS: locator, - PROVIDERS_URLS: elPrivate, - STAKING_ROUTER_DEVNET_ADDRESS: stakingRouter, - COMPOSE_PROJECT_NAME: `kapi-${network.name}`, - }; - - await kapi.writeENV(".env", env); - - await kapi.sh`docker compose -f docker-compose.devnet.yml up --build -d`; - }, -}); diff --git a/src/commands/kubo-k8s/build.ts b/src/commands/kubo-k8s/build.ts new file mode 100644 index 00000000..12294b70 --- /dev/null +++ b/src/commands/kubo-k8s/build.ts @@ -0,0 +1,33 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +import { GitCheckout } from "../git/checkout.js"; + +export const KuboK8sBuild = command.cli({ + description: "Build Kubo (IPFS) and push to Docker registry", + 
params: {}, + async handler({ dre, dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const TAG = `kt-${network.name}`; + const IMAGE = 'lido/kubo'; + + await buildAndPushDockerImage({ + cwd: services.oracle.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: './kubo', + imageName: IMAGE, + tag: TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 'changeme', + }); + + logger.log(`Kubo image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateKuboK8sImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }) + }, +}); diff --git a/src/commands/kubo-k8s/constants/kubo-k8s.constants.ts b/src/commands/kubo-k8s/constants/kubo-k8s.constants.ts new file mode 100644 index 00000000..a2f16280 --- /dev/null +++ b/src/commands/kubo-k8s/constants/kubo-k8s.constants.ts @@ -0,0 +1,4 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-kubo`; diff --git a/src/commands/kubo-k8s/down.ts b/src/commands/kubo-k8s/down.ts new file mode 100644 index 00000000..87ed3516 --- /dev/null +++ b/src/commands/kubo-k8s/down.ts @@ -0,0 +1,54 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + deleteNamespacedPersistentVolumeClaimIfExists, + getK8s, + getNamespacedDeployedHelmReleases, + k8s, +} from "@devnet/k8s"; + +import { NAMESPACE } from "./constants/kubo-k8s.constants.js"; + +export const KuboK8sDown = command.cli({ + description: "Stop Kubo in K8s with Helm", + params: { + force: Params.boolean({ + description: "Do not check that the Kubo was already stopped", + default: false, + required: false, + }), + }, + async handler({ dre, dre: { services: { kubo }, logger, 
state }, params }) { + if (!(await state.isKuboK8sRunning()) && !(params.force)) { + logger.log("Kubo not running. Skipping"); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No Kubo releases found in namespace [${NAMESPACE(dre)}]. Skipping...`); + return; + } + + const HELM_RELEASE = releases[0]; + const helmLidoKuboSh = kubo.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmLidoKuboSh`make debug`; + await helmLidoKuboSh`make lint`; + await helmLidoKuboSh`make uninstall`; + + logger.log("Kubo stopped."); + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeKuboK8sState(); + }, +}); diff --git a/src/commands/kubo-k8s/extensions/kubo-k8s.extension.ts b/src/commands/kubo-k8s/extensions/kubo-k8s.extension.ts new file mode 100644 index 00000000..7d670258 --- /dev/null +++ b/src/commands/kubo-k8s/extensions/kubo-k8s.extension.ts @@ -0,0 +1,117 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getKuboK8sImage(must?: M,): Promise>; + getKuboK8sRunning(must?: M,): Promise>; + getKuboK8sState(must?: M,): Promise>; + + isKuboK8sImageReady(): Promise; + isKuboK8sRunning(): Promise; + + removeKuboK8sState(): Promise; + + updateKuboK8sImage(state: KuboK8sStateImage): Promise; + updateKuboK8sRunning(state: KuboK8sStateRunning): Promise; + } + + export interface Config { + kuboK8s: KuboK8sState; + } +} + +export const KuboK8sStateImage = z.object({ + image: z.string(), + tag: z.string(), + registryHostname: z.string() +}); + +export type KuboK8sStateImage = z.infer; + 
+export const KuboK8sStateRunning = z.object({ + publicUrl: z.string().url(), + privateUrl: z.string().url(), + helmRelease: z.string(), +}); + +export type KuboK8sStateRunning = z.infer; + +export const KuboK8sState = z.object({ + image: KuboK8sStateImage.optional(), + running: KuboK8sStateRunning.optional(), +}); + +export type KuboK8sState = z.infer; + +export const kuboK8sExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateKuboK8sImage = (async function (stateImage: KuboK8sStateImage) { + const state = await dre.state.getKuboK8sState(false); + await dre.state.updateProperties("kuboK8s", { ...state, image: stateImage }); + }); + + dre.state.updateKuboK8sRunning = (async function (stateRunning: KuboK8sStateRunning) { + const state = await dre.state.getKuboK8sState(false); + await dre.state.updateProperties("kuboK8s", { ...state, running: stateRunning }); + }); + + dre.state.removeKuboK8sState = (async function () { + await dre.state.updateProperties("kuboK8s", {}); + }); + + dre.state.isKuboK8sImageReady = (async function () { + const state = await dre.state.getKuboK8sImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isKuboK8sRunning = (async function () { + const state = await dre.state.getKuboK8sRunning(false); + return state && !isEmptyObject(state) && (state.privateUrl !== undefined); + }); + + dre.state.getKuboK8sImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { + image: "kuboK8s.image.image", + tag: "kuboK8s.image.tag", + registryHostname: "kuboK8s.image.registryHostname", + }, + "kuboK8s", + KuboK8sStateImage, + must, + ); + }); + + dre.state.getKuboK8sRunning = (async function (must: M = true as M) { + return dre.state.getProperties( + { + publicUrl: "kuboK8s.running.publicUrl", + privateUrl: "kuboK8s.running.privateUrl", + helmRelease: "kuboK8s.running.helmRelease", + }, + "kuboK8s", + KuboK8sStateRunning, + must, + ); + }); + + 
dre.state.getKuboK8sState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'kuboK8s', + "kuboK8s", + KuboK8sState, + must, + ); + }); +}; diff --git a/src/commands/kubo-k8s/up.ts b/src/commands/kubo-k8s/up.ts new file mode 100644 index 00000000..53fe0a3d --- /dev/null +++ b/src/commands/kubo-k8s/up.ts @@ -0,0 +1,77 @@ +import { + DEFAULT_NETWORK_NAME, + NETWORK_NAME_SUBSTITUTION, + command, +} from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { addPrefixToIngressHostname, createNamespaceIfNotExists } from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { KuboK8sBuild } from "./build.js"; +import { NAMESPACE } from "./constants/kubo-k8s.constants.js"; +import { kuboK8sExtension } from "./extensions/kubo-k8s.extension.js"; + +export const KuboK8sUp = command.cli({ + description: "Start Kubo on K8s with Helm", + params: {}, + extensions: [kuboK8sExtension], + async handler({ dre, dre: { state, services: { kubo }, logger } }) { + if (await state.isKuboK8sRunning()) { + logger.log("Kubo already running"); + return; + } + + await dre.runCommand(KuboK8sBuild, {}); + + if (!(await state.isKuboK8sImageReady())) { + throw new DevNetError("Kubo image is not ready"); + } + + const { image, tag, registryHostname } = await state.getKuboK8sImage(); + + const env: Record = { + ...kubo.config.constants, + + CHAIN: "artifact", + }; + + const kuboHostname = process.env.KUBO_INGRESS_HOSTNAME?.
+ replace(NETWORK_NAME_SUBSTITUTION, DEFAULT_NETWORK_NAME); + + if (!kuboHostname) { + throw new DevNetError(`KUBO_INGRESS_HOSTNAME env variable is not set`); + } + + const KUBO_INGRESS_HOSTNAME = addPrefixToIngressHostname(kuboHostname); + + const HELM_RELEASE = 'lido-kubo-1'; + const helmLidoKuboSh = kubo.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + KUBO_INGRESS_HOSTNAME, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmLidoKuboSh`make debug`; + await helmLidoKuboSh`make lint`; + await helmLidoKuboSh`make install`; + + // TODO get service name from helm release + await state.updateKuboK8sRunning({ + helmRelease: HELM_RELEASE, + publicUrl: `http://${KUBO_INGRESS_HOSTNAME}`, + privateUrl: `http://${HELM_RELEASE}.${NAMESPACE(dre)}.svc.cluster.local:5001` + }); + }, +}); diff --git a/src/commands/kurtosis/dora/constants/dora.constants.ts b/src/commands/kurtosis/dora/constants/dora.constants.ts new file mode 100644 index 00000000..279f8bcd --- /dev/null +++ b/src/commands/kurtosis/dora/constants/dora.constants.ts @@ -0,0 +1,6 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const DORA_INGRESS_LABEL = { 'com.lido.devnet.dora': 'ingress' }; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}`; diff --git a/src/commands/kurtosis/dora/down.ts b/src/commands/kurtosis/dora/down.ts new file mode 100644 index 00000000..ce25f753 --- /dev/null +++ b/src/commands/kurtosis/dora/down.ts @@ -0,0 +1,45 @@ +import {command} from "@devnet/command"; +import { getK8s, getK8sIngress, k8s } from "@devnet/k8s"; + +import { doraExtension } from "../../dora/extensions/dora.extension.js"; +import { DORA_INGRESS_LABEL, NAMESPACE } from 
"./constants/dora.constants.js"; + +export const KurtosisDoraK8sIngressDown = command.cli({ + description: + "Undeploy Kurtosis K8s Ingress for Dora", + params: {}, + extensions: [doraExtension], + async handler({ dre, dre: { logger, state} }) { + logger.log( + "Un-deploying Kurtosis K8s Ingress for Dora...", + ); + + const kc = await getK8s(); + const k8sNetworkApi = kc.makeApiClient(k8s.NetworkingV1Api); + const ingresses = await getK8sIngress(dre, { label: DORA_INGRESS_LABEL }); + + logger.log( + `Total ingresses [${ingresses.length}] will be deleted`, + ); + + for (const ingress of ingresses) { + logger.log( + `ingress named [${ingress?.metadata?.name}] will be deleted`, + ); + + const name = ingress?.metadata?.name; + + if (!name) { + continue; + } + + const result = await k8sNetworkApi.deleteNamespacedIngress( + { namespace: NAMESPACE(dre), name }, + ); + + logger.log(`Successfully removed ingress: [${result.status}]`); + } + + await state.removeDora(); + }, +}); diff --git a/src/commands/kurtosis/dora/info.ts b/src/commands/kurtosis/dora/info.ts new file mode 100644 index 00000000..5bbc3afd --- /dev/null +++ b/src/commands/kurtosis/dora/info.ts @@ -0,0 +1,21 @@ +import { command } from "@devnet/command"; + +export const KurtosisDoraK8sInfo = command.cli({ + description: "Retrieves and displays information about the Dora.", + params: {}, + async handler({ + dre: { + logger, + state, + }, + }) { + logger.log(""); + const chainServices = await state.getDora(false); + logger.table( + ["Service", "URL"], + [ + ["dora-ui", (chainServices?.publicUrl ?? 
'')], + ], + ); + }, +}); diff --git a/src/commands/kurtosis/dora/templates/dora-ingress.template.ts b/src/commands/kurtosis/dora/templates/dora-ingress.template.ts new file mode 100644 index 00000000..087e7059 --- /dev/null +++ b/src/commands/kurtosis/dora/templates/dora-ingress.template.ts @@ -0,0 +1,47 @@ +import { DevNetRuntimeEnvironmentInterface, } from "@devnet/command"; +import * as k8s from "@kubernetes/client-node"; + +import { DORA_INGRESS_LABEL, NAMESPACE } from "../constants/dora.constants.js"; + +export const doraIngressTmpl = async ( + dre: DevNetRuntimeEnvironmentInterface, + doraHostname: string +) => ({ + apiVersion: "networking.k8s.io/v1", + kind: "Ingress", + metadata: { + name: "dora-devnet-ingress", + namespace: NAMESPACE(dre), + annotations: { + "traefik.ingress.kubernetes.io/router.entrypoints": "web", + }, + labels: { + "com.lido.devnet": "true", + ...DORA_INGRESS_LABEL, + }, + }, + spec: { + ingressClassName: "public", + rules: [ + { + host: doraHostname, + http: { + paths: [ + { + path: "/", + pathType: "Prefix", + backend: { + service: { + name: "dora", + port: { + number: 8080, + }, + }, + }, + }, + ], + }, + }, + ], + }, + } satisfies k8s.V1Ingress); diff --git a/src/commands/kurtosis/dora/up.ts b/src/commands/kurtosis/dora/up.ts new file mode 100644 index 00000000..a32dd99d --- /dev/null +++ b/src/commands/kurtosis/dora/up.ts @@ -0,0 +1,62 @@ +import { + DEFAULT_NETWORK_NAME, + NETWORK_NAME_SUBSTITUTION, + command, +} from "@devnet/command"; +import { + addPrefixToIngressHostname, + checkK8sIngressExists, + getK8s, + k8s, +} from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { doraExtension } from "../../dora/extensions/dora.extension.js"; +import { NAMESPACE } from "./constants/dora.constants.js"; +import { doraIngressTmpl } from "./templates/dora-ingress.template.js"; + +export const KurtosisDoraK8sIngressUp = command.cli({ + description: + "Deploy Kurtosis K8s Ingress for Dora", + params: {}, + extensions: 
[doraExtension], + async handler({ dre }) { + const { logger, state } = dre; + + logger.log( + "Deploying Kurtosis K8s Ingress for Dora...", + ); + + const doraHostname = process.env.DORA_INGRESS_HOSTNAME?. + replace(NETWORK_NAME_SUBSTITUTION, DEFAULT_NETWORK_NAME); + + if (!doraHostname) { + throw new DevNetError(`DORA_INGRESS_HOSTNAME env variable is not set`); + } + + const kc = await getK8s(); + const k8sNetworkApi = kc.makeApiClient(k8s.NetworkingV1Api); + const ingress = await doraIngressTmpl( + dre, + addPrefixToIngressHostname(doraHostname) + ); + const url = `http://${ingress.spec.rules[0].host}`; + + const exists = await checkK8sIngressExists(dre, { name: ingress.metadata.name}); + + if (exists) { + logger.log(`Ingress with name ${ingress.metadata.name} already exists. Dora URL: [${url}] . Skipping creation.`); + + await state.updateDora({ publicUrl: `http://${ingress.spec.rules[0].host}` }); + return; + } + + const result = await k8sNetworkApi.createNamespacedIngress( + { namespace: NAMESPACE(dre) , body: ingress }, + ); + + logger.log(`Successfully created Ingress: [${result.metadata?.name}]. 
Dora URL: [${url}]`); + + await state.updateDora({ publicUrl: url }); + }, +}); diff --git a/src/commands/kurtosis/download-artifacts.ts b/src/commands/kurtosis/download-artifacts.ts new file mode 100644 index 00000000..bbbdb317 --- /dev/null +++ b/src/commands/kurtosis/download-artifacts.ts @@ -0,0 +1,24 @@ +import { command } from "@devnet/command"; + +import { + startKurtosisGateway, + stopKurtosisGateway, +} from "./extensions/kurtosis.extension.js"; + +export const KurtosisDownloadArtifacts = command.cli({ + description: + "Downloads the genesis data for EL and CL nodes from the Kurtosis enclave.", + params: {}, + async handler({ dre, dre: { logger, services: { kurtosis } }, }) { + await startKurtosisGateway(dre); + + // removing network folder to avoid conflicts + await kurtosis.sh`rm -rf network`; + + await kurtosis.sh`kurtosis files download ${dre.network.name} el_cl_genesis_data network`; + + logger.log("Genesis data downloaded successfully."); + + await stopKurtosisGateway(dre); + }, +}); diff --git a/src/commands/kurtosis/extensions/kurtosis.extension.ts b/src/commands/kurtosis/extensions/kurtosis.extension.ts new file mode 100644 index 00000000..38b1d8db --- /dev/null +++ b/src/commands/kurtosis/extensions/kurtosis.extension.ts @@ -0,0 +1,119 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { DevNetError, isEmptyObject } from "@devnet/utils"; +import { execa, ResultPromise } from "execa"; +import { z } from "zod"; + +export const KURTOSIS_DEFAULT_PRESET = "pectra-devnet4"; + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getKurtosis(must?: M): Promise>; + isKurtosisDeployed(): Promise; + removeKurtosis(): Promise; + updateKurtosis(state: KurtosisState): Promise; + } + + export interface Config { + kurtosis: KurtosisState; + } +} + +export const 
KurtosisState = z.object({ + preset: z.string() +}); + +export type KurtosisState = z.infer; + +export const kurtosisExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateKurtosis = (async function (state: KurtosisState) { + await dre.state.updateProperties("kurtosis", state); + }); + + dre.state.removeKurtosis = (async function () { + await dre.state.updateProperties("kurtosis", {}); + }); + + dre.state.isKurtosisDeployed = (async function () { + const state = await dre.state.getKurtosis(false); + return state && !isEmptyObject(state); + }); + + dre.state.getKurtosis = (async function (must: M = true as M) { + const kurtosis = await dre.state.getProperties( + "kurtosis", + "kurtosis", + KurtosisState, + must, + ); + + return kurtosis; + }); +}; + +let kurtosisGatewayProcess: ResultPromise<{ + detached: true + stdio: "ignore" +}> | undefined = undefined; + +export const startKurtosisGateway = async (dre: DevNetRuntimeEnvironmentInterface) => { + if (kurtosisGatewayProcess) { + dre.logger.log(`Kurtosis gateway already started`); + return true; + } + + const kurtosisClusterType = await getKurtosisClusterType(dre); + + if (!isSupportedClusterType(kurtosisClusterType)) { + return; + } + + dre.logger.log(`Starting kurtosis gateway in the background`); + kurtosisGatewayProcess = execa('kurtosis', ['gateway'], { detached: true, stdio: 'ignore' }); + dre.logger.log(`Started kurtosis gateway`); + + // unref so it doesn’t keep the parent alive + //kurtosisGatewayProcess.unref(); + + // Make sure to kill it when this script ends + const cleanup = () => { + kurtosisGatewayProcess?.kill(); + }; + + process.on('exit', cleanup); + process.on('SIGINT', () => { cleanup(); }); + process.on('SIGTERM', () => { cleanup(); }); +}; + +export const stopKurtosisGateway = async (dre: DevNetRuntimeEnvironmentInterface) => { + if (!kurtosisGatewayProcess) { + return; + } + + dre.logger.log(`Kurtosis gateway will be killed`); + kurtosisGatewayProcess?.kill(); + 
kurtosisGatewayProcess = undefined; +} + +export const getKurtosisClusterType = async (dre: DevNetRuntimeEnvironmentInterface) => { + const result = await dre.services.kurtosis.sh({ + stdout: ["pipe"], + stderr: ["pipe"], + verbose() {}, + })`kurtosis cluster get` + .catch((error) => dre.logger.error(error.message)); + + const kurtosisClusterType = result?.stdout.trim(); + + if (!kurtosisClusterType) { + throw new DevNetError('Unable to detect kurtosis cluster type'); + } + + return kurtosisClusterType; +} + +export const isSupportedClusterType = (clusterType: string) => ['cloud', 'valset-sandbox3'].includes(clusterType); diff --git a/src/commands/kurtosis/get-cluster-info.ts b/src/commands/kurtosis/get-cluster-info.ts new file mode 100644 index 00000000..011b3e2f --- /dev/null +++ b/src/commands/kurtosis/get-cluster-info.ts @@ -0,0 +1,14 @@ +import { command } from "@devnet/command"; + +import { getKurtosisClusterType } from "./extensions/kurtosis.extension.js"; + +export const KurtosisGetClusterInfo = command.isomorphic({ + description: + "Get the Kurtosis cluster type", + params: {}, + async handler({dre, dre: { logger}}) { + logger.log("Kurtosis cluster info"); + + return await getKurtosisClusterType(dre); + }, +}); diff --git a/src/commands/kurtosis/nodes/ingress-down.ts b/src/commands/kurtosis/nodes/ingress-down.ts new file mode 100644 index 00000000..8fb3b348 --- /dev/null +++ b/src/commands/kurtosis/nodes/ingress-down.ts @@ -0,0 +1,44 @@ +import {command} from "@devnet/command"; +import { getK8s, getK8sIngress, k8s } from "@devnet/k8s"; + +import { ETH_NODE_INGRESS_LABEL } from "../../chain/constants/nodes-ingress.constants.js"; +import { nodesIngressExtension } from "../../chain/extensions/nodes-ingress.extension.js"; + + +export const KurtosisK8sNodesIngressDown = command.cli({ + description: + "Un-Deploy Kurtosis K8s Ingress for EL, CL and VC", + params: {}, + extensions: [nodesIngressExtension], + async handler({ dre, dre: { network, logger } }) { + 
logger.log( + "Un-deploying Kurtosis K8s Ingress for EL, CL and VC...", + ); + + const kc = await getK8s(); + const k8sNetworkApi = kc.makeApiClient(k8s.NetworkingV1Api); + const ingresses = await getK8sIngress(dre, { label: ETH_NODE_INGRESS_LABEL }); + + logger.log( + `Total ingresses [${ingresses.length}] will be deleted`, + ); + + for (const ingress of ingresses) { + logger.log( + `ingress named [${ingress?.metadata?.name}] will be deleted`, + ); + + const name = ingress?.metadata?.name; + + if (!name) { + continue; + } + + const result = await k8sNetworkApi.deleteNamespacedIngress( + { namespace: `kt-${network.name}`, name }, + ); + + logger.log(`Successfully removed ingress: [${result.status}]`); + } + }, +}); diff --git a/src/commands/kurtosis/nodes/ingress-up.ts b/src/commands/kurtosis/nodes/ingress-up.ts new file mode 100644 index 00000000..156ee9e5 --- /dev/null +++ b/src/commands/kurtosis/nodes/ingress-up.ts @@ -0,0 +1,118 @@ +import { + DEFAULT_NETWORK_NAME, + NETWORK_NAME_SUBSTITUTION, + command, +} from "@devnet/command"; +import { E, NEA, TE, pipe } from "@devnet/fp"; +import { checkK8sIngressExists, getK8s, k8s } from "@devnet/k8s"; +import { DevNetError, assertNonEmpty } from "@devnet/utils"; + +import { nodesIngressExtension } from "../../chain/extensions/nodes-ingress.extension.js"; +import { consensusIngressTmpl } from "./templates/consensus-ingress.template.js"; +import { executionIngressTmpl } from "./templates/execution-ingress.template.js"; +import { validatorClientIngressTmpl } from "./templates/validator-client-ingress.template.js"; + +export const KurtosisK8sNodesIngressUp = command.cli({ + description: + "Deploy Kurtosis K8s Ingress(es) for EL, CL and VC", + params: {}, + extensions: [nodesIngressExtension], + async handler({ dre }) { + const { logger, state } = dre; + + if(!(await state.isNodesDeployed())) { + throw new DevNetError("Nodes are not deployed. 
Please deploy them first."); + } + + const nodes = await state.getNodes(); + const kc = await getK8s(); + const k8sNetworkApi = kc.makeApiClient(k8s.NetworkingV1Api); + + const ETH_NODES_INGRESS_HOSTNAME = process.env.ETH_NODES_INGRESS_HOSTNAME?. + replace(NETWORK_NAME_SUBSTITUTION, DEFAULT_NETWORK_NAME); + + if (!ETH_NODES_INGRESS_HOSTNAME) { + throw new DevNetError(`ETH_NODES_INGRESS_HOSTNAME env variable is not set`); + } + + const elIngresses = await pipe( + nodes.el, + NEA.mapWithIndex((index, node) => { + const hostname = `${process.env.GLOBAL_INGRESS_HOST_PREFIX}-execution${index > 0 ? index : ''}.${ETH_NODES_INGRESS_HOSTNAME}`; + + return { ...node, hostname }; + }), + NEA.mapWithIndex((index, node) => + TE.tryCatchK(executionIngressTmpl, E.toError)(dre, node.k8sService, node.rpcPort, index, node.hostname) + ), + NEA.sequence(TE.ApplicativeSeq), + TE.execute + ); + + const clIngresses = await pipe( + nodes.cl, + NEA.mapWithIndex((index, node) => { + const hostname = `${process.env.GLOBAL_INGRESS_HOST_PREFIX}-consensus${index > 0 ? index : ''}.${ETH_NODES_INGRESS_HOSTNAME}`; + + return { ...node, hostname }; + }), + NEA.mapWithIndex((index, node) => + TE.tryCatchK(consensusIngressTmpl, E.toError)(dre, node.k8sService, node.httpPort, index, node.hostname) + ), + NEA.sequence(TE.ApplicativeSeq), + TE.execute + ); + + + const vcIngresses = await pipe( + nodes.vc, + NEA.mapWithIndex((index, node) => { + const hostname = `${process.env.GLOBAL_INGRESS_HOST_PREFIX}-validator${index > 0 ? 
index : ''}.${ETH_NODES_INGRESS_HOSTNAME}`; + + return { ...node, hostname }; + }), + NEA.mapWithIndex((index, node) => + TE.tryCatchK(validatorClientIngressTmpl, E.toError)(dre, node.k8sService, node.httpValidatorPort, index, node.hostname) + ), + NEA.sequence(TE.ApplicativeSeq), + TE.execute + ); + + await Promise.all([...elIngresses, ...clIngresses, ...vcIngresses].map(async (ingress) => { + const url = `http://${ingress.spec.rules[0].host}`; + + const exists = await checkK8sIngressExists(dre, { name: ingress.metadata.name}); + + if (exists) { + logger.log(`Ingress with name ${ingress.metadata.name} already exists. URL: [${url}]. Skipping creation.`); + return; + } + + const result = await k8sNetworkApi.createNamespacedIngress( + { namespace: `kt-${dre.network.name}` , body: ingress }, + ); + + logger.log(`Successfully created Ingress: [${result.metadata?.name}]. URL: [${url}]`); + })); + + const el = pipe(elIngresses, NEA.map(ingress => ({ + publicIngressUrl: `http://${ingress.spec.rules[0].host}`, + }))); + + const cl = pipe(clIngresses, NEA.map(ingress => ({ + publicIngressUrl: `http://${ingress.spec.rules[0].host}`, + }))); + + const vc = pipe(vcIngresses, NEA.map(ingress => ({ + publicIngressUrl: `http://${ingress.spec.rules[0].host}`, + }))); + + await state.updateNodesIngress( + { + el: assertNonEmpty(el), + cl: assertNonEmpty(cl), + vc: assertNonEmpty(vc) + } + ); + }, +}); diff --git a/src/commands/kurtosis/nodes/templates/consensus-ingress.template.ts b/src/commands/kurtosis/nodes/templates/consensus-ingress.template.ts new file mode 100644 index 00000000..6e6440dd --- /dev/null +++ b/src/commands/kurtosis/nodes/templates/consensus-ingress.template.ts @@ -0,0 +1,54 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +import * as k8s from "@kubernetes/client-node"; + +import { + CONSENSUS_INGRESS_LABEL, + ETH_NODE_INGRESS_LABEL, +} from "../../../chain/constants/nodes-ingress.constants.js"; + +export const consensusIngressTmpl = 
async ( + dre: DevNetRuntimeEnvironmentInterface, + serviceName: string, + port: number, + index: number, + hostname: string, + ) => ({ + apiVersion: "networking.k8s.io/v1", + kind: "Ingress", + metadata: { + name: `lido-devnet-consensus-ingress-${index}`, + namespace: `kt-${dre.network.name}`, + annotations: { + "traefik.ingress.kubernetes.io/router.entrypoints": "web", + }, + labels: { + "com.lido.devnet": "true", + ...CONSENSUS_INGRESS_LABEL, + ...ETH_NODE_INGRESS_LABEL, + }, + }, + spec: { + ingressClassName: "public", + rules: [ + { + host: hostname, + http: { + paths: [ + { + path: "/", + pathType: "Prefix", + backend: { + service: { + name: `${serviceName}`, + port: { + number: port, + }, + }, + }, + }, + ], + }, + }, + ], + }, + } satisfies k8s.V1Ingress); diff --git a/src/commands/kurtosis/nodes/templates/execution-ingress.template.ts b/src/commands/kurtosis/nodes/templates/execution-ingress.template.ts new file mode 100644 index 00000000..4c84e229 --- /dev/null +++ b/src/commands/kurtosis/nodes/templates/execution-ingress.template.ts @@ -0,0 +1,54 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +import * as k8s from "@kubernetes/client-node"; + +import { + ETH_NODE_INGRESS_LABEL, + EXECUTION_INGRESS_LABEL, +} from "../../../chain/constants/nodes-ingress.constants.js"; + +export const executionIngressTmpl = async ( + dre: DevNetRuntimeEnvironmentInterface, + serviceName: string, + port: number, + index: number, + hostname: string, +) => ({ + apiVersion: "networking.k8s.io/v1", + kind: "Ingress", + metadata: { + name: `lido-devnet-execution-ingress-${index}`, + namespace: `kt-${dre.network.name}`, + annotations: { + "traefik.ingress.kubernetes.io/router.entrypoints": "web", + }, + labels: { + "com.lido.devnet": "true", + ...EXECUTION_INGRESS_LABEL, + ...ETH_NODE_INGRESS_LABEL, + }, + }, + spec: { + ingressClassName: "public", + rules: [ + { + host: hostname, + http: { + paths: [ + { + path: "/", + pathType: "Prefix", + backend: { + 
service: { + name: `${serviceName}`, + port: { + number: port, + }, + }, + }, + }, + ], + }, + }, + ], + }, + } satisfies k8s.V1Ingress); diff --git a/src/commands/kurtosis/nodes/templates/validator-client-ingress.template.ts b/src/commands/kurtosis/nodes/templates/validator-client-ingress.template.ts new file mode 100644 index 00000000..f6f60cc4 --- /dev/null +++ b/src/commands/kurtosis/nodes/templates/validator-client-ingress.template.ts @@ -0,0 +1,54 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +import * as k8s from "@kubernetes/client-node"; + +import { + ETH_NODE_INGRESS_LABEL, + VALIDATOR_INGRESS_LABEL, +} from "../../../chain/constants/nodes-ingress.constants.js"; + +export const validatorClientIngressTmpl = async ( + dre: DevNetRuntimeEnvironmentInterface, + serviceName: string, + port: number, + index: number, + hostname: string, + ) => ({ + apiVersion: "networking.k8s.io/v1", + kind: "Ingress", + metadata: { + name: `lido-devnet-validator-client-ingress-${index}`, + namespace: `kt-${dre.network.name}`, + annotations: { + "traefik.ingress.kubernetes.io/router.entrypoints": "web", + }, + labels: { + "com.lido.devnet": "true", + ...VALIDATOR_INGRESS_LABEL, + ...ETH_NODE_INGRESS_LABEL, + }, + }, + spec: { + ingressClassName: "public", + rules: [ + { + host: hostname, + http: { + paths: [ + { + path: "/", + pathType: "Prefix", + backend: { + service: { + name: `${serviceName}`, + port: { + number: port, + }, + }, + }, + }, + ], + }, + }, + ], + }, + } satisfies k8s.V1Ingress); diff --git a/src/commands/kurtosis/restart-service.ts b/src/commands/kurtosis/restart-service.ts new file mode 100644 index 00000000..7292fd5a --- /dev/null +++ b/src/commands/kurtosis/restart-service.ts @@ -0,0 +1,35 @@ +import { Params, command } from "@devnet/command"; +import { DevNetError, sleep } from "@devnet/utils"; + +import { + kurtosisExtension, + startKurtosisGateway, + stopKurtosisGateway, +} from "./extensions/kurtosis.extension.js"; + +export 
const KurtosisRestartService = command.isomorphic({ + description: + "Restart a specific service in the Kurtosis enclave", + params: { + service: Params.string({ description: "Kurtosis Service" }) + }, + extensions: [kurtosisExtension], + async handler({ dre, dre: { logger, services: { kurtosis } }, params: { service } }) { + + if (!service) { + throw new DevNetError(`Kurtosis service not defined`); + } + + await startKurtosisGateway(dre); + + await kurtosis.sh`kurtosis service stop ${dre.network.name} ${service}`; + + await sleep(2000); + + await kurtosis.sh`kurtosis service start ${dre.network.name} ${service}`; + + logger.log(`Kurtosis service [${service}] restarted`); + + await stopKurtosisGateway(dre); + }, +}); diff --git a/src/commands/kurtosis/run-package.ts b/src/commands/kurtosis/run-package.ts new file mode 100644 index 00000000..10b630f3 --- /dev/null +++ b/src/commands/kurtosis/run-package.ts @@ -0,0 +1,62 @@ +import { Params, command } from "@devnet/command"; +import { DevNetError } from "@devnet/utils"; + +import { + KURTOSIS_DEFAULT_PRESET, + getKurtosisClusterType, + isSupportedClusterType, + kurtosisExtension, + startKurtosisGateway, + stopKurtosisGateway, +} from "./extensions/kurtosis.extension.js"; + + + +export const KurtosisRunPackage = command.isomorphic({ + description: + "Runs a specific Ethereum package in Kurtosis and updates local JSON database with the network information.", + params: { preset: Params.string({ description: "Kurtosis config name.", default: KURTOSIS_DEFAULT_PRESET }) }, + extensions: [kurtosisExtension], + async handler({ dre, dre: { logger, state, services: { kurtosis } }, params: { preset } }) { + + if (await state.isKurtosisDeployed()) { + logger.log(`Kurtosis already started with preset [${preset}]`); + return; + } + + logger.log(`Running Ethereum package with preset [${preset}] in Kurtosis...`); + const configFileName = `${preset}.yml`; + const file = await kurtosis.readYaml(configFileName).catch((error: any) => {
logger.warn( + `There was an error in the process of connecting the config, most likely you specified the wrong file name, check the "workspaces/kurtosis" folder`, + ); + + throw new DevNetError(error.message); + }); + + logger.log(`Resolved kurtosis config: ${configFileName}`); + logger.logJson(file); + + const kurtosisClusterType = await getKurtosisClusterType(dre); + + if (!isSupportedClusterType(kurtosisClusterType)) { + throw new DevNetError(`Unsupported kurtosis cluster type [${kurtosisClusterType}]`); + } + + logger.log(`Kurtosis cluster type [${kurtosisClusterType}]`); + + await startKurtosisGateway(dre); + + await kurtosis.sh`kurtosis run + --enclave ${dre.network.name} + github.com/lidofinance/ethereum-package + --production + --args-file ${configFileName}`; + + await state.updateKurtosis({ preset }); + + logger.log(`Kurtosis started with preset [${preset}]`); + + await stopKurtosisGateway(dre); + }, +}); diff --git a/src/commands/kurtosis/stop-package.ts b/src/commands/kurtosis/stop-package.ts new file mode 100644 index 00000000..b04341bf --- /dev/null +++ b/src/commands/kurtosis/stop-package.ts @@ -0,0 +1,30 @@ +import { command } from "@devnet/command"; + +import { + startKurtosisGateway, + stopKurtosisGateway, +} from "./extensions/kurtosis.extension.js"; + +export const KurtosisStopPackage = command.isomorphic({ + description: + "Destroys the Kurtosis enclave", + params: {}, + async handler({ dre, dre: { logger, services: { kurtosis }, state, network, }, }) { + logger.log("Destroying Kurtosis enclave..."); + + await startKurtosisGateway(dre); + + await kurtosis.sh`kurtosis enclave rm -f ${network.name}`.catch((error) => + logger.error(error.message), + ); + + await state.removeKurtosis(); + + logger.log("Removing kurtosis artifacts..."); + + await kurtosis.artifact.clean(); + logger.log("Cleanup completed successfully."); + + await stopKurtosisGateway(dre); + }, +}); diff --git a/src/commands/late-prover-bot-k8s/build.ts 
b/src/commands/late-prover-bot-k8s/build.ts new file mode 100644 index 00000000..a42228f3 --- /dev/null +++ b/src/commands/late-prover-bot-k8s/build.ts @@ -0,0 +1,31 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +export const LateProverBotK8sBuild = command.cli({ + description: "Build Late Prover Bot and push to Docker registry", + params: {}, + async handler({ dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const TAG = `kt-${network.name}`; + const IMAGE = `lido/late-prover-bot`; + + await buildAndPushDockerImage({ + cwd: services.lateProverBot.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: '.', + imageName: IMAGE, + tag: TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 'changeme', + }); + + logger.log(`late-prover-bot image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateLateProverBotK8sImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }) + }, +}); diff --git a/src/commands/late-prover-bot-k8s/constants/late-prover-bot-k8s.constants.ts b/src/commands/late-prover-bot-k8s/constants/late-prover-bot-k8s.constants.ts new file mode 100644 index 00000000..cc9b7c0c --- /dev/null +++ b/src/commands/late-prover-bot-k8s/constants/late-prover-bot-k8s.constants.ts @@ -0,0 +1,6 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-late-prover-bot`; + +export const SERVICE_NAME = "Late Prover Bot"; diff --git a/src/commands/late-prover-bot-k8s/down.ts b/src/commands/late-prover-bot-k8s/down.ts new file mode 100644 index 00000000..e6c3dca5 --- /dev/null +++ b/src/commands/late-prover-bot-k8s/down.ts @@ -0,0 +1,51 @@ +import { Params, command } from "@devnet/command"; 
+import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; + +import { NAMESPACE, SERVICE_NAME } from "./constants/late-prover-bot-k8s.constants.js"; + +export const LateProverBotK8sDown = command.cli({ + description: `Stop ${SERVICE_NAME} in K8s with Helm`, + params: { + force: Params.boolean({ + description: `Do not check that the ${SERVICE_NAME} was already stopped`, + default: false, + required: false, + }), + }, + async handler({ dre, dre: { services: { lateProverBot }, logger, state }, params }) { + if (!(await state.isLateProverBotK8sRunning()) && !(params.force)) { + logger.log(`${SERVICE_NAME} not running. Skipping`); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No ${SERVICE_NAME} releases found in namespace [${NAMESPACE(dre)}]. Skipping...`); + return; + } + + const HELM_RELEASE = releases[0]; + const helmSh = lateProverBot.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make uninstall`; + + logger.log(`${SERVICE_NAME} stopped.`); + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeLateProverBotK8sState(); + }, +}); diff --git a/src/commands/late-prover-bot-k8s/extensions/late-prover-bot-k8s.extension.ts b/src/commands/late-prover-bot-k8s/extensions/late-prover-bot-k8s.extension.ts new file mode 100644 index 00000000..13759ed6 --- /dev/null +++ b/src/commands/late-prover-bot-k8s/extensions/late-prover-bot-k8s.extension.ts @@ -0,0 +1,113 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// 
augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getLateProverBotK8sImage(must?: M,): Promise>; + getLateProverBotK8sRunning(must?: M,): Promise>; + getLateProverBotK8sState(must?: M,): Promise>; + + isLateProverBotK8sImageReady(): Promise; + isLateProverBotK8sRunning(): Promise; + + removeLateProverBotK8sState(): Promise; + + updateLateProverBotK8sImage(state: LateProverBotK8sStateImage): Promise; + updateLateProverBotK8sRunning(state: LateProverBotK8sStateRunning): Promise; + } + + export interface Config { + lateProverBotK8s: LateProverBotK8sState; + } +} + +export const LateProverBotK8sStateImage = z.object({ + image: z.string(), + tag: z.string(), + registryHostname: z.string() +}); + +export type LateProverBotK8sStateImage = z.infer; + +export const LateProverBotK8sStateRunning = z.object({ + helmRelease: z.string(), +}); + +export type LateProverBotK8sStateRunning = z.infer; + +export const LateProverBotK8sState = z.object({ + image: LateProverBotK8sStateImage.optional(), + running: LateProverBotK8sStateRunning.optional(), +}); + +export type LateProverBotK8sState = z.infer; + +export const lateProverBotK8sExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateLateProverBotK8sImage = (async function (stateImage: LateProverBotK8sStateImage) { + const state = await dre.state.getLateProverBotK8sState(false); + await dre.state.updateProperties("lateProverBotK8s", { ...state, image: stateImage }); + }); + + dre.state.updateLateProverBotK8sRunning = (async function (stateRunning: LateProverBotK8sStateRunning) { + const state = await dre.state.getLateProverBotK8sState(false); + await dre.state.updateProperties("lateProverBotK8s", { ...state, running: stateRunning }); + }); + + dre.state.removeLateProverBotK8sState = (async function () { + await dre.state.updateProperties("lateProverBotK8s", {}); + }); + + dre.state.isLateProverBotK8sImageReady = (async function () { + const state = await 
dre.state.getLateProverBotK8sImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isLateProverBotK8sRunning = (async function () { + const state = await dre.state.getLateProverBotK8sRunning(false); + return state && !isEmptyObject(state) && (state.helmRelease !== undefined); + }); + + dre.state.getLateProverBotK8sImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { + image: "lateProverBotK8s.image.image", + tag: "lateProverBotK8s.image.tag", + registryHostname: "lateProverBotK8s.image.registryHostname", + }, + "lateProverBotK8s", + LateProverBotK8sStateImage, + must, + ); + }); + + dre.state.getLateProverBotK8sRunning = (async function (must: M = true as M) { + return dre.state.getProperties( + { + helmRelease: "lateProverBotK8s.running.helmRelease", + }, + "lateProverBotK8s", + LateProverBotK8sStateRunning, + must, + ); + }); + + dre.state.getLateProverBotK8sState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'lateProverBotK8s', + "lateProverBotK8s", + LateProverBotK8sState, + must, + ); + }); +}; diff --git a/src/commands/late-prover-bot-k8s/up.ts b/src/commands/late-prover-bot-k8s/up.ts new file mode 100644 index 00000000..4c190ad9 --- /dev/null +++ b/src/commands/late-prover-bot-k8s/up.ts @@ -0,0 +1,76 @@ +import { command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { createNamespaceIfNotExists } from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { LateProverBotK8sBuild } from "./build.js"; +import { NAMESPACE, SERVICE_NAME } from "./constants/late-prover-bot-k8s.constants.js"; +import { lateProverBotK8sExtension } from "./extensions/late-prover-bot-k8s.extension.js"; + +export const LateProverBotK8sUp = command.cli({ + description: `Start ${SERVICE_NAME} on K8s with 
Helm`, + params: {}, + extensions: [lateProverBotK8sExtension], + async handler({ dre, dre: { state, services: { lateProverBot }, logger } }) { + if (await state.isLateProverBotK8sRunning()) { + logger.log(`${SERVICE_NAME} already running`); + return; + } + + if (!(await state.isChainDeployed())) { + throw new DevNetError("Chain is not deployed"); + } + + if (!(await state.isLidoDeployed())) { + throw new DevNetError("Lido is not deployed"); + } + + await dre.runCommand(LateProverBotK8sBuild, {}); + + if (!(await state.isLateProverBotK8sImageReady())) { + throw new DevNetError(`${SERVICE_NAME} image is not ready`); + } + + const { elPrivate, clPrivate } = await state.getChain(); + const { locator } = await state.getLido(); + const { deployer } = await state.getNamedWallet(); + const { image, tag, registryHostname } = await state.getLateProverBotK8sImage(); + const env: Record = { + ...lateProverBot.config.constants, + + CHAIN_ID: "32382", + LIDO_LOCATOR_ADDRESS: locator, + EL_RPC_URLS: elPrivate, + CL_API_URLS: clPrivate, + TX_SIGNER_PRIVATE_KEY: deployer.privateKey, + }; + + const HELM_RELEASE = 'lido-late-prover-bot'; + const helmSh = lateProverBot.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make install`; + + await state.updateLateProverBotK8sRunning({ + helmRelease: HELM_RELEASE, + }); + + logger.log(`${SERVICE_NAME} started.`); + }, +}); diff --git a/src/commands/lido-core/activate.ts b/src/commands/lido-core/activate.ts index 7e3af3f4..4b83642f 100644 --- a/src/commands/lido-core/activate.ts +++ b/src/commands/lido-core/activate.ts @@ -4,13 +4,18 @@ export const ActivateLidoProtocol = 
command.cli({ description: "Activates the lido-core protocol by deploying smart contracts and configuring the environment based on the current network state.", params: {}, - async handler({ dre }) { + async handler({ dre, dre: { logger} }) { const { state, services: { lidoCLI, oracle }, network, } = dre; + if (await state.isLidoActivated()) { + logger.log("Lido already activated"); + return; + } + const { elPublic } = await state.getChain(); const { deployer, oracles, councils } = await state.getNamedWallet(); const clClient = await network.getCLClient(); @@ -47,5 +52,7 @@ export const ActivateLidoProtocol = command.cli({ --dsm-guardians ${councils.map(({ publicKey }) => publicKey).join(",")} --dsm-quorum ${councils.length} --roles-beneficiary ${deployer.publicKey}`; + + await state.updateLidoActivated({ active: true }); }, }); diff --git a/src/commands/lido-core/add-new-operator.ts b/src/commands/lido-core/add-new-operator.ts index deab40d4..c0a81e91 100644 --- a/src/commands/lido-core/add-new-operator.ts +++ b/src/commands/lido-core/add-new-operator.ts @@ -1,4 +1,5 @@ -import { Params, assert, command } from "@devnet/command"; +import { Params, command } from "@devnet/command"; +import { assert } from "@devnet/utils"; import { LidoAddKeys } from "../lido-core/add-keys.js"; import { LidoAddOperator } from "../lido-core/add-operator.js"; diff --git a/src/commands/lido-core/deploy-tw.ts b/src/commands/lido-core/deploy-tw.ts index d75a3c45..42e161b1 100644 --- a/src/commands/lido-core/deploy-tw.ts +++ b/src/commands/lido-core/deploy-tw.ts @@ -29,7 +29,7 @@ export const DeployTWContracts = command.cli({ const { constants } = lidoCore.config; const { elPublic } = await state.getChain(); - const blockscoutState = await state.getBlockScout(); + const blockscoutState = await state.getBlockscout(); const clClient = await network.getCLClient(); const { @@ -40,10 +40,11 @@ export const DeployTWContracts = command.cli({ await dre.network.waitEL(); + const 
DEPOSIT_CONTRACT_ADDRESS = await dre.services.kurtosis.config.getters.DEPOSIT_CONTRACT_ADDRESS(dre.services.kurtosis); + const deployEnv: DeployEnvRequired = { DEPLOYER: deployer.publicKey, - // TODO: get DEPOSIT_CONTRACT from state - DEPOSIT_CONTRACT: constants.DEPOSIT_CONTRACT, + DEPOSIT_CONTRACT: DEPOSIT_CONTRACT_ADDRESS, GAS_MAX_FEE: constants.GAS_MAX_FEE, GAS_PRIORITY_FEE: constants.GAS_PRIORITY_FEE, LOCAL_DEVNET_PK: deployer.privateKey, diff --git a/src/commands/lido-core/deploy.ts b/src/commands/lido-core/deploy.ts index c7524087..3c559d84 100644 --- a/src/commands/lido-core/deploy.ts +++ b/src/commands/lido-core/deploy.ts @@ -1,5 +1,6 @@ import { Params, command } from "@devnet/command"; +import { lidoCoreExtension } from "./extensions/lido-core.extension.js"; import { PrepareLidoCore } from "./prepare-repository.js"; import { LidoCoreUpdateState } from "./update-state.js"; import { LidoCoreVerify } from "./verify.js"; @@ -16,6 +17,7 @@ type DeployEnvRequired = { NETWORK_STATE_FILE: string; RPC_URL: string; SLOTS_PER_EPOCH: string; + GAS_LIMIT?: string; }; export const DeployLidoContracts = command.cli({ @@ -28,12 +30,19 @@ export const DeployLidoContracts = command.cli({ required: true, }), }, + extensions:[lidoCoreExtension], async handler({ dre, dre: { logger }, params }) { const { state, services, network } = dre; const { lidoCore } = services; const { constants } = lidoCore.config; + if (await state.isLidoDeployed()) { + logger.log("Lido contracts are already deployed."); + return; + } + const { elPublic } = await state.getChain(); + await network.waitCL(); const clClient = await network.getCLClient(); const { @@ -50,10 +59,13 @@ export const DeployLidoContracts = command.cli({ vesting: "820000000000000000000000", }); + const DEPOSIT_CONTRACT_ADDRESS = await dre.services.kurtosis.config.getters.DEPOSIT_CONTRACT_ADDRESS(dre.services.kurtosis); + + logger.log(DEPOSIT_CONTRACT_ADDRESS); + const deployEnv: DeployEnvRequired = { DEPLOYER: 
deployer.publicKey, - // TODO: get DEPOSIT_CONTRACT from state - DEPOSIT_CONTRACT: constants.DEPOSIT_CONTRACT, + DEPOSIT_CONTRACT: DEPOSIT_CONTRACT_ADDRESS, GAS_MAX_FEE: constants.GAS_MAX_FEE, GAS_PRIORITY_FEE: constants.GAS_PRIORITY_FEE, LOCAL_DEVNET_PK: deployer.privateKey, @@ -63,8 +75,12 @@ export const DeployLidoContracts = command.cli({ GENESIS_TIME: genesis_time, RPC_URL: elPublic, SLOTS_PER_EPOCH: constants.SLOTS_PER_EPOCH, + GAS_LIMIT: '16000000', }; + // print git branch information + await lidoCore.sh`git status`; + await lidoCore.sh({ env: deployEnv })`bash -c scripts/dao-deploy.sh`; await dre.runCommand(LidoCoreUpdateState, {}); diff --git a/src/commands/lido-core/extensions/lido-core.extension.ts b/src/commands/lido-core/extensions/lido-core.extension.ts new file mode 100644 index 00000000..643dea3a --- /dev/null +++ b/src/commands/lido-core/extensions/lido-core.extension.ts @@ -0,0 +1,120 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getLido(must?: M,): Promise>; + getLidoActivated(must?: M,): Promise>; + isLidoActivated(): Promise; + isLidoDeployed(): Promise; + removeLido(): Promise; + updateLido(state: LidoCoreState): Promise; + updateLidoActivated(state: LidoCoreActiveState): Promise; + } + + export interface Config { + lidoCore: LidoCoreState; + lidoCoreActive: LidoCoreActiveState; + } +} + + +export const LidoCoreState = z.object({ + accountingOracle: z.string(), + agent: z.string(), + locator: z.string(), + lido: z.string(), + sanityChecker: z.string(), + tokenManager: z.string(), + validatorExitBus: z.string(), + voting: z.string(), + treasury: z.string(), + withdrawalVault: z.string(), + stakingRouter: z.string(), + 
curatedModule: z.string(), + acl: z.string(), + oracleDaemonConfig: z.string(), + withdrawalQueue: z.string(), + finance: z.string(), + + withdrawalVaultImpl: z.string(), + withdrawalQueueImpl: z.string(), + validatorExitBusImpl: z.string(), +}); + +export type LidoCoreState = z.infer; + +export const LidoCoreActiveState = z.object({ + active: z.boolean(), +}); + +export type LidoCoreActiveState = z.infer; + +export const lidoCoreExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateLido = (async function (state: LidoCoreState) { + await this.updateProperties("lidoCore", state); + }); + + dre.state.updateLidoActivated = (async function (state: LidoCoreActiveState) { + await this.updateProperties("lidoCoreActive", state); + }); + + dre.state.removeLido = (async function () { + await dre.state.updateProperties("lidoCore", {}); + }); + + dre.state.isLidoDeployed = (async function () { + const state = await dre.state.getLido(false); + return !isEmptyObject(state) && state.locator !== undefined; + }); + + dre.state.isLidoActivated = (async function () { + const state = await dre.state.getLidoActivated(false); + return !isEmptyObject(state) && state.active === true; + }); + + dre.state.getLido = (async function(must: M = true as M) { + return this.getProperties( + { + accountingOracle: "lidoCore.accountingOracle.proxy.address", + agent: "lidoCore.app:aragon-agent.proxy.address", + locator: "lidoCore.lidoLocator.proxy.address", + lido: "lidoCore.app:lido.proxy.address", + sanityChecker: "lidoCore.oracleReportSanityChecker.address", + tokenManager: "lidoCore.app:aragon-token-manager.proxy.address", + validatorExitBus: "lidoCore.validatorsExitBusOracle.proxy.address", + voting: "lidoCore.app:aragon-voting.proxy.address", + treasury: + "lidoCore.withdrawalVault.implementation.constructorArgs.1", + + stakingRouter: "lidoCore.stakingRouter.proxy.address", + curatedModule: "lidoCore.app:node-operators-registry.proxy.address", + acl: 
"lidoCore.aragon-acl.proxy.address", + oracleDaemonConfig: "lidoCore.oracleDaemonConfig.address", + withdrawalVault: "lidoCore.withdrawalVault.proxy.address", + withdrawalQueue: "lidoCore.withdrawalQueueERC721.proxy.address", + withdrawalVaultImpl: "lidoCore.withdrawalVault.implementation.address", + validatorExitBusImpl: "lidoCore.validatorsExitBusOracle.implementation.address", + withdrawalQueueImpl: "lidoCore.withdrawalQueueERC721.implementation.address", + finance: "lidoCore.app:aragon-finance.proxy.address" + }, + "lidoCore", + LidoCoreState, + must, + ); + }); + + dre.state.getLidoActivated = (async function(must: M = true as M) { + return this.getProperties( + "lidoCoreActive", + "lidoCoreActive", + LidoCoreActiveState, + must, + ); + }); +}; diff --git a/src/commands/lido-core/keys/use.ts b/src/commands/lido-core/keys/use.ts index 55493cd1..6aa33d76 100644 --- a/src/commands/lido-core/keys/use.ts +++ b/src/commands/lido-core/keys/use.ts @@ -1,4 +1,5 @@ -import { Params, assert, command } from "@devnet/command"; +import { Params, command } from "@devnet/command"; +import { assert } from "@devnet/utils"; export const UseLidoDevNetKeys = command.cli({ description: "Finds previously unused validator keys and saves them under the specified name in the lido-cli service.", @@ -19,7 +20,7 @@ export const UseLidoDevNetKeys = command.cli({ .replace("0x", "010000000000000000000000"); const lidoKeys = depositData.filter((d) => d.withdrawal_credentials === WC); - + const lidoUnusedKeys = lidoKeys.filter((k) => !k.used); assert(lidoUnusedKeys.length > 0, "No unused keys found."); diff --git a/src/commands/lido-core/prepare-repository.ts b/src/commands/lido-core/prepare-repository.ts index 4e47f2bb..6e126840 100644 --- a/src/commands/lido-core/prepare-repository.ts +++ b/src/commands/lido-core/prepare-repository.ts @@ -1,4 +1,5 @@ -import { Params, assert, command } from "@devnet/command"; +import { Params, command } from "@devnet/command"; +import { assert } from 
"@devnet/utils"; export const PrepareLidoCore = command.cli({ description: "Prepare lido core repository.", @@ -71,6 +72,7 @@ export const PrepareLidoCore = command.cli({ NODE_OPERATOR_NETWORK_PENETRATION_THRESHOLD_BP: 100, PREDICTION_DURATION_IN_SLOTS: 50_400, FINALIZATION_MAX_NEGATIVE_REBASE_EPOCH_SHIFT: 1350, + EXIT_EVENTS_LOOKBACK_WINDOW_IN_SLOTS: 100_800 }, }); diff --git a/src/commands/lido-core/verify.ts b/src/commands/lido-core/verify.ts index 620fb446..20fb35e5 100644 --- a/src/commands/lido-core/verify.ts +++ b/src/commands/lido-core/verify.ts @@ -25,15 +25,16 @@ export const LidoCoreVerify = command.cli({ const { elPublic } = await state.getChain(); const { deployer } = await state.getNamedWallet(); - const blockscoutState = await state.getBlockScout(); + const blockscoutState = await state.getBlockscout(); logger.log("Verifying deployed contracts..."); + const DEPOSIT_CONTRACT_ADDRESS = await dre.services.kurtosis.config.getters.DEPOSIT_CONTRACT_ADDRESS(dre.services.kurtosis); + const deployEnv: DeployEnvRequired = { DEPLOYER: deployer.publicKey, - // TODO: get DEPOSIT_CONTRACT from state - DEPOSIT_CONTRACT: constants.DEPOSIT_CONTRACT, + DEPOSIT_CONTRACT: DEPOSIT_CONTRACT_ADDRESS, GAS_MAX_FEE: constants.GAS_MAX_FEE, GAS_PRIORITY_FEE: constants.GAS_PRIORITY_FEE, LOCAL_DEVNET_PK: deployer.privateKey, diff --git a/src/commands/no-widget-backend/build.ts b/src/commands/no-widget-backend/build.ts new file mode 100644 index 00000000..2eae60d1 --- /dev/null +++ b/src/commands/no-widget-backend/build.ts @@ -0,0 +1,39 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +import { SERVICE_NAME } from "./constants/no-widget-backend.constants.js"; + +export const NoWidgetBackendBuild = command.cli({ + description: `Build ${SERVICE_NAME} and push to Docker registry`, + params: {}, + async handler({ dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const 
TAG = `kt-${network.name}`; + const IMAGE = 'lido/no-widget-backend'; + + await buildAndPushDockerImage({ + cwd: services.noWidgetBackend.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: '.', + imageName: IMAGE, + tag: TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 'changeme', + }); + + logger.log(`${SERVICE_NAME} image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateNoWidgetBackendImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }); + + return { + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + } + }, +}); diff --git a/src/commands/no-widget-backend/constants/no-widget-backend.constants.ts b/src/commands/no-widget-backend/constants/no-widget-backend.constants.ts new file mode 100644 index 00000000..007eea14 --- /dev/null +++ b/src/commands/no-widget-backend/constants/no-widget-backend.constants.ts @@ -0,0 +1,7 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-no-widget-backend`; + + +export const SERVICE_NAME = "NO Widget Backend"; diff --git a/src/commands/no-widget-backend/down.ts b/src/commands/no-widget-backend/down.ts new file mode 100644 index 00000000..6bc60c4d --- /dev/null +++ b/src/commands/no-widget-backend/down.ts @@ -0,0 +1,51 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; + +import { NAMESPACE, SERVICE_NAME } from "./constants/no-widget-backend.constants.js"; + +export const NoWidgetBackendDown = command.cli({ + description: `Stop ${SERVICE_NAME} in K8s with Helm`, + params: { + force: Params.boolean({ + description: `Do not check that the ${SERVICE_NAME} was 
already stopped`, + default: false, + required: false, + }), + }, + async handler({ dre, dre: { services: { noWidgetBackend }, logger, state }, params }) { + if (!(await state.isNoWidgetBackendRunning()) && !(params.force)) { + logger.log(`${SERVICE_NAME} not running. Skipping`); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No ${SERVICE_NAME} releases found in namespace [${NAMESPACE(dre)}]. Skipping...`); + return; + } + + const HELM_RELEASE = releases[0]; + const helmSh = noWidgetBackend.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make uninstall`; + + logger.log(`${SERVICE_NAME} stopped.`); + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeNoWidgetBackendState(); + }, +}); diff --git a/src/commands/no-widget-backend/extensions/no-widget-backend.extension.ts b/src/commands/no-widget-backend/extensions/no-widget-backend.extension.ts new file mode 100644 index 00000000..5d9caa5e --- /dev/null +++ b/src/commands/no-widget-backend/extensions/no-widget-backend.extension.ts @@ -0,0 +1,115 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getNoWidgetBackendImage(must?: M,): Promise>; + getNoWidgetBackendRunning(must?: M,): Promise>; + getNoWidgetBackendState(must?: M,): Promise>; + + isNoWidgetBackendImageReady(): Promise; + isNoWidgetBackendRunning(): Promise; + + removeNoWidgetBackendState(): Promise; + + updateNoWidgetBackendImage(state: NoWidgetBackendStateImage): Promise; + 
updateNoWidgetBackendRunning(state: NoWidgetBackendStateRunning): Promise; + } + + export interface Config { + noWidgetBackend: NoWidgetBackendState; + } +} + +export const NoWidgetBackendStateImage = z.object({ + image: z.string(), + tag: z.string(), + registryHostname: z.string() +}); + +export type NoWidgetBackendStateImage = z.infer; + +export const NoWidgetBackendStateRunning = z.object({ + publicUrl: z.string().url(), + privateUrl: z.string().url(), +}); + +export type NoWidgetBackendStateRunning = z.infer; + +export const NoWidgetBackendState = z.object({ + image: NoWidgetBackendStateImage.optional(), + running: NoWidgetBackendStateRunning.optional(), +}); + +export type NoWidgetBackendState = z.infer; + +export const noWidgetBackendExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateNoWidgetBackendImage = (async function (stateImage: NoWidgetBackendStateImage) { + const state = await dre.state.getNoWidgetBackendState(false); + await dre.state.updateProperties("noWidgetBackend", { ...state, image: stateImage }); + }); + + dre.state.updateNoWidgetBackendRunning = (async function (stateRunning: NoWidgetBackendStateRunning) { + const state = await dre.state.getNoWidgetBackendState(false); + await dre.state.updateProperties("noWidgetBackend", { ...state, running: stateRunning }); + }); + + dre.state.removeNoWidgetBackendState = (async function () { + await dre.state.updateProperties("noWidgetBackend", {}); + }); + + dre.state.isNoWidgetBackendImageReady = (async function () { + const state = await dre.state.getNoWidgetBackendImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isNoWidgetBackendRunning = (async function () { + const state = await dre.state.getNoWidgetBackendRunning(false); + return state && !isEmptyObject(state) && (state.privateUrl !== undefined); + }); + + dre.state.getNoWidgetBackendImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { 
+ image: "noWidgetBackend.image.image", + tag: "noWidgetBackend.image.tag", + registryHostname: "noWidgetBackend.image.registryHostname", + }, + "noWidgetBackend", + NoWidgetBackendStateImage, + must, + ); + }); + + dre.state.getNoWidgetBackendRunning = (async function (must: M = true as M) { + return dre.state.getProperties( + { + publicUrl: "noWidgetBackend.running.publicUrl", + privateUrl: "noWidgetBackend.running.privateUrl", + }, + "noWidgetBackend", + NoWidgetBackendStateRunning, + must, + ); + }); + + dre.state.getNoWidgetBackendState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'noWidgetBackend', + "noWidgetBackend", + NoWidgetBackendState, + must, + ); + }); +}; diff --git a/src/commands/no-widget-backend/up.ts b/src/commands/no-widget-backend/up.ts new file mode 100644 index 00000000..5c8ff559 --- /dev/null +++ b/src/commands/no-widget-backend/up.ts @@ -0,0 +1,106 @@ +import { + DEFAULT_NETWORK_NAME, + NETWORK_NAME_SUBSTITUTION, + command, +} from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + addPrefixToIngressHostname, + createNamespaceIfNotExists, +} from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { NoWidgetBackendBuild } from "./build.js"; +import { NAMESPACE, SERVICE_NAME } from "./constants/no-widget-backend.constants.js"; +import { noWidgetBackendExtension } from "./extensions/no-widget-backend.extension.js"; + +export const NoWidgetBackendUp = command.cli({ + description: `Start ${SERVICE_NAME} in K8s with Helm`, + params: {}, + extensions: [noWidgetBackendExtension], + async handler({ dre, dre: { state, services: { noWidgetBackend }, logger } }) { + if (await state.isNoWidgetBackendRunning()) { + logger.log(`${SERVICE_NAME} already running`); + return; + } + + if (!(await state.isChainDeployed())) { + throw new DevNetError("Chain is not 
deployed"); + } + + if (!(await state.isLidoDeployed())) { + throw new DevNetError("Lido is not deployed"); + } + + if (!(await state.isCSMDeployed())) { + throw new DevNetError("CSM is not deployed"); + } + + if (!(await state.isKapiK8sRunning())) { + throw new DevNetError("KAPI is not deployed"); + } + + await dre.runCommand(NoWidgetBackendBuild, {}); + + if (!(await state.isNoWidgetBackendImageReady())) { + throw new DevNetError(`${SERVICE_NAME} image is not ready`); + } + + const { elPrivate, clPrivate } = await state.getChain(); + + const { locator, lido, stakingRouter, curatedModule } = await state.getLido(); + const { module: csmModule } = await state.getCSM(); + const { privateUrl } = await state.getKapiK8sRunning(); + const { image, tag, registryHostname } = await state.getNoWidgetBackendImage(); + + const GENESIS_FORK_VERSION = await dre.services.kurtosis.config.getters.GENESIS_FORK_VERSION(dre.services.kurtosis); + + const env: Record = { + ...noWidgetBackend.config.constants, + IS_DEVNET_MODE: "1", + CHAIN_ID: "32382", + LIDO_DEVNET_ADDRESS: lido, + DEVNET_GENESIS_FORK_VERSION: GENESIS_FORK_VERSION.replace("0x", ""), + KEYS_API_HOST: privateUrl, + EL_API_URLS: elPrivate, + }; + + const hostname = process.env.NO_WIDGET_BACKEND_INGRESS_HOSTNAME?. 
+ replace(NETWORK_NAME_SUBSTITUTION, DEFAULT_NETWORK_NAME); + + if (!hostname) { + throw new DevNetError(`NO_WIDGET_BACKEND_INGRESS_HOSTNAME env variable is not set`); + } + + const INGRESS_HOSTNAME = addPrefixToIngressHostname(hostname); + + const HELM_RELEASE = 'lido-no-widget-backend-1'; + const helmSh = noWidgetBackend.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + INGRESS_HOSTNAME, + PG_HOST: `${HELM_RELEASE}-postgresql`, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make install`; + + await state.updateNoWidgetBackendRunning({ + publicUrl: `http://${INGRESS_HOSTNAME}`, + privateUrl: `http://${HELM_RELEASE}-lido-no-widget-backend-api.${NAMESPACE(dre)}.svc.cluster.local:3000` + }); + }, +}); diff --git a/src/commands/no-widget/build.ts b/src/commands/no-widget/build.ts new file mode 100644 index 00000000..24201e45 --- /dev/null +++ b/src/commands/no-widget/build.ts @@ -0,0 +1,39 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +import { SERVICE_NAME } from "./constants/no-widget.constants.js"; + +export const NoWidgetBuild = command.cli({ + description: `Build ${SERVICE_NAME} and push to Docker registry`, + params: {}, + async handler({ dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const TAG = `kt-${network.name}`; + const IMAGE = 'lido/no-widget'; + + await buildAndPushDockerImage({ + cwd: services.noWidget.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: '.', + imageName: IMAGE, + tag: TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 
'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 'changeme', + }); + + logger.log(`${SERVICE_NAME} image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateNoWidgetImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }); + + return { + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + } + }, +}); diff --git a/src/commands/no-widget/constants/no-widget.constants.ts b/src/commands/no-widget/constants/no-widget.constants.ts new file mode 100644 index 00000000..dbcdcb3c --- /dev/null +++ b/src/commands/no-widget/constants/no-widget.constants.ts @@ -0,0 +1,7 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-no-widget`; + + +export const SERVICE_NAME = "NO Widget"; diff --git a/src/commands/no-widget/down.ts b/src/commands/no-widget/down.ts new file mode 100644 index 00000000..7b7c01d2 --- /dev/null +++ b/src/commands/no-widget/down.ts @@ -0,0 +1,51 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + deleteNamespace, + getNamespacedDeployedHelmReleases, +} from "@devnet/k8s"; + +import { NAMESPACE, SERVICE_NAME } from "./constants/no-widget.constants.js"; + +export const NoWidgetDown = command.cli({ + description: `Stop ${SERVICE_NAME} in K8s with Helm`, + params: { + force: Params.boolean({ + description: `Do not check that the ${SERVICE_NAME} was already stopped`, + default: false, + required: false, + }), + }, + async handler({ dre, dre: { services: { noWidget }, logger, state }, params }) { + if (!(await state.isNoWidgetRunning()) && !(params.force)) { + logger.log(`${SERVICE_NAME} not running. 
Skipping`); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No ${SERVICE_NAME} releases found in namespace [${NAMESPACE(dre)}]. Skipping...`); + return; + } + + const HELM_RELEASE = releases[0]; + const helmSh = noWidget.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make uninstall`; + + logger.log(`${SERVICE_NAME} stopped.`); + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeNoWidgetState(); + }, +}); diff --git a/src/commands/no-widget/extensions/no-widget.extension.ts b/src/commands/no-widget/extensions/no-widget.extension.ts new file mode 100644 index 00000000..b6460547 --- /dev/null +++ b/src/commands/no-widget/extensions/no-widget.extension.ts @@ -0,0 +1,113 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface
declare module "@devnet/state" { + export interface StateInterface { + getNoWidgetImage(must?: M,): Promise>; + getNoWidgetRunning(must?: M,): Promise>; + getNoWidgetState(must?: M,): Promise>; + + isNoWidgetImageReady(): Promise; + isNoWidgetRunning(): Promise; + + removeNoWidgetState(): Promise; + + updateNoWidgetImage(state: NoWidgetStateImage): Promise; + updateNoWidgetRunning(state: NoWidgetStateRunning): Promise; + } + + export interface Config { + noWidget: NoWidgetState; + } +} + +export const NoWidgetStateImage = z.object({ + image: z.string(), + tag: z.string(), + registryHostname: z.string() +}); + +export type NoWidgetStateImage = z.infer; + +export const NoWidgetStateRunning = z.object({ + publicUrl: z.string().url(), +}); + +export 
type NoWidgetStateRunning = z.infer; + +export const NoWidgetState = z.object({ + image: NoWidgetStateImage.optional(), + running: NoWidgetStateRunning.optional(), +}); + +export type NoWidgetState = z.infer; + +export const noWidgetExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateNoWidgetImage = (async function (stateImage: NoWidgetStateImage) { + const state = await dre.state.getNoWidgetState(false); + await dre.state.updateProperties("noWidget", { ...state, image: stateImage }); + }); + + dre.state.updateNoWidgetRunning = (async function (stateRunning: NoWidgetStateRunning) { + const state = await dre.state.getNoWidgetState(false); + await dre.state.updateProperties("noWidget", { ...state, running: stateRunning }); + }); + + dre.state.removeNoWidgetState = (async function () { + await dre.state.updateProperties("noWidget", {}); + }); + + dre.state.isNoWidgetImageReady = (async function () { + const state = await dre.state.getNoWidgetImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isNoWidgetRunning = (async function () { + const state = await dre.state.getNoWidgetRunning(false); + return state && !isEmptyObject(state) && (state.publicUrl !== undefined); + }); + + dre.state.getNoWidgetImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { + image: "noWidget.image.image", + tag: "noWidget.image.tag", + registryHostname: "noWidget.image.registryHostname", + }, + "noWidget", + NoWidgetStateImage, + must, + ); + }); + + dre.state.getNoWidgetRunning = (async function (must: M = true as M) { + return dre.state.getProperties( + { + publicUrl: "noWidget.running.publicUrl", + }, + "noWidget", + NoWidgetStateRunning, + must, + ); + }); + + dre.state.getNoWidgetState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'noWidget', + "noWidget", + NoWidgetState, + must, + ); + }); +}; diff --git a/src/commands/no-widget/up.ts 
b/src/commands/no-widget/up.ts new file mode 100644 index 00000000..3a0da652 --- /dev/null +++ b/src/commands/no-widget/up.ts @@ -0,0 +1,107 @@ +import { + DEFAULT_NETWORK_NAME, + NETWORK_NAME_SUBSTITUTION, + command, +} from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { + addPrefixToIngressHostname, + createNamespaceIfNotExists, +} from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { NoWidgetBuild } from "./build.js"; +import { NAMESPACE, SERVICE_NAME } from "./constants/no-widget.constants.js"; +import { noWidgetExtension } from "./extensions/no-widget.extension.js"; + +export const NoWidgetUp = command.cli({ + description: `Start ${SERVICE_NAME} in K8s with Helm`, + params: {}, + extensions: [noWidgetExtension], + async handler({ dre, dre: { state, services: { noWidget }, logger } }) { + if (await state.isNoWidgetRunning()) { + logger.log(`${SERVICE_NAME} already running`); + return; + } + + if (!(await state.isChainDeployed())) { + throw new DevNetError("Chain is not deployed"); + } + + if (!(await state.isLidoDeployed())) { + throw new DevNetError("Lido is not deployed"); + } + + if (!(await state.isCSMDeployed())) { + throw new DevNetError("CSM is not deployed"); + } + + if (!(await state.isKapiK8sRunning())) { + throw new DevNetError("KAPI is not deployed"); + } + + if (!(await state.isNoWidgetBackendRunning())) { + throw new DevNetError("NO Widget Backend is not deployed"); + } + + const result = await dre.runCommand(NoWidgetBuild, {}); + + if (!(await state.isNoWidgetImageReady())) { + throw new DevNetError(`${SERVICE_NAME} image is not ready`); + } + + const { elPrivate, clPrivate } = await state.getChain(); + + const { locator, lido, stakingRouter, curatedModule } = await state.getLido(); + const { module: csmModule } = await state.getCSM(); + + const { privateUrl } = await 
state.getNoWidgetBackendRunning(); + const { image, tag, registryHostname } = await state.getNoWidgetImage(); + + + const env: Record = { + ...noWidget.config.constants, + NODE_ENV: "production", + EL_RPC_URLS_17000: elPrivate, + BACKEND_URL_17000: privateUrl, + SUPPORTED_CHAINS: "17000", + DEFAULT_CHAIN: "17000" + }; + + const hostname = process.env.NO_WIDGET_INGRESS_HOSTNAME?. + replace(NETWORK_NAME_SUBSTITUTION, DEFAULT_NETWORK_NAME); + + if (!hostname) { + throw new DevNetError(`NO_WIDGET_INGRESS_HOSTNAME env variable is not set`); + } + + const INGRESS_HOSTNAME = addPrefixToIngressHostname(hostname); + + const HELM_RELEASE = 'lido-no-widget-1'; + const helmSh = noWidget.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + INGRESS_HOSTNAME, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmSh`make debug`; + await helmSh`make lint`; + await helmSh`make install`; + + await state.updateNoWidgetRunning({ + publicUrl: `http://${INGRESS_HOSTNAME}` + }); + }, +}); diff --git a/src/commands/oracles-k8s/build.ts b/src/commands/oracles-k8s/build.ts new file mode 100644 index 00000000..59264f56 --- /dev/null +++ b/src/commands/oracles-k8s/build.ts @@ -0,0 +1,31 @@ +import { command } from "@devnet/command"; +import { buildAndPushDockerImage } from "@devnet/docker"; + +export const OracleK8sBuild = command.cli({ + description: "Build Oracle and push to Docker registry", + params: {}, + async handler({ dre, dre: { state, network, services, logger } }) { + const dockerRegistry = await state.getDockerRegistry(); + + const TAG = `kt-${network.name}`; + const IMAGE = `lido/oracle`; + + await buildAndPushDockerImage({ + cwd: services.oracle.artifact.root, + registryHostname: dockerRegistry.registryHostname, + buildContext: 
'.', + imageName: IMAGE, + tag: TAG, + password: process.env.DOCKER_REGISTRY_PASSWORD ?? 'admin', + username: process.env.DOCKER_REGISTRY_USERNAME ?? 'changeme', + }); + + logger.log(`Oracle image pushed to ${dockerRegistry.registryUrl}/${IMAGE}:${TAG}`); + + await state.updateOraclesK8sImage({ + tag: TAG, + image: IMAGE, + registryHostname: dockerRegistry.registryHostname, + }) + }, +}); diff --git a/src/commands/oracles-k8s/constants/oracles-k8s.constants.ts b/src/commands/oracles-k8s/constants/oracles-k8s.constants.ts new file mode 100644 index 00000000..04b18ffd --- /dev/null +++ b/src/commands/oracles-k8s/constants/oracles-k8s.constants.ts @@ -0,0 +1,4 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; + +export const NAMESPACE = (dre: DevNetRuntimeEnvironmentInterface) => + `kt-${dre.network.name}-oracles`; diff --git a/src/commands/oracles-k8s/down.ts b/src/commands/oracles-k8s/down.ts new file mode 100644 index 00000000..d0412e2f --- /dev/null +++ b/src/commands/oracles-k8s/down.ts @@ -0,0 +1,52 @@ +import { Params, command } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { deleteNamespace, getNamespacedDeployedHelmReleases } from "@devnet/k8s"; + +import { KuboK8sDown } from "../kubo-k8s/down.js"; +import { NAMESPACE } from "./constants/oracles-k8s.constants.js"; + +export const OracleK8sDown = command.cli({ + description: "Stop Oracle(s) in K8s with Helm", + params: { + force: Params.boolean({ + description: "Do not check that the Oracles was already stopped", + default: false, + required: false, + }), + }, + async handler({ dre, dre: { state, services: { oracle }, logger }, params }) { + + if (!(await state.isOraclesK8sRunning()) && !(params.force)) { + logger.log("Oracles are not running. 
Skipping"); + return; + } + + const releases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + + if (releases.length === 0) { + logger.log(`No Oracles releases found in namespace [${NAMESPACE(dre)}]. Skipping...`); + return; + } + + for (const release of releases) { + const helmLidoOracleSh = oracle.sh({ + env: { + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE: release, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + }, + }); + + await helmLidoOracleSh`make debug`; + await helmLidoOracleSh`make lint`; + await helmLidoOracleSh`make uninstall`; + logger.log(`Oracles [${release}] stopped.`); + } + + await deleteNamespace(NAMESPACE(dre)); + + await state.removeOraclesK8sState(); + + await dre.runCommand(KuboK8sDown, { force: false }); + }, +}); diff --git a/src/commands/oracles-k8s/extensions/oracles-k8s.extension.ts b/src/commands/oracles-k8s/extensions/oracles-k8s.extension.ts new file mode 100644 index 00000000..51700071 --- /dev/null +++ b/src/commands/oracles-k8s/extensions/oracles-k8s.extension.ts @@ -0,0 +1,113 @@ +import { DevNetRuntimeEnvironmentInterface } from "@devnet/command"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars +import { Config, StateInterface } from "@devnet/state"; +import { isEmptyObject } from "@devnet/utils"; +import { z } from "zod"; + + +// augmenting the StateInterface +declare module "@devnet/state" { + export interface StateInterface { + getOraclesK8sImage(must?: M,): Promise>; + getOraclesK8sRunning(must?: M,): Promise>; + getOraclesK8sState(must?: M,): Promise>; + + isOraclesK8sImageReady(): Promise; + isOraclesK8sRunning(): Promise; + + removeOraclesK8sState(): Promise; + + updateOraclesK8sImage(state: OraclesK8sStateImage): Promise; + updateOraclesK8sRunning(state: OraclesK8sStateRunning): Promise; + } + + export interface Config { + oraclesK8s: OraclesK8sState; + } +} + +export const OraclesK8sStateImage = z.object({ + image: z.string(), + tag: z.string(), + registryHostname: z.string() +}); + 
+export type OraclesK8sStateImage = z.infer; + +export const OraclesK8sStateRunning = z.object({ + helmReleases: z.array(z.string()), +}); + +export type OraclesK8sStateRunning = z.infer; + +export const OraclesK8sState = z.object({ + image: OraclesK8sStateImage.optional(), + running: OraclesK8sStateRunning.optional(), +}); + +export type OraclesK8sState = z.infer; + +export const oraclesK8sExtension = (dre: DevNetRuntimeEnvironmentInterface) => { + dre.state.updateOraclesK8sImage = (async function (stateImage: OraclesK8sStateImage) { + const state = await dre.state.getOraclesK8sState(false); + await dre.state.updateProperties("oraclesK8s", { ...state, image: stateImage }); + }); + + dre.state.updateOraclesK8sRunning = (async function (stateRunning: OraclesK8sStateRunning) { + const state = await dre.state.getOraclesK8sState(false); + await dre.state.updateProperties("oraclesK8s", { ...state, running: stateRunning }); + }); + + dre.state.removeOraclesK8sState = (async function () { + await dre.state.updateProperties("oraclesK8s", {}); + }); + + dre.state.isOraclesK8sImageReady = (async function () { + const state = await dre.state.getOraclesK8sImage(false); + return state && !isEmptyObject(state) && (state.image !== undefined); + }); + + dre.state.isOraclesK8sRunning = (async function () { + const state = await dre.state.getOraclesK8sRunning(false); + return state && !isEmptyObject(state) && (state.helmReleases !== undefined) && (state.helmReleases?.length > 0); + }); + + dre.state.getOraclesK8sImage = (async function (must: M = true as M) { + return dre.state.getProperties( + { + image: "oraclesK8s.image.image", + tag: "oraclesK8s.image.tag", + registryHostname: "oraclesK8s.image.registryHostname", + }, + "oraclesK8s", + OraclesK8sStateImage, + must, + ); + }); + + dre.state.getOraclesK8sRunning = (async function (must: M = true as M) { + return dre.state.getProperties( + { + helmReleases: "oraclesK8s.running.helmReleases", + }, + "oraclesK8s", + 
OraclesK8sStateRunning, + must, + ); + }); + + dre.state.getOraclesK8sState = (async function (must: M = true as M) { + return dre.state.getProperties( + 'oraclesK8s', + "oraclesK8s", + OraclesK8sState, + must, + ); + }); +}; diff --git a/src/commands/oracles-k8s/up.ts b/src/commands/oracles-k8s/up.ts new file mode 100644 index 00000000..2e39cda8 --- /dev/null +++ b/src/commands/oracles-k8s/up.ts @@ -0,0 +1,140 @@ +import { command, Params } from "@devnet/command"; +import { HELM_VENDOR_CHARTS_ROOT_PATH } from "@devnet/helm"; +import { createNamespaceIfNotExists, getNamespacedDeployedHelmReleases } from "@devnet/k8s"; +import { DevNetError } from "@devnet/utils"; + +import { DockerRegistryPushPullSecretToK8s } from "../docker-registry/push-pull-secret-to-k8s.js"; +import { KuboK8sUp } from "../kubo-k8s/up.js"; +import { OracleK8sBuild } from "./build.js"; +import { NAMESPACE } from "./constants/oracles-k8s.constants.js"; +import { oraclesK8sExtension } from "./extensions/oracles-k8s.extension.js"; + +export const OracleK8sUp = command.cli({ + description: "Start Oracle(s) in K8s with Helm", + params: { + tag: Params.string({ + description: "Oracle image tag", + default: '6.0.1', + required: false, + }), + build: Params.boolean({ + description: "Build oracle image from git repo instead of tag", + default: false, + required: false, + }), + }, + extensions: [oraclesK8sExtension], + async handler({ dre: { logger, state, services: { oracle } }, dre, params }) { + if (!(await state.isChainDeployed())) { + throw new DevNetError("Chain is not deployed"); + } + + if (!(await state.isLidoDeployed())) { + throw new DevNetError("Lido is not deployed"); + } + + if (!(await state.isCSMDeployed())) { + throw new DevNetError("CSM is not deployed"); + } + + if (!(await state.isKapiK8sRunning())) { + throw new DevNetError("KAPI is not deployed"); + } + + await dre.runCommand(KuboK8sUp, {}); + + const { privateUrl: kuboPrivateUrl } = await state.getKuboK8sRunning(); + + const 
buildAndGetImage = async () => { + await dre.runCommand(OracleK8sBuild, {}); + if (!(await state.isOraclesK8sImageReady())) { + throw new DevNetError("Oracle image is not ready"); + } + + return await state.getOraclesK8sImage() + } + + const { image, tag, registryHostname } = params.build + ? await buildAndGetImage() + : { + tag: params.tag, + image: 'lidofinance/oracle', + registryHostname: 'docker.io' + }; + + + const { elPrivate, clPrivate } = await state.getChain(); + + const { locator, stakingRouter, curatedModule } = await state.getLido(); + const { module: csmModule } = await state.getCSM(); + const { oracle1, oracle2, oracle3 } = await state.getNamedWallet(); + const { privateUrl: kapiPrivateUrl } = await state.getKapiK8sRunning(); + + const env: Record = { + ...oracle.config.constants, + + CHAIN_ID: "32382", + EXECUTION_CLIENT_URI: elPrivate, + CONSENSUS_CLIENT_URI: clPrivate, + LIDO_LOCATOR_ADDRESS: locator, + KEYS_API_URI: kapiPrivateUrl, + CSM_MODULE_ADDRESS: csmModule, + CSM_ORACLE_MAX_CONCURRENCY: "1", + SUBMIT_DATA_DELAY_IN_SLOTS: "1", + ALLOW_REPORTING_IN_BUNKER_MODE: "false", + PINATA_JWT: process.env.CSM_ORACLE_PINATA_JWT ?? 
"", + KUBO_HOST: kuboPrivateUrl.replace(":5001", ""), + }; + + const helmReleases = [ + { HELM_RELEASE: 'oracle-accounting-1', command: 'accounting', privateKey: oracle1 }, + { HELM_RELEASE: 'oracle-accounting-2', command: 'accounting', privateKey: oracle2 }, + { HELM_RELEASE: 'oracle-ejector-1', command: 'ejector', privateKey: oracle1 }, + { HELM_RELEASE: 'oracle-ejector-2', command: 'ejector', privateKey: oracle2 }, + { HELM_RELEASE: 'oracle-csm-1', command: 'csm', privateKey: oracle1 }, + { HELM_RELEASE: 'oracle-csm-2', command: 'csm', privateKey: oracle2 }, + ]; + + for (const release of helmReleases) { + const { HELM_RELEASE, privateKey, command } = release; + + const alreadyDeployedHelmReleases = await getNamespacedDeployedHelmReleases(NAMESPACE(dre)); + if (alreadyDeployedHelmReleases?.includes(HELM_RELEASE)) { + logger.log(`Oracles release ${HELM_RELEASE} already running`); + continue; + } + + const helmLidoOracleSh = oracle.sh({ + env: { + ...env, + NAMESPACE: NAMESPACE(dre), + HELM_RELEASE, + HELM_CHART_ROOT_PATH: HELM_VENDOR_CHARTS_ROOT_PATH, + IMAGE: image, + TAG: tag, + REGISTRY_HOSTNAME: registryHostname, + MEMBER_PRIV_KEY: privateKey.privateKey, + COMMAND: command, + }, + }); + + await createNamespaceIfNotExists(NAMESPACE(dre)); + + await dre.runCommand(DockerRegistryPushPullSecretToK8s, { namespace: NAMESPACE(dre) }); + + await helmLidoOracleSh`make debug`; + await helmLidoOracleSh`make lint`; + + try { + await helmLidoOracleSh`make install`; + } catch { + // rollback changes + await helmLidoOracleSh`make uninstall`; + } + } + + await state.updateOraclesK8sRunning({ + helmReleases: ['active'], + }); + }, +}); diff --git a/src/commands/oracles/down.ts b/src/commands/oracles/down.ts deleted file mode 100644 index ad0c1b26..00000000 --- a/src/commands/oracles/down.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { command } from "@devnet/command"; - -export const OracleDown = command.cli({ - description: "Stop Oracle(s)", - params: {}, - async handler({ - 
dre: { - services: { oracle }, - }, - }) { - await oracle.sh`docker compose -f docker-compose.devnet.yml down -v`; - }, -}); diff --git a/src/commands/oracles/logs.ts b/src/commands/oracles/logs.ts deleted file mode 100644 index 378e350f..00000000 --- a/src/commands/oracles/logs.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { command } from "@devnet/command"; - -export const OracleLogs = command.cli({ - description: "Show Oracle(s) logs", - params: {}, - async handler({ - dre: { - services: { oracle }, - }, - }) { - await oracle.sh`docker compose -f docker-compose.devnet.yml logs -f`; - }, -}); diff --git a/src/commands/oracles/up.ts b/src/commands/oracles/up.ts deleted file mode 100644 index 64220054..00000000 --- a/src/commands/oracles/up.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { command } from "@devnet/command"; - -import { GitCheckout } from "../git/checkout.js"; - -export const OracleUp = command.cli({ - description: "Start Oracle(s)", - params: {}, - async handler({ dre: { state, network, services }, dre }) { - const { oracle } = services; - await dre.runCommand(GitCheckout, { - service: "oracle", - ref: "feat/oracle-v6", - }); - - const { elPrivate } = await state.getChain(); - const { clNodesSpecs } = await state.getNodes(); - - const distinctConsensusUris = Object.values( - Object.fromEntries( - new Map( - clNodesSpecs.map((c) => { - const port = c.ports.find((p) => p.privateUrl); - if (!port || !port.privateUrl) { - throw new Error(`Missing privateUrl for client: ${c.client} (${c.name})`); - } - return [c.client, port.privateUrl]; - }) - ) - ) - ); - - - const { locator } = await state.getLido(); - const { module: csmModule } = await state.getCSM(); - const { oracle1, oracle2, oracle3 } = await state.getNamedWallet(); - - const env = { - CHAIN_ID: "32382", - EXECUTION_CLIENT_URI_1: elPrivate, - EXECUTION_CLIENT_URI_2: elPrivate, - EXECUTION_CLIENT_URI_3: elPrivate, - CONSENSUS_CLIENT_URI_1: distinctConsensusUris[0], - CONSENSUS_CLIENT_URI_2: 
distinctConsensusUris[1], - CONSENSUS_CLIENT_URI_3: distinctConsensusUris[2], - LIDO_LOCATOR_ADDRESS: locator, - CSM_MODULE_ADDRESS: csmModule, - MEMBER_PRIV_KEY_1: oracle1.privateKey, - MEMBER_PRIV_KEY_2: oracle2.privateKey, - MEMBER_PRIV_KEY_3: oracle3.privateKey, - PINATA_JWT: process.env.CSM_ORACLE_PINATA_JWT ?? "", - DOCKER_NETWORK_NAME: `kt-${network.name}`, - COMPOSE_PROJECT_NAME: `oracles-${network.name}`, - }; - - await oracle.writeENV(".env", env); - - await oracle.sh`docker compose -f docker-compose.devnet.yml up --build -d`; - }, -}); diff --git a/src/commands/ssh/tunnel.ts b/src/commands/ssh/tunnel.ts new file mode 100644 index 00000000..9961222d --- /dev/null +++ b/src/commands/ssh/tunnel.ts @@ -0,0 +1,80 @@ +import { command } from "@devnet/command"; +import { DevNetError } from "@devnet/utils"; +import { execa } from "execa"; + + +export const SSHTunnel = command.cli({ + description: "Start SSH tunnel", + params: {}, + extensions: [], + async handler({ dre: { logger } }) { + const sshHost = process.env.SSH_HOST; + const sshUser = process.env.SSH_USER ?? process.env.USER; + const sshPrivateKey = process.env.SSH_PRIVATE_KEY ?? 
'~/.ssh/id_rsa'; + const sshTunnelRemoteAddress = process.env.SSH_TUNNEL_REMOTE_ADDRESS; + const sshTunnelLocalPort = process.env.SSH_TUNNEL_LOCAL_PORT; + + // Validate required environment variables + if (!sshHost) { + throw new DevNetError("SSH_HOST environment variable is required"); + } + + if (!sshUser) { + throw new DevNetError("SSH_USER environment variable is required"); + } + + if (!sshPrivateKey) { + throw new DevNetError("SSH_PRIVATE_KEY environment variable is required"); + } + + if (!sshTunnelRemoteAddress) { + throw new DevNetError("SSH_TUNNEL_REMOTE_ADDRESS environment variable is required"); + } + + if (!sshTunnelLocalPort) { + throw new DevNetError("SSH_TUNNEL_LOCAL_PORT environment variable is required"); + } + + logger.log(`Starting SSH tunnel to ${sshHost}...`); + logger.log(`Tunnel configuration: ${sshTunnelLocalPort} -> ${sshTunnelRemoteAddress}`); + + const sshArgs = [ + '-i', sshPrivateKey, + '-L', `${sshTunnelLocalPort}:${sshTunnelRemoteAddress}`, + '-N', // Don't execute a remote command + '-T', // Disable pseudo-terminal allocation + `${sshUser}@${sshHost}` + ]; + + try { + const sshProcess = execa('ssh', sshArgs, { + stdio: 'inherit' + }); + + // Handle process termination gracefully + process.on('SIGINT', () => { + logger.log('Received SIGINT, terminating SSH tunnel...'); + sshProcess.kill('SIGTERM'); + setTimeout(() => { + if (!sshProcess.killed) { + logger.log('Force killing SSH process...'); + sshProcess.kill('SIGKILL'); + } + }, 5000); + }); + + process.on('SIGTERM', () => { + logger.log('Received SIGTERM, terminating SSH tunnel...'); + sshProcess.kill('SIGTERM'); + }); + + + logger.log(`SSH tunnel starting on local port ${sshTunnelLocalPort}`); + logger.log(`For better experience turn on keep-alive in your SSH client`); + await sshProcess; + + } catch (error) { + throw new DevNetError(`SSH tunnel failed: ${error instanceof Error ? 
error.message : String(error)}`); + } + }, +}); diff --git a/src/commands/stands/csm-v2.ts b/src/commands/stands/csm-v2.ts index 777c0935..7bf2286f 100644 --- a/src/commands/stands/csm-v2.ts +++ b/src/commands/stands/csm-v2.ts @@ -1,23 +1,23 @@ import { Params, command } from "@devnet/command"; import { BlockscoutUp } from "../blockscout/up.js"; -import { KurtosisGetInfo } from "../chain/info.js"; -import { KurtosisUp } from "../chain/up.js"; -import { CouncilUp } from "../council/up.js"; +import { ChainGetInfo } from "../chain/info.js"; +import { ChainUp } from "../chain/up.js"; +import { CouncilK8sUp } from "../council-k8s/up.js"; import { ActivateCSM } from "../csm/activate.js"; import { LidoAddCSMOperatorWithKeys } from "../csm/add-operator.js"; import { DeployCSMContracts } from "../csm/deploy.js"; import { DataBusDeploy } from "../data-bus/deploy.js"; -import { DSMBotsUp } from "../dsm-bots/up.js"; +import { DSMBotsK8sUp } from "../dsm-bots-k8s/up.js"; import { GitCheckout } from "../git/checkout.js"; -import { KapiUp } from "../kapi/up.js"; +import { KapiK8sUp } from "../kapi-k8s/up.js"; import { ActivateLidoProtocol } from "../lido-core/activate.js"; import { DeployLidoContracts } from "../lido-core/deploy.js"; import { LidoDeposit } from "../lido-core/deposit.js"; import { GenerateLidoDevNetKeys } from "../lido-core/keys/generate.js"; import { UseLidoDevNetKeys } from "../lido-core/keys/use.js"; import { ReplaceDSM } from "../lido-core/replace-dsm.js"; -import { OracleUp } from "../oracles/up.js"; +import { OracleK8sUp } from "../oracles-k8s/up.js"; import { ValidatorAdd } from "../validator/add.js"; import { CSMUpdateState } from "../csm/update-state.js"; @@ -57,12 +57,9 @@ export const PectraDevNetUp = command.cli({ ref: "develop", }); - await dre.runCommand(KurtosisUp, { preset: "csm-v2" }); + await dre.runCommand(ChainUp, { preset: "csm-v2" }); logger.log("✅ Network initialized."); - await dre.runCommand(BlockscoutUp, {}); - logger.log("✅ BlockScout 
launched for transaction visualization."); - // if (!params.full) { // await dre.runCommand(KurtosisGetInfo, {}); // return; @@ -120,10 +117,10 @@ export const PectraDevNetUp = command.cli({ } logger.log("🚀 Run KAPI service."); - await dre.runCommand(KapiUp, {}); + await dre.runCommand(KapiK8sUp, {}); logger.log("🚀 Run Oracle service."); - await dre.runCommand(OracleUp, {}); + await dre.runCommand(OracleK8sUp, { tag: '6.0.1', build: false }); if (params.dsm) { logger.log("🚀 Deploying Data-bus..."); @@ -131,11 +128,11 @@ export const PectraDevNetUp = command.cli({ logger.log("✅ Data-bus deployed."); logger.log("🚀 Running Council service..."); - await dre.runCommand(CouncilUp, {}); + await dre.runCommand(CouncilK8sUp, {}); logger.log("✅ Council service started."); logger.log("🚀 Running DSM-bots service..."); - await dre.runCommand(DSMBotsUp, {}); + await dre.runCommand(DSMBotsK8sUp, {}); logger.log("✅ DSM-bots service started."); } @@ -151,6 +148,6 @@ export const PectraDevNetUp = command.cli({ await dre.runCommand(ValidatorAdd, {}); logger.log("✅ Validator keys added."); - await dre.runCommand(KurtosisGetInfo, {}); + await dre.runCommand(ChainGetInfo, {}); }, }); diff --git a/src/commands/stands/fusaka-zk-test.ts b/src/commands/stands/fusaka-zk-test.ts new file mode 100644 index 00000000..ed4d6181 --- /dev/null +++ b/src/commands/stands/fusaka-zk-test.ts @@ -0,0 +1,45 @@ +import { Params, command } from "@devnet/command"; + +import { ChainGetInfo } from "../chain/info.js"; +import { ChainUp } from "../chain/up.js"; +import { CouncilK8sUp } from "../council-k8s/up.js"; +import { ActivateCSM } from "../csm/activate.js"; +import { LidoAddCSMOperatorWithKeys } from "../csm/add-operator.js"; +import { DeployCSVerifier } from "../csm/add-verifier.js"; +import { DeployCSMContracts } from "../csm/deploy.js"; +import { DataBusDeploy } from "../data-bus/deploy.js"; +import { DSMBotsK8sUp } from "../dsm-bots-k8s/up.js"; +import { GitCheckout } from "../git/checkout.js"; 
+import { KapiK8sUp } from "../kapi-k8s/up.js"; +import { ActivateLidoProtocol } from "../lido-core/activate.js"; +import { LidoAddKeys } from "../lido-core/add-keys.js"; +import { LidoAddOperator } from "../lido-core/add-operator.js"; +import { DeployLidoContracts } from "../lido-core/deploy.js"; +import { LidoDeposit } from "../lido-core/deposit.js"; +import { GenerateLidoDevNetKeys } from "../lido-core/keys/generate.js"; +import { UseLidoDevNetKeys } from "../lido-core/keys/use.js"; +import { ReplaceDSM } from "../lido-core/replace-dsm.js"; +import { LidoSetStakingLimit } from "../lido-core/set-staking-limit.js"; +import { NoWidgetUp } from "../no-widget/up.js"; +import { NoWidgetBackendUp } from "../no-widget-backend/up.js"; +import { OracleK8sUp } from "../oracles-k8s/up.js"; +import { ValidatorAdd } from "../validator/add.js"; + +export const FusakaZkTestDevNetUp = command.cli({ + description: "Fusaka ZK Test test stand.", + params: {}, + async handler({ params, dre, dre: { logger } }) { + await dre.runCommand(GitCheckout, { + service: "lidoCore", + ref: "fix/scratch-deploy-tw", + }); + + await dre.runCommand(GitCheckout, { + service: "csm", + ref: "main", + }); + + await dre.runCommand(ChainUp, { preset: 'fusaka-zk-test' }); + logger.log("✅ Network initialized."); + }, +}); diff --git a/src/commands/stands/fusaka.ts b/src/commands/stands/fusaka.ts new file mode 100644 index 00000000..e19f4fd9 --- /dev/null +++ b/src/commands/stands/fusaka.ts @@ -0,0 +1,167 @@ +import { Params, command } from "@devnet/command"; + +import { ChainGetInfo } from "../chain/info.js"; +import { ChainUp } from "../chain/up.js"; +import { CouncilK8sUp } from "../council-k8s/up.js"; +import { ActivateCSM } from "../csm/activate.js"; +import { LidoAddCSMOperatorWithKeys } from "../csm/add-operator.js"; +import { DeployCSMContracts } from "../csm/deploy.js"; +import { CSMProverToolK8sUp } from "../csm-prover-tool-k8s/up.js"; +import { DataBusDeploy } from "../data-bus/deploy.js"; 
+import { DSMBotsK8sUp } from "../dsm-bots-k8s/up.js"; +import { GitCheckout } from "../git/checkout.js"; +import { KapiK8sUp } from "../kapi-k8s/up.js"; +import { ActivateLidoProtocol } from "../lido-core/activate.js"; +import { LidoAddKeys } from "../lido-core/add-keys.js"; +import { LidoAddOperator } from "../lido-core/add-operator.js"; +import { DeployLidoContracts } from "../lido-core/deploy.js"; +import { LidoDeposit } from "../lido-core/deposit.js"; +import { GenerateLidoDevNetKeys } from "../lido-core/keys/generate.js"; +import { UseLidoDevNetKeys } from "../lido-core/keys/use.js"; +import { ReplaceDSM } from "../lido-core/replace-dsm.js"; +import { LidoSetStakingLimit } from "../lido-core/set-staking-limit.js"; +import { NoWidgetUp } from "../no-widget/up.js"; +import { NoWidgetBackendUp } from "../no-widget-backend/up.js"; +import { OracleK8sUp } from "../oracles-k8s/up.js"; +import { ValidatorAdd } from "../validator/add.js"; + +export const FusakaDevNetUp = command.cli({ + description: "Base Fusaka test stand.", + params: { + verify: Params.boolean({ + description: "Enables verification of smart contracts during deployment.", + }), + dsm: Params.boolean({ + description: "Use full DSM setup.", + default: false, + }), + preset: Params.string({ + description: "Kurtosis preset name", + default: "fusaka-devnet2", + }), + }, + async handler({ params, dre, dre: { logger } }) { + await dre.runCommand(GitCheckout, { + service: "lidoCore", + ref: "master", + }); + + await dre.runCommand(GitCheckout, { + service: "csm", + ref: "develop", + }); + + await dre.runCommand(GitCheckout, { + service: "lidoCLI", + ref: "fix/vroom-306-temp-fix-fusaka-1", + }); + + await dre.runCommand(ChainUp, { preset: params.preset }); + logger.log("✅ Network initialized."); + + const deployArgs = { verify: params.verify }; + const depositArgs = { dsm: params.dsm }; + + logger.log("🚀 Deploying Lido Core contracts..."); + await dre.runCommand(DeployLidoContracts, deployArgs); + 
logger.log("✅ Lido contracts deployed."); + + logger.log("🚀 Deploying CSM contracts..."); + await dre.runCommand(DeployCSMContracts, deployArgs); + logger.log("✅ CSM contracts deployed."); + + await dre.network.waitCLFinalizedEpoch(1); + + logger.log("🚀 Activating Lido Core protocol..."); + await dre.runCommand(ActivateLidoProtocol, {}); + logger.log("✅ Lido Core protocol activated."); + + logger.log("🚀 Activating CSM protocol..."); + await dre.runCommand(ActivateCSM, { + stakeShareLimitBP: 2000, + priorityExitShareThresholdBP: 2500, + maxDepositsPerBlock: 30, + }); + logger.log("✅ CSM protocol activated."); + + if (!params.dsm) { + logger.log("🚀 Replacing DSM with an EOA..."); + await dre.runCommand(ReplaceDSM, {}); + logger.log("✅ DSM replaced with an EOA."); + } + + const NOR_DEVNET_OPERATOR = "devnet_nor_1"; + const CSM_DEVNET_OPERATOR = "devnet_csm_1"; + + logger.log("🚀 Generating and allocating keys for NOR Module..."); + await dre.runCommand(GenerateLidoDevNetKeys, { validators: 30 }); + await dre.runCommand(UseLidoDevNetKeys, { name: NOR_DEVNET_OPERATOR }); + logger.log("✅ NOR Module keys generated and allocated."); + + logger.log("🚀 Generating and allocating keys for CSM Module..."); + await dre.runCommand(GenerateLidoDevNetKeys, { validators: 30 }); + await dre.runCommand(UseLidoDevNetKeys, { name: CSM_DEVNET_OPERATOR }); + logger.log("✅ CSM Module keys generated and allocated."); + + logger.log("🚀 Adding NOR operator..."); + await dre.runCommand(LidoAddOperator, { name: NOR_DEVNET_OPERATOR }); + logger.log(`✅ Operator ${NOR_DEVNET_OPERATOR} added.`); + + logger.log("🚀 Adding NOR keys..."); + await dre.runCommand(LidoAddKeys, { name: NOR_DEVNET_OPERATOR, id: 0 }); + logger.log("✅ NOR keys added."); + + logger.log("🚀 Increasing staking limit for NOR..."); + await dre.runCommand(LidoSetStakingLimit, { operatorId: 0, limit: 30 }); + logger.log("✅ Staking limit for NOR increased."); + + logger.log("🚀 Adding CSM operator with keys..."); + await 
dre.runCommand(LidoAddCSMOperatorWithKeys, { + name: CSM_DEVNET_OPERATOR, + }); + logger.log(`✅ Keys for operator ${CSM_DEVNET_OPERATOR} added.`); + + logger.log("🚀 Run KAPI service in K8s."); + await dre.runCommand(KapiK8sUp, {}); + + logger.log("🚀 Run Oracle service in K8s."); + await dre.runCommand(OracleK8sUp, { tag: '6.0.1', build: false }); + + if (params.dsm) { + logger.log("🚀 Deploying Data-bus..."); + await dre.runCommand(DataBusDeploy, {}); + logger.log("✅ Data-bus deployed."); + + logger.log("🚀 Running Council service..."); + await dre.runCommand(CouncilK8sUp, {}); + logger.log("✅ Council service started."); + + logger.log("🚀 Running DSM-bots service..."); + await dre.runCommand(DSMBotsK8sUp, {}); + logger.log("✅ DSM-bots service started."); + } + + logger.log("🚀 Making deposit to NOR..."); + await dre.runCommand(LidoDeposit, { id: 1, deposits: 30, ...depositArgs }); + logger.log("✅ Deposit to NOR completed."); + + logger.log("🚀 Making deposit to CSM..."); + await dre.runCommand(LidoDeposit, { id: 3, deposits: 30, ...depositArgs }); + logger.log("✅ Deposit to CSM completed."); + + logger.log("🚀 Adding keys to the validator..."); + await dre.runCommand(ValidatorAdd, {}); + logger.log("✅ Validator keys added."); + + logger.log("🚀 Run No Widget Backend"); + await dre.runCommand(NoWidgetBackendUp, { }); + + logger.log("🚀 Run No Widget"); + await dre.runCommand(NoWidgetUp, { }); + + logger.log("🚀 Run CSM Prover Tool"); + await dre.runCommand(CSMProverToolK8sUp, {}); + + await dre.runCommand(ChainGetInfo, {}); + }, +}); diff --git a/src/commands/stands/pectra-only-chain.ts b/src/commands/stands/pectra-only-chain.ts index a5ec1728..92dee5f4 100644 --- a/src/commands/stands/pectra-only-chain.ts +++ b/src/commands/stands/pectra-only-chain.ts @@ -1,7 +1,7 @@ import { Params, command } from "@devnet/command"; import { BlockscoutUp } from "../blockscout/up.js"; -import { KurtosisUp } from "../chain/up.js"; +import { ChainUp } from "../chain/up.js"; import { 
GitCheckout } from "../git/checkout.js"; export const PectraChainUp = command.cli({ @@ -18,12 +18,9 @@ export const PectraChainUp = command.cli({ ref: "develop", }); - await dre.runCommand(KurtosisUp, { preset: params.preset }); + await dre.runCommand(ChainUp, { preset: params.preset }); logger.log("✅ Chain network initialized."); - await dre.runCommand(BlockscoutUp, {}); - logger.log("✅ BlockScout launched for transaction visualization."); - logger.log("✅ Pectra chain environment is ready."); }, }); diff --git a/src/commands/stands/pectra-only-contracts.ts b/src/commands/stands/pectra-only-contracts.ts index 7b116357..33f4e1b3 100644 --- a/src/commands/stands/pectra-only-contracts.ts +++ b/src/commands/stands/pectra-only-contracts.ts @@ -1,7 +1,7 @@ import { Params, command } from "@devnet/command"; import { BlockscoutUp } from "../blockscout/up.js"; -import { KurtosisUp } from "../chain/up.js"; +import { ChainUp } from "../chain/up.js"; import { DeployCSMContracts } from "../csm/deploy.js"; import { GitCheckout } from "../git/checkout.js"; import { DeployLidoContracts } from "../lido-core/deploy.js"; @@ -28,12 +28,9 @@ export const PectraContractsOnlyDevNetUp = command.cli({ ref: "develop", }); - await dre.runCommand(KurtosisUp, { preset: params.preset }); + await dre.runCommand(ChainUp, { preset: params.preset }); logger.log("✅ Network initialized."); - await dre.runCommand(BlockscoutUp, {}); - logger.log("✅ BlockScout launched for transaction visualization."); - const deployArgs = { verify: params.verify }; logger.log("🚀 Deploying Lido Core contracts..."); diff --git a/src/commands/stands/pectra.ts b/src/commands/stands/pectra.ts index 5e4da5f5..ac33bff0 100644 --- a/src/commands/stands/pectra.ts +++ b/src/commands/stands/pectra.ts @@ -1,17 +1,17 @@ import { Params, command } from "@devnet/command"; import { BlockscoutUp } from "../blockscout/up.js"; -import { KurtosisGetInfo } from "../chain/info.js"; -import { KurtosisUp } from "../chain/up.js"; -import { 
CouncilUp } from "../council/up.js"; +import { ChainGetInfo } from "../chain/info.js"; +import { ChainUp } from "../chain/up.js"; +import { CouncilK8sUp } from "../council-k8s/up.js"; import { ActivateCSM } from "../csm/activate.js"; import { LidoAddCSMOperatorWithKeys } from "../csm/add-operator.js"; import { DeployCSVerifier } from "../csm/add-verifier.js"; import { DeployCSMContracts } from "../csm/deploy.js"; import { DataBusDeploy } from "../data-bus/deploy.js"; -import { DSMBotsUp } from "../dsm-bots/up.js"; +import { DSMBotsK8sUp } from "../dsm-bots-k8s/up.js"; import { GitCheckout } from "../git/checkout.js"; -import { KapiUp } from "../kapi/up.js"; +import { KapiK8sUp } from "../kapi-k8s/up.js"; import { ActivateLidoProtocol } from "../lido-core/activate.js"; import { LidoAddKeys } from "../lido-core/add-keys.js"; import { LidoAddOperator } from "../lido-core/add-operator.js"; @@ -21,7 +21,7 @@ import { GenerateLidoDevNetKeys } from "../lido-core/keys/generate.js"; import { UseLidoDevNetKeys } from "../lido-core/keys/use.js"; import { ReplaceDSM } from "../lido-core/replace-dsm.js"; import { LidoSetStakingLimit } from "../lido-core/set-staking-limit.js"; -import { OracleUp } from "../oracles/up.js"; +import { OracleK8sUp } from "../oracles-k8s/up.js"; import { ValidatorAdd } from "../validator/add.js"; export const PectraDevNetUp = command.cli({ @@ -50,12 +50,9 @@ export const PectraDevNetUp = command.cli({ ref: "main", }); - await dre.runCommand(KurtosisUp, { preset: params.preset }); + await dre.runCommand(ChainUp, { preset: params.preset }); logger.log("✅ Network initialized."); - await dre.runCommand(BlockscoutUp, {}); - logger.log("✅ BlockScout launched for transaction visualization."); - const deployArgs = { verify: params.verify }; const depositArgs = { dsm: params.dsm }; @@ -117,10 +114,10 @@ export const PectraDevNetUp = command.cli({ logger.log(`✅ Keys for operator ${CSM_DEVNET_OPERATOR} added.`); logger.log("🚀 Run KAPI service."); - await 
dre.runCommand(KapiUp, {}); + await dre.runCommand(KapiK8sUp, {}); logger.log("🚀 Run Oracle service."); - await dre.runCommand(OracleUp, {}); + await dre.runCommand(OracleK8sUp, {tag: '', build: true }); if (params.dsm) { logger.log("🚀 Deploying Data-bus..."); @@ -128,11 +125,11 @@ export const PectraDevNetUp = command.cli({ logger.log("✅ Data-bus deployed."); logger.log("🚀 Running Council service..."); - await dre.runCommand(CouncilUp, {}); + await dre.runCommand(CouncilK8sUp, {}); logger.log("✅ Council service started."); logger.log("🚀 Running DSM-bots service..."); - await dre.runCommand(DSMBotsUp, {}); + await dre.runCommand(DSMBotsK8sUp, {}); logger.log("✅ DSM-bots service started."); } @@ -148,6 +145,6 @@ export const PectraDevNetUp = command.cli({ await dre.runCommand(ValidatorAdd, {}); logger.log("✅ Validator keys added."); - await dre.runCommand(KurtosisGetInfo, {}); + await dre.runCommand(ChainGetInfo, {}); }, }); diff --git a/src/commands/tunnel.ts b/src/commands/tunnel.ts deleted file mode 100644 index 622595ee..00000000 --- a/src/commands/tunnel.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { command } from "@devnet/command"; - -export const TunnelGetInfo = command.cli({ - description: "Displays a single SSH tunnel command for all services.", - params: {}, - async handler({ - dre: { - logger, - state, - services: { kurtosis }, - }, - }) { - const [kurtosisInfo, blockscoutInfo, chainServices] = await Promise.all([ - kurtosis.getDockerInfo(false), - state.getBlockScout(false), - (async () => { - const chainServices = await state.getChain(); - return Object.entries(chainServices).filter( - ([k]) => !k.endsWith("Private"), - ); - })(), - ]); - - // If no services are enabled, log a message and return - if ( - !kurtosisInfo && - !blockscoutInfo && - Object.keys(chainServices).length === 0 - ) { - logger.log(`No services are enabled`); - return; - } - - let sshCommand = "ssh"; - - const addPortToCommand = (url: string) => { - const port = new URL(url).port || 
url.split(":").pop(); - sshCommand += ` -L ${port}:localhost:${port}`; - }; - - // Collect all service URLs and names - const serviceEntries = [ - ...chainServices, - ...(kurtosisInfo?.dora.map((dora) => [ - "dora", - dora.ports[0].publicUrl!, - ]) || []), - ["blockscout", blockscoutInfo?.url], - ].filter(([, url]) => Boolean(url)); - - // Log service names in a single line - const serviceNames = serviceEntries.map(([name]) => name).join(", "); - logger.log(`Services included in the SSH tunnel: ${serviceNames}`); - - // Add all services to SSH command - serviceEntries.forEach(([, url]) => addPortToCommand(url!)); - - sshCommand += " user@remote_host"; - - logger.log("SSH tunnel command for all services:"); - logger.log(sshCommand); - }, -}); diff --git a/src/commands/validator/add.ts b/src/commands/validator/add.ts index 43c8cd97..94ebf44a 100644 --- a/src/commands/validator/add.ts +++ b/src/commands/validator/add.ts @@ -1,5 +1,7 @@ -import { assert, command } from "@devnet/command"; +import { command } from "@devnet/command"; import * as keyManager from "@devnet/key-manager-api"; +import { assert, sleep } from "@devnet/utils"; +import { pipe, A, RA, TE, NEA, E } from "@devnet/fp"; import { ValidatorRestart } from "./restart.js"; @@ -14,12 +16,14 @@ export const ValidatorAdd = command.cli({ state, }, }) { - const { validatorsApi } = await dre.state.getChain(); + const { validatorsApiPublic } = await dre.state.getChain(); const keystoresResponse = await keyManager.fetchKeystores( - validatorsApi, + validatorsApiPublic, keyManager.KEY_MANAGER_DEFAULT_API_TOKEN, ); + logger.log(`Total keystores: ${keystoresResponse.data.length}`); + const existingPubKeys = new Set( keystoresResponse.data.map((p) => p.validating_pubkey.replace("0x", "")), ); @@ -38,17 +42,33 @@ export const ValidatorAdd = command.cli({ logger.log(`Detected new keystores: ${actualKeystores.length}`); + await sleep(25_000); + const keystoresStrings = actualKeystores.map((v) => JSON.stringify(v)); - const 
keystoresPasswords = actualKeystores.map((_) => "12345678"); - const res = await keyManager.importKeystores( - validatorsApi, + + await pipe( keystoresStrings, - keystoresPasswords, - keyManager.KEY_MANAGER_DEFAULT_API_TOKEN, + A.chunksOf(10), + A.mapWithIndex((index, keystoresChunk) => { + logger.log(`Chunk ${index} of keystores`); + + const keystoresChunkPasswords = keystoresChunk.map((_) => "12345678"); + + return TE.tryCatch(async () => { + await keyManager.importKeystores( + validatorsApiPublic, + keystoresChunk, + keystoresChunkPasswords, + keyManager.KEY_MANAGER_DEFAULT_API_TOKEN, + ); + }, E.toError); + }), + A.sequence(TE.ApplicativeSeq), // sequential execution + TE.execute ); - logger.logJson(res); - await dre.runCommand(ValidatorRestart, {}); + // TODO + // await dre.runCommand(ValidatorRestart, {}); }, }); diff --git a/src/commands/validator/list.ts b/src/commands/validator/list.ts index fb2c800b..05c68707 100644 --- a/src/commands/validator/list.ts +++ b/src/commands/validator/list.ts @@ -5,9 +5,9 @@ export const ValidatorList = command.cli({ description: "Lists all validator keystores in the system", params: {}, async handler({ dre: { logger, state } }) { - const { validatorsApi } = await state.getChain(); + const { validatorsApiPublic } = await state.getChain(); const keystoresResponse = await keyManager.fetchKeystores( - validatorsApi, + validatorsApiPublic, keyManager.KEY_MANAGER_DEFAULT_API_TOKEN, ); diff --git a/src/commands/validator/remove.ts b/src/commands/validator/remove.ts index 5a056645..76d41f32 100644 --- a/src/commands/validator/remove.ts +++ b/src/commands/validator/remove.ts @@ -1,5 +1,6 @@ -import { Params, assert, command } from "@devnet/command"; +import { Params, command } from "@devnet/command"; import * as keyManager from "@devnet/key-manager-api"; +import { assert } from "@devnet/utils"; export const ValidatorRemove = command.cli({ description: @@ -15,9 +16,9 @@ export const ValidatorRemove = command.cli({ logger, }, }) { - 
const { validatorsApi } = await dre.state.getChain(); + const { validatorsApiPublic } = await dre.state.getChain(); const keystoresResponse = await keyManager.fetchKeystores( - validatorsApi, + validatorsApiPublic, keyManager.KEY_MANAGER_DEFAULT_API_TOKEN, ); @@ -49,7 +50,7 @@ export const ValidatorRemove = command.cli({ logger.log("Removing key from Key Manager") await keyManager.deleteKeystores( - validatorsApi, + validatorsApiPublic, [existingPubKey.validating_pubkey], keyManager.KEY_MANAGER_DEFAULT_API_TOKEN, ); diff --git a/src/commands/validator/restart.ts b/src/commands/validator/restart.ts index a878fea1..e2ee3b73 100644 --- a/src/commands/validator/restart.ts +++ b/src/commands/validator/restart.ts @@ -1,6 +1,5 @@ -import { assert, command } from "@devnet/command"; - -import { KurtosisUpdate } from "../chain/update.js"; +import { command } from "@devnet/command"; +import { assert } from "@devnet/utils"; export const ValidatorRestart = command.cli({ description: "Restarts the Teku validator client.", @@ -34,7 +33,7 @@ export const ValidatorRestart = command.cli({ // Update the state after restarting the container logger.log("Updating state after validator restart..."); - await dre.runCommand(KurtosisUpdate, {}); + // await dre.runCommand(KurtosisUpdate, {}); logger.log("Validator restart completed successfully."); }, }); diff --git a/src/commands/validator/voluntary-exit.ts b/src/commands/validator/voluntary-exit.ts index 09c6eefb..716b8782 100644 --- a/src/commands/validator/voluntary-exit.ts +++ b/src/commands/validator/voluntary-exit.ts @@ -1,4 +1,5 @@ -import { Params, assert, command } from "@devnet/command"; +import { Params, command } from "@devnet/command"; +import { assert } from "@devnet/utils"; const mnemonics = { genesis: @@ -19,8 +20,8 @@ export const VoluntaryExit = command.cli({ required: true, }), }, - async handler({ dre: { services }, params }) { - const { lidoCLI } = services; + async handler({ dre: { services, state, network }, params }) 
{ + const { lidoCLI, } = services; const mnemonic = mnemonics[params.mtype as keyof typeof mnemonics]; @@ -29,6 +30,18 @@ export const VoluntaryExit = command.cli({ `No mnemonics found for key ${params.mtype}`, ); - await lidoCLI.sh`./run.sh validators voluntary-exit ${mnemonic} ${params.index}`; + const { elPublic, clPublic } = await state.getChain(); + const { deployer } = await state.getNamedWallet(); + + await lidoCLI.sh({ + env: { + EL_CHAIN_ID: "32382", + EL_NETWORK_NAME: network.name, + EL_API_PROVIDER: elPublic, + CL_API_PROVIDER: clPublic, + PRIVATE_KEY: deployer.privateKey, + DEPLOYED: `deployed-${network.name}.json`, + }, + })`./run.sh validators voluntary-exit ${mnemonic} ${params.index}`; }, }); diff --git a/src/commands/voting/auto-vote.ts b/src/commands/voting/auto-vote.ts index b5456604..4dad54cf 100644 --- a/src/commands/voting/auto-vote.ts +++ b/src/commands/voting/auto-vote.ts @@ -1,4 +1,5 @@ -import { DevNetLogger, command } from "@devnet/command"; +import { command } from "@devnet/command"; +import { DevNetLogger } from "@devnet/logger"; import { JsonRpcProvider, ethers } from "ethers"; const abi = [ diff --git a/src/commands/voting/install.ts b/src/commands/voting/install.ts index b6621477..1fd4d933 100644 --- a/src/commands/voting/install.ts +++ b/src/commands/voting/install.ts @@ -1,4 +1,5 @@ -import { DevNetError, command } from "@devnet/command"; +import { command } from "@devnet/command"; +import { DevNetError } from "@devnet/utils"; export const VotingInstall = command.cli({ description: "Install voting scripts dependencies", diff --git a/src/commands/voting/prepare-pectra.ts b/src/commands/voting/prepare-pectra.ts index 357a3310..37c225a9 100644 --- a/src/commands/voting/prepare-pectra.ts +++ b/src/commands/voting/prepare-pectra.ts @@ -1,6 +1,8 @@ /* eslint-disable unicorn/numeric-separators-style */ /* eslint-disable perfectionist/sort-object-types */ -import { assert, command } from "@devnet/command"; +import { command } from 
"@devnet/command"; +import { assert } from "@devnet/utils"; + import * as YAML from "yaml"; export const PreparePectraVoting = command.cli({ @@ -22,7 +24,7 @@ export const PreparePectraVoting = command.cli({ validatorExitBus, acl, oracleDaemonConfig, - + finance, withdrawalVault, withdrawalQueue, @@ -38,7 +40,7 @@ export const PreparePectraVoting = command.cli({ accounting: CS_ACCOUNTING_ADDRESS, } = await state.getCSM(); - const { CSVerifier } = await state.getNewVerifier(); + const { CSVerifier } = await state.getElectraVerifier(); const config = { ACCOUNTING_ORACLE: accountingOracle, diff --git a/tsconfig.tsbuildinfo b/tsconfig.tsbuildinfo index f4ba0cab..ea4ff322 100644 --- a/tsconfig.tsbuildinfo +++ b/tsconfig.tsbuildinfo @@ -1 +1 @@ -{"root":["./src/commands/config.ts","./src/commands/down.ts","./src/commands/tunnel.ts","./src/commands/wallet.ts","./src/commands/assertoor/down.ts","./src/commands/assertoor/info.ts","./src/commands/assertoor/up.ts","./src/commands/blockscout/down.ts","./src/commands/blockscout/info.ts","./src/commands/blockscout/restart.ts","./src/commands/blockscout/up.ts","./src/commands/chain/artifacts.ts","./src/commands/chain/down.ts","./src/commands/chain/fork.ts","./src/commands/chain/info.ts","./src/commands/chain/up.ts","./src/commands/chain/update.ts","./src/commands/council/down.ts","./src/commands/council/logs.ts","./src/commands/council/up.ts","./src/commands/csm/activate.ts","./src/commands/csm/add-operator.ts","./src/commands/csm/add-verifier.ts","./src/commands/csm/deploy.ts","./src/commands/csm/install.ts","./src/commands/csm/update-state.ts","./src/commands/data-bus/deploy.ts","./src/commands/data-bus/install.ts","./src/commands/data-bus/update-state.ts","./src/commands/dsm-bots/down.ts","./src/commands/dsm-bots/logs.ts","./src/commands/dsm-bots/up.ts","./src/commands/git/checkout.ts","./src/commands/git/pull.ts","./src/commands/kapi/down.ts","./src/commands/kapi/logs.ts","./src/commands/kapi/up.ts","./src/commands/lido-cli/in
stall.ts","./src/commands/lido-core/activate.ts","./src/commands/lido-core/add-keys.ts","./src/commands/lido-core/add-operator.ts","./src/commands/lido-core/deploy-tw.ts","./src/commands/lido-core/deploy.ts","./src/commands/lido-core/deposit.ts","./src/commands/lido-core/install.ts","./src/commands/lido-core/prepare-repository.ts","./src/commands/lido-core/replace-dsm.ts","./src/commands/lido-core/set-staking-limit.ts","./src/commands/lido-core/update-state.ts","./src/commands/lido-core/verify.ts","./src/commands/lido-core/keys/generate.ts","./src/commands/lido-core/keys/use.ts","./src/commands/oracles/down.ts","./src/commands/oracles/logs.ts","./src/commands/oracles/up.ts","./src/commands/stands/csm-v2.ts","./src/commands/stands/pectra-only-chain.ts","./src/commands/stands/pectra-only-contracts.ts","./src/commands/stands/pectra-tw.ts","./src/commands/stands/pectra.ts","./src/commands/system/docs.ts","./src/commands/system/server.ts","./src/commands/validator/add.ts","./src/commands/validator/list.ts","./src/commands/validator/remove.ts","./src/commands/validator/restart.ts","./src/commands/validator/voluntary-exit.ts","./src/commands/validator/keys/generate.ts","./src/commands/validator/keys/test-generate.ts","./src/commands/voting/add-account.ts","./src/commands/voting/auto-vote.ts","./src/commands/voting/enact-after-pectra.ts","./src/commands/voting/enact-before-pectra.ts","./src/commands/voting/enact-tw.ts","./src/commands/voting/install.ts","./src/commands/voting/prepare-pectra.ts"],"version":"5.7.3"} \ No newline at end of file 
+{"root":["./src/commands/config.ts","./src/commands/down-offchain.ts","./src/commands/down.ts","./src/commands/wallet.ts","./src/commands/assertoor/down.ts","./src/commands/assertoor/info.ts","./src/commands/assertoor/up.ts","./src/commands/blockscout/down.ts","./src/commands/blockscout/info.ts","./src/commands/blockscout/restart.ts","./src/commands/blockscout/up.ts","./src/commands/blockscout/constants/blockscout.constants.ts","./src/commands/blockscout/extensions/blockscout.extension.ts","./src/commands/chain/chain-sync-nodes-state-from-k8s.ts","./src/commands/chain/chain-sync-state.ts","./src/commands/chain/down.ts","./src/commands/chain/fork.ts","./src/commands/chain/info.ts","./src/commands/chain/up.ts","./src/commands/chain/constants/nodes-ingress.constants.ts","./src/commands/chain/extensions/nodes-ingress.extension.ts","./src/commands/chain/extensions/nodes.extension.ts","./src/commands/council-k8s/build.ts","./src/commands/council-k8s/down.ts","./src/commands/council-k8s/up.ts","./src/commands/council-k8s/constants/council-k8s.constants.ts","./src/commands/council-k8s/extensions/council-k8s.extension.ts","./src/commands/csm/activate.ts","./src/commands/csm/add-operator.ts","./src/commands/csm/add-verifier.ts","./src/commands/csm/deploy.ts","./src/commands/csm/install.ts","./src/commands/csm/update-state.ts","./src/commands/csm/extensions/csm.extension.ts","./src/commands/csm-prover-tool-k8s/build.ts","./src/commands/csm-prover-tool-k8s/down.ts","./src/commands/csm-prover-tool-k8s/up.ts","./src/commands/csm-prover-tool-k8s/constants/csm-prover-tool-k8s.constants.ts","./src/commands/csm-prover-tool-k8s/extensions/csm-prover-tool-k8s.extension.ts","./src/commands/data-bus/deploy.ts","./src/commands/data-bus/install.ts","./src/commands/data-bus/update-state.ts","./src/commands/docker-registry/down.ts","./src/commands/docker-registry/info.ts","./src/commands/docker-registry/push-pull-secret-to-k8s.ts","./src/commands/docker-registry/up.ts","./src/commands/docke
r-registry/constants/docker-registry.constants.ts","./src/commands/docker-registry/extensions/docker-registry.extension.ts","./src/commands/docker-registry/templates/registry-auth-secret.template.ts","./src/commands/docker-registry/templates/registry-pull-secret.template.ts","./src/commands/dora/extensions/dora.extension.ts","./src/commands/dsm-bots-k8s/build.ts","./src/commands/dsm-bots-k8s/down.ts","./src/commands/dsm-bots-k8s/up.ts","./src/commands/dsm-bots-k8s/constants/dsm-bots-k8s.constants.ts","./src/commands/dsm-bots-k8s/extensions/dsm-bots-k8s.extension.ts","./src/commands/git/checkout.ts","./src/commands/git/pull.ts","./src/commands/k8s/ping.ts","./src/commands/k8s/set-default-context.ts","./src/commands/k8s/extensions/k8s.extension.ts","./src/commands/kapi-k8s/build.ts","./src/commands/kapi-k8s/down.ts","./src/commands/kapi-k8s/up.ts","./src/commands/kapi-k8s/constants/kapi-k8s.constants.ts","./src/commands/kapi-k8s/extensions/kapi-k8s.extension.ts","./src/commands/kubo-k8s/build.ts","./src/commands/kubo-k8s/down.ts","./src/commands/kubo-k8s/up.ts","./src/commands/kubo-k8s/constants/kubo-k8s.constants.ts","./src/commands/kubo-k8s/extensions/kubo-k8s.extension.ts","./src/commands/kurtosis/download-artifacts.ts","./src/commands/kurtosis/get-cluster-info.ts","./src/commands/kurtosis/restart-service.ts","./src/commands/kurtosis/run-package.ts","./src/commands/kurtosis/stop-package.ts","./src/commands/kurtosis/dora/down.ts","./src/commands/kurtosis/dora/info.ts","./src/commands/kurtosis/dora/up.ts","./src/commands/kurtosis/dora/constants/dora.constants.ts","./src/commands/kurtosis/dora/templates/dora-ingress.template.ts","./src/commands/kurtosis/extensions/kurtosis.extension.ts","./src/commands/kurtosis/nodes/ingress-down.ts","./src/commands/kurtosis/nodes/ingress-up.ts","./src/commands/kurtosis/nodes/templates/consensus-ingress.template.ts","./src/commands/kurtosis/nodes/templates/execution-ingress.template.ts","./src/commands/kurtosis/nodes/templates/validat
or-client-ingress.template.ts","./src/commands/late-prover-bot-k8s/build.ts","./src/commands/late-prover-bot-k8s/down.ts","./src/commands/late-prover-bot-k8s/up.ts","./src/commands/late-prover-bot-k8s/constants/late-prover-bot-k8s.constants.ts","./src/commands/late-prover-bot-k8s/extensions/late-prover-bot-k8s.extension.ts","./src/commands/lido-cli/install.ts","./src/commands/lido-core/activate.ts","./src/commands/lido-core/add-keys.ts","./src/commands/lido-core/add-new-operator.ts","./src/commands/lido-core/add-operator.ts","./src/commands/lido-core/deploy-tw.ts","./src/commands/lido-core/deploy.ts","./src/commands/lido-core/deposit.ts","./src/commands/lido-core/install.ts","./src/commands/lido-core/prepare-repository.ts","./src/commands/lido-core/replace-dsm.ts","./src/commands/lido-core/set-staking-limit.ts","./src/commands/lido-core/update-state.ts","./src/commands/lido-core/verify.ts","./src/commands/lido-core/extensions/lido-core.extension.ts","./src/commands/lido-core/keys/generate.ts","./src/commands/lido-core/keys/use.ts","./src/commands/no-widget/build.ts","./src/commands/no-widget/down.ts","./src/commands/no-widget/up.ts","./src/commands/no-widget/constants/no-widget.constants.ts","./src/commands/no-widget/extensions/no-widget.extension.ts","./src/commands/no-widget-backend/build.ts","./src/commands/no-widget-backend/down.ts","./src/commands/no-widget-backend/up.ts","./src/commands/no-widget-backend/constants/no-widget-backend.constants.ts","./src/commands/no-widget-backend/extensions/no-widget-backend.extension.ts","./src/commands/oracles-k8s/build.ts","./src/commands/oracles-k8s/down.ts","./src/commands/oracles-k8s/up.ts","./src/commands/oracles-k8s/constants/oracles-k8s.constants.ts","./src/commands/oracles-k8s/extensions/oracles-k8s.extension.ts","./src/commands/ssh/tunnel.ts","./src/commands/stands/csm-v2.ts","./src/commands/stands/fusaka-zk-test.ts","./src/commands/stands/fusaka.ts","./src/commands/stands/pectra-only-chain.ts","./src/commands/stands
/pectra-only-contracts.ts","./src/commands/stands/pectra-tw.ts","./src/commands/stands/pectra.ts","./src/commands/system/docs.ts","./src/commands/system/server.ts","./src/commands/validator/add.ts","./src/commands/validator/list.ts","./src/commands/validator/remove.ts","./src/commands/validator/restart.ts","./src/commands/validator/voluntary-exit.ts","./src/commands/validator/keys/generate.ts","./src/commands/validator/keys/test-generate.ts","./src/commands/voting/add-account.ts","./src/commands/voting/auto-vote.ts","./src/commands/voting/enact-after-pectra.ts","./src/commands/voting/enact-before-pectra.ts","./src/commands/voting/enact-tw.ts","./src/commands/voting/install.ts","./src/commands/voting/prepare-pectra.ts"],"version":"5.7.3"} \ No newline at end of file diff --git a/workspaces/blockscout/README.md b/workspaces/blockscout/README.md deleted file mode 100644 index 1a0926a0..00000000 --- a/workspaces/blockscout/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Docker-compose configuration - -Runs Blockscout locally in Docker containers with [docker-compose](https://github.com/docker/compose). - -## Prerequisites - -- Docker v20.10+ -- Docker-compose 2.x.x+ -- Running Ethereum JSON RPC client - -## Building Docker containers from source - -**Note**: in all below examples, you can use `docker compose` instead of `docker-compose`, if compose v2 plugin is installed in Docker. - -```bash -cd ./docker-compose -docker-compose up --build -``` - -**Note**: if you don't need to make backend customizations, you can run `docker-compose up` in order to launch from pre-build backend Docker image. This will be much faster. - -This command uses `docker-compose.yml` by-default, which builds the backend of the explorer into the Docker image and runs 9 Docker containers: - -- Postgres 14.x database, which will be available at port 7432 on the host machine. -- Redis database of the latest version. -- Blockscout backend with api at /api path. 
-- Nginx proxy to bind backend, frontend and microservices. -- Blockscout explorer at http://localhost. - -and 5 containers for microservices (written in Rust): - -- [Stats](https://github.com/blockscout/blockscout-rs/tree/main/stats) service with a separate Postgres 14 DB. -- [Sol2UML visualizer](https://github.com/blockscout/blockscout-rs/tree/main/visualizer) service. -- [Sig-provider](https://github.com/blockscout/blockscout-rs/tree/main/sig-provider) service. -- [User-ops-indexer](https://github.com/blockscout/blockscout-rs/tree/main/user-ops-indexer) service. - -**Note for Linux users**: Linux users need to run the local node on http://0.0.0.0/ rather than http://127.0.0.1/ - -## Configs for different Ethereum clients - -The repo contains built-in configs for different JSON RPC clients without need to build the image. - -| __JSON RPC Client__ | __Docker compose launch command__ | -| -------- | ------- | -| Erigon | `docker-compose -f erigon.yml up -d` | -| Geth (suitable for Reth as well) | `docker-compose -f geth.yml up -d` | -| Geth Clique | `docker-compose -f geth-clique-consensus.yml up -d` | -| Nethermind, OpenEthereum | `docker-compose -f nethermind.yml up -d` | -| Ganache | `docker-compose -f ganache.yml up -d` | -| HardHat network | `docker-compose -f hardhat-network.yml up -d` | - -- Running only explorer without DB: `docker-compose -f external-db.yml up -d`. In this case, no db container is created. And it assumes that the DB credentials are provided through `DATABASE_URL` environment variable on the backend container. -- Running explorer with external backend: `docker-compose -f external-backend.yml up -d` -- Running explorer with external frontend: `docker-compose -f external-frontend.yml up -d` -- Running all microservices: `docker-compose -f microservices.yml up -d` -- Running only explorer without microservices: `docker-compose -f no-services.yml up -d` - -All of the configs assume the Ethereum JSON RPC is running at http://localhost:8545. 
- -In order to stop launched containers, run `docker-compose -d -f config_file.yml down`, replacing `config_file.yml` with the file name of the config which was previously launched. - -You can adjust BlockScout environment variables: - -- for backend in `./envs/common-blockscout.env` -- for frontend in `./envs/common-frontend.env` -- for stats service in `./envs/common-stats.env` -- for visualizer in `./envs/common-visualizer.env` -- for user-ops-indexer in `./envs/common-user-ops-indexer.env` - -Descriptions of the ENVs are available - -- for [backend](https://docs.blockscout.com/for-developers/information-and-settings/env-variables) -- for [frontend](https://github.com/blockscout/frontend/blob/main/docs/ENVS.md). - -## Running Docker containers via Makefile - -Prerequisites are the same, as for docker-compose setup. - -Start all containers: - -```bash -cd ./docker -make start -``` - -Stop all containers: - -```bash -cd ./docker -make stop -``` - -***Note***: Makefile uses the same .env files since it is running docker-compose services inside. 
diff --git a/workspaces/blockscout/blockscout-postgresql/Makefile b/workspaces/blockscout/blockscout-postgresql/Makefile new file mode 100644 index 00000000..cea06dd4 --- /dev/null +++ b/workspaces/blockscout/blockscout-postgresql/Makefile @@ -0,0 +1,97 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/vendor/postgresql +HELM_VALUES_PATH = ./values.yaml + +# Release names +HELM_RELEASE = postgresql + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= false +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +# - + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + +endef + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) --namespace 
$(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + -f $(HELM_VALUES_PATH) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + -f $(HELM_VALUES_PATH) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/blockscout/blockscout-postgresql/values.yaml b/workspaces/blockscout/blockscout-postgresql/values.yaml new file mode 100644 index 00000000..3992adc1 --- /dev/null +++ b/workspaces/blockscout/blockscout-postgresql/values.yaml @@ -0,0 +1,1937 @@ +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 +# Default values for blockscout-posgresql. + + +## @section Global parameters +## Please, note that this will override the parameters, including dependencies, configured to use the global value +## +global: + ## @param global.imageRegistry Global Docker image registry + ## + imageRegistry: "" + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + ## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead + ## + defaultStorageClass: "" + storageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + postgresql: + ## @param global.postgresql.fullnameOverride Full chart name (overrides `fullnameOverride`) + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). + ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. 
+ ## + fullnameOverride: "" + + auth: + postgresPassword: "admin" + username: "admin" + password: "admin" + database: "blockscout" + existingSecret: "" + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## @param secretAnnotations Add annotations to the secrets +## +secretAnnotations: {} +## Enable diagnostic mode in the statefulset +## 
+diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity +## @section PostgreSQL common parameters +## + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry [default: REGISTRY_NAME] PostgreSQL image registry +## @param image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository +## @skip image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: "latest" + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run +## +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided + ## + postgresPassword: "" + ## @param auth.username Name for a custom user to create + ## + username: "" + ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided + ## + password: "" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. 
+ ## + existingSecret: "" + ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## + secretKeys: + adminPasswordKey: postgres-password + userPasswordKey: password + replicationPasswordKey: replication-password + ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: true +## @param architecture PostgreSQL architecture (`standalone` or `replication`) +## +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` +## +replication: + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. + ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. 
Useful for advanced replication settings + ## + applicationName: my_application +## @param containerPorts.postgresql PostgreSQL container port +## +containerPorts: + postgresql: 5432 +## Audit settings +## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-outs operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps +## +audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" +## LDAP configuration +## @param ldap.enabled Enable LDAP support +## @param ldap.server IP address or name of the LDAP server. 
+## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## DEPRECATED ldap.baseDN It will removed in a future, please use 'ldap.basedn' instead +## DEPRECATED ldap.bindDN It will removed in a future, please use 'ldap.binddn' instead +## DEPRECATED ldap.bind_password It will removed in a future, please use 'ldap.bindpw' instead +## @param ldap.basedn Root DN to begin the search for the user in +## @param ldap.binddn DN of user to bind to LDAP +## @param ldap.bindpw Password for the user to bind to LDAP +## DEPRECATED ldap.search_attr It will removed in a future, please use 'ldap.searchAttribute' instead +## DEPRECATED ldap.search_filter It will removed in a future, please use 'ldap.searchFilter' instead +## @param ldap.searchAttribute Attribute to match against the user name in the search +## @param ldap.searchFilter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead +## @param ldap.tls.enabled Se to true to enable TLS encryption +## +ldap: + enabled: false + server: "" + port: "" + prefix: "" + suffix: "" + basedn: "" + binddn: "" + bindpw: "" + searchAttribute: "" + searchFilter: "" + scheme: "" + tls: + enabled: false + ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. 
+ ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html + ## + uri: "" +## @param postgresqlDataDir PostgreSQL data dir folder +## +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) +## +postgresqlSharedPreloadLibraries: "pgaudit" +## Start PostgreSQL pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 +## +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: 1Gi +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" +## @section PostgreSQL Primary parameters +## +primary: + ## @param 
primary.name Name of the primary database (eg primary, master, leader, ...) + ## + name: primary + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." 
+ ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Pre-init configuration + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql/#on-container-start + preInitDb: + ## @param primary.preInitDb.scripts Dictionary of pre-init scripts + ## Specify dictionary of shell scripts to be run before db boot + ## e.g: + ## scripts: + ## my_pre_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." + scripts: {} + ## @param primary.preInitDb.scriptsConfigMap ConfigMap with pre-init scripts to be run + ## NOTE: This will override `primary.preInitDb.scripts` + scriptsConfigMap: "" + ## @param primary.preInitDb.scriptsSecret Secret with pre-init scripts to be run + ## NOTE: This can work along `primary.preInitDb.scripts` or `primary.preInitDb.scriptsConfigMap` + scriptsSecret: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. 
+ ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: + - name: POSTGRESQL_MAX_CONNECTIONS + value: "1000" + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 
5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: 
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "2xlarge" + ## @param primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param primary.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param primary.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enabled containers' Security Context + ## @param primary.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param primary.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param primary.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param 
primary.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param primary.containerSecurityContext.privileged Set container's Security Context privileged + ## @param primary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param primary.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param primary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param primary.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param 
primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: "" + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param primary.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param primary.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. 
Defaults to `1` if both `primary.pdb.minAvailable` and `primary.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param primary.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param primary.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param primary.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param primary.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param primary.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param primary.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param primary.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.labels Map of labels to add to the primary service + ## + labels: {} + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerClass Load balancer class if service type 
is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service + ## + annotations: {} + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable PostgreSQL Primary data 
persistence using PVC + ## + enabled: true + ## @param primary.persistence.volumeName Name to assign the volume + ## + volumeName: "data" + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 16Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.labels Labels for the PVC + ## + labels: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## PostgreSQL Primary Persistent Volume Claim Retention Policy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + persistentVolumeClaimRetentionPolicy: + ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary 
Statefulset + ## + enabled: false + ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## + whenScaled: Retain + ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + whenDeleted: Retain +## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) +## +readReplicas: + ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...) + ## + name: read + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for 
startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param readReplicas.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if readReplicas.resources is set (readReplicas.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param readReplicas.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param readReplicas.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param readReplicas.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enabled containers' Security Context + ## @param readReplicas.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param readReplicas.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param readReplicas.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param readReplicas.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param readReplicas.containerSecurityContext.privileged Set container's Security Context privileged + ## @param readReplicas.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## 
@param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param readReplicas.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param readReplicas.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param readReplicas.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostNetwork: false + ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostIPC: false + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## + labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## + priorityClassName: "" + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param readReplicas.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param readReplicas.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param readReplicas.pdb.maxUnavailable Maximum number/percentage of pods that 
may be made unavailable. Defaults to `1` if both `readReplicas.pdb.minAvailable` and `readReplicas.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param readReplicas.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param readReplicas.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param readReplicas.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param readReplicas.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param readReplicas.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param readReplicas.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param readReplicas.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.labels Map of labels to add to the read service + ## + labels: {} + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param 
readReplicas.service.loadBalancerClass Load balancer class if service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service + ## + annotations: {} + ## PostgreSQL read only 
persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.labels Labels for the PVC + ## + labels: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## PostgreSQL Read only Persistent Volume Claim Retention Policy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + persistentVolumeClaimRetentionPolicy: + ## @param 
readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset + ## + enabled: false + ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## + whenScaled: Retain + ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + whenDeleted: Retain +## @section Backup parameters +## This section implements a trivial logical dump cronjob of the database. +## This only comes with the consistency guarantees of the dump program. +## This is not a snapshot based roll forward/backward recovery backup. +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ +backup: + ## @param backup.enabled Enable the logical dump of the database "regularly" + enabled: false + cronjob: + ## @param backup.cronjob.schedule Set the cronjob parameter schedule + schedule: "@daily" + ## @param backup.cronjob.timeZone Set the cronjob parameter timeZone + timeZone: "" + ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy + concurrencyPolicy: Allow + ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit + failedJobsHistoryLimit: 1 + ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit + successfulJobsHistoryLimit: 3 + ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds + startingDeadlineSeconds: "" + ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished + ttlSecondsAfterFinished: "" + ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy + restartPolicy: OnFailure + ## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup + ## @param 
backup.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param backup.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param backup.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## backup container's Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param backup.cronjob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param backup.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param backup.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + 
capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param backup.cronjob.command Set backup container's command to run + command: + - /bin/bash + - -c + - PGPASSWORD="${PGPASSWORD:-$(< "$PGPASSWORD_FILE")}" pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file="${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump" + ## @param backup.cronjob.labels Set the cronjob labels + labels: {} + ## @param backup.cronjob.annotations Set the cronjob annotations + annotations: {} + ## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/ + ## + nodeSelector: {} + ## @param backup.cronjob.tolerations Tolerations for PostgreSQL backup CronJob pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## backup cronjob container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param backup.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.cronjob.resources is set (backup.cronjob.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param backup.cronjob.resources Set container requests and limits for different resources like CPU or memory + ## Example: + resources: {} + ## resources: + ## requests: + ## cpu: 1 + ## memory: 512Mi + ## limits: + ## cpu: 2 + ## memory: 1024Mi + networkPolicy: + ## @param backup.cronjob.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + storage: + ## @param backup.cronjob.storage.enabled Enable using a `PersistentVolumeClaim` as backup data volume + ## + enabled: true + ## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: "" + ## @param backup.cronjob.storage.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted + ## + resourcePolicy: "" + ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param backup.cronjob.storage.accessModes PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume + ## + size: 8Gi + ## @param backup.cronjob.storage.annotations PVC annotations + ## + annotations: {} + ## @param backup.cronjob.storage.mountPath Path to mount the volume at + ## + mountPath: /backup/pgdump + ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at + ## and one PV for multiple services. 
+ ## + subPath: "" + ## Fine tuning for volumeClaimTemplates + ## + volumeClaimTemplates: + ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) + ## A label query over volumes to consider for binding (e.g. when using local volumes) + ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details + ## + selector: {} + ## @param backup.cronjob.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the backup container + ## + extraVolumeMounts: [] + ## @param backup.cronjob.extraVolumes Optionally specify extra list of additional volumes for the backup container + ## + extraVolumes: [] + +## @section Password update job +## +passwordUpdateJob: + ## @param passwordUpdateJob.enabled Enable password update job + ## + enabled: false + ## @param passwordUpdateJob.backoffLimit set backoff limit of the job + ## + backoffLimit: 10 + ## @param passwordUpdateJob.command Override default container command on mysql Primary container(s) (useful when using custom images) + ## + command: [] + ## @param passwordUpdateJob.args Override default container args on mysql Primary container(s) (useful when using custom images) + ## + args: [] + ## @param passwordUpdateJob.extraCommands Extra commands to pass to the generation job + ## + extraCommands: "" + ## @param passwordUpdateJob.previousPasswords.postgresPassword Previous postgres password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.password Previous password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.replicationPassword Previous replication password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.existingSecret Name of a secret containing the previous passwords (set if the password secret was already changed) + 
previousPasswords: + postgresPassword: "" + password: "" + replicationPassword: "" + existingSecret: "" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param passwordUpdateJob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param passwordUpdateJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param passwordUpdateJob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param passwordUpdateJob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param passwordUpdateJob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param passwordUpdateJob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param passwordUpdateJob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param passwordUpdateJob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param passwordUpdateJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param passwordUpdateJob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param passwordUpdateJob.podSecurityContext.enabled Enabled credential init job pods' Security Context + ## 
@param passwordUpdateJob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param passwordUpdateJob.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param passwordUpdateJob.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param passwordUpdateJob.podSecurityContext.fsGroup Set credential init job pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## @param passwordUpdateJob.extraEnvVars Array containing extra env vars to configure the credential init job + ## For example: + ## extraEnvVars: + ## - name: GF_DEFAULT_INSTANCE_NAME + ## value: my-instance + ## + extraEnvVars: [] + ## @param passwordUpdateJob.extraEnvVarsCM ConfigMap containing extra env vars to configure the credential init job + ## + extraEnvVarsCM: "" + ## @param passwordUpdateJob.extraEnvVarsSecret Secret containing extra env vars to configure the credential init job (in case of sensitive data) + ## + extraEnvVarsSecret: "" + ## @param passwordUpdateJob.extraVolumes Optionally specify extra list of additional volumes for the credential init job + ## + extraVolumes: [] + ## @param passwordUpdateJob.extraVolumeMounts Array of extra volume mounts to be added to the jwt Container (evaluated as template). Normally used with `extraVolumes`. + ## + extraVolumeMounts: [] + ## @param passwordUpdateJob.initContainers Add additional init containers for the mysql Primary pod(s) + ## + initContainers: [] + ## Container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param passwordUpdateJob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if passwordUpdateJob.resources is set (passwordUpdateJob.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "micro" + ## @param passwordUpdateJob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param passwordUpdateJob.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param passwordUpdateJob.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param passwordUpdateJob.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param passwordUpdateJob.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param passwordUpdateJob.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param passwordUpdateJob.annotations [object] Add annotations to the job + ## + annotations: {} + ## @param passwordUpdateJob.podLabels Additional pod labels + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param passwordUpdateJob.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param 
volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 12-debian-12-r50 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container + ## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container + ## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container + ## + containerSecurityContext: + seLinuxOptions: {} + runAsUser: 0 + runAsGroup: 0 + runAsNonRoot: false + seccompProfile: + type: RuntimeDefault +## @section Other Parameters +## + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false +## Service account for PostgreSQL to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Creates role for ServiceAccount +## @param rbac.create Create Role and RoleBinding (required for PSP to work) +## +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later +## +psp: + create: false +## @section Metrics Parameters +## +metrics: + ## @param metrics.enabled Start a prometheus exporter + ## + enabled: false + ## @param metrics.image.registry [default: REGISTRY_NAME] PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/postgres-exporter] PostgreSQL Prometheus Exporter image repository + ## @skip metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy + ## @param metrics.image.pullSecrets Specify image pull secrets + ## + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.17.1-debian-12-r15 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.collectors Control enabled collectors + ## ref: https://github.com/prometheus-community/postgres_exporter#flags + ## Example: + ## collectors: + ## wal: false + collectors: {} + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/prometheus-community/postgres_exporter#adding-new-metrics-via-a-config-file-deprecated + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/prometheus-community/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context + ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged + ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + 
## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + 
## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus 
Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "postgresql.v1.chart.fullname" .) }}"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ include "postgresql.v1.chart.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
+ ## + rules: [] diff --git a/workspaces/blockscout/blockscout-stack/Makefile b/workspaces/blockscout/blockscout-stack/Makefile new file mode 100644 index 00000000..566da761 --- /dev/null +++ b/workspaces/blockscout/blockscout-stack/Makefile @@ -0,0 +1,107 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/vendor/blockscout-stack +HELM_VALUES_PATH = ./values.yaml + +# Release names +HELM_RELEASE = blockscout-stack + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= false +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +BLOCKSCOUT_ETHEREUM_JSONRPC_VARIANT ?= geth +BLOCKSCOUT_ETHEREUM_JSONRPC_WS_URL ?= http://execution:8546 +BLOCKSCOUT_ETHEREUM_JSONRPC_TRACE_URL ?= http://execution:8545 +BLOCKSCOUT_ETHEREUM_JSONRPC_HTTP_URL ?= http://execution:8545 +BLOCKSCOUT_BACKEND_INGRESS_HOSTNAME ?= blockscout-backend.devnet.valset-02.testnet.fi +BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME ?= blockscout.devnet.valset-02.testnet.fi + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set blockscout.env.ETHEREUM_JSONRPC_VARIANT=$(BLOCKSCOUT_ETHEREUM_JSONRPC_VARIANT) \ + --set blockscout.env.ETHEREUM_JSONRPC_WS_URL=$(BLOCKSCOUT_ETHEREUM_JSONRPC_WS_URL) \ + --set blockscout.env.ETHEREUM_JSONRPC_TRACE_URL=$(BLOCKSCOUT_ETHEREUM_JSONRPC_TRACE_URL) \ + --set 
blockscout.env.ETHEREUM_JSONRPC_HTTP_URL=$(BLOCKSCOUT_ETHEREUM_JSONRPC_HTTP_URL) \ + --set blockscout.ingress.hostname=$(BLOCKSCOUT_BACKEND_INGRESS_HOSTNAME) \ + --set frontend.ingress.hostname=$(BLOCKSCOUT_FRONTEND_INGRESS_HOSTNAME) +endef + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + -f $(HELM_VALUES_PATH) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + -f $(HELM_VALUES_PATH) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/blockscout/blockscout-stack/values.yaml b/workspaces/blockscout/blockscout-stack/values.yaml new file mode 100644 index 00000000..63061323 --- /dev/null +++ 
b/workspaces/blockscout/blockscout-stack/values.yaml @@ -0,0 +1,546 @@ +# Default values for blockscout-stack. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Provide a name in place of blockscout-stack for `app:` labels +## +nameOverride: "" +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] +## Blockscout configuration options +## +config: + network: + id: 32382 + name: Ether + shortname: Ether + currency: + name: Ether + symbol: ETH + decimals: 18 + # if network uses dual token model like gnosis (in most case it should be set to false) + dualToken: false + account: + enabled: false + testnet: true + nameService: + enabled: false + url: "" + ## Creates redirect from additional domain to frontend domain + ## Works only if backend and frontend runs on single domain + redirect: + enabled: false + hostnames: ["extra-chart-example.local"] + ingress: + className: nginx + annotations: + nginx.ingress.kubernetes.io/server-snippet: | + if ($request_uri !~ ^/.well-known/(.*)$) { + return 301 $scheme://{{ .Values.frontend.ingress.hostname }}$request_uri; + } + tls: + enabled: false + ## If set to true will create service monitors for blockscout and stats + ## + prometheus: + enabled: false + ## Whitelist metrics path on ingress to make metrics non-public + ingressWhitelist: + enabled: false + annotations: + nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + blackbox: + enabled: false + path: /api/health + rules: + enabled: false + ## If latest block timestamp is older than healthyBlockPeriod instance is considered unhealthy and alert is created + healthyBlockPeriod: 300 + ## Alert is created if there is no new batches for more than batchTimeMultiplier x 
average_batch_time + batchTimeMultiplier: 2 + labels: {} +## Configuration options for backend +## +blockscout: + enabled: true + ## Replica count for indexer (if separate api is not used this replica count for deployment containing both indexer and api). Currently only one replica is supported + ## + replicaCount: 1 + ## Image parametes + ## + image: + repository: ghcr.io/blockscout/blockscout + pullPolicy: IfNotPresent + tag: "latest" + ## Init container configuration (used to run DB migrations) + ## + init: + enabled: true + command: + - /bin/sh + args: + - -c + - bin/blockscout eval "Elixir.Explorer.ReleaseTasks.create_and_migrate()" + ## Run API service as separate deployment + ## + separateApi: + enabled: false + replicaCount: 1 + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + nftStorage: + enabled: false + cookie: secret + bucketHost: xxx.r2.cloudflarestorage.com + accessKey: "" + secretKey: "" + bucketName: nft + bucketUrl: https://pub-xxx.r2.dev + ipfsGateway: https://ipfs.io/ipfs + workerConcurrency: 10 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + ## Blockscout ENV vars + ## ref: https://docs.blockscout.com/setup/env-variables + ## + env: + ETHEREUM_JSONRPC_VARIANT: 'geth' + ETHEREUM_JSONRPC_WS_URL: 'http://execution' + ETHEREUM_JSONRPC_TRACE_URL: 'http://execution' + ETHEREUM_JSONRPC_HTTP_URL: 'http://execution' + DATABASE_URL: 'postgresql://admin:admin@postgresql:5432/blockscout?sslmode=disable' + ECTO_USE_SSL: false + INDEXER_DISABLE_PENDING_TRANSACTIONS_FETCHER: "true" + SUBNETWORK: "lido" + INDEXER_COIN_BALANCES_BATCH_SIZE: "50" + INDEXER_COIN_BALANCES_CONCURRENCY: "2" + SECRET_KEY_BASE: "56NtB48ear7+wMSf0IQuWDAAazhpb31qyc7GiyspBP2vh7t5zlCsF5QDv76chXeN" + DISABLE_MARKET: true + POOL_SIZE: "80" + POOL_SIZE_API: "10" + HEART_BEAT_TIMEOUT: "30" + ETHEREUM_JSONRPC_DISABLE_ARCHIVE_BALANCES: false + CONTRACT_VERIFICATION_ALLOWED_SOLIDITY_EVM_VERSIONS: 
"homestead,tangerineWhistle,spuriousDragon,byzantium,constantinople,petersburg,istanbul,berlin,london,paris,shanghai,cancun,default" + CONTRACT_VERIFICATION_ALLOWED_VYPER_EVM_VERSIONS: "byzantium,constantinople,petersburg,istanbul,berlin,paris,shanghai,cancun,default" + CONTRACT_MAX_STRING_LENGTH_WITHOUT_TRIMMING: "2040" + CHECKSUM_ADDRESS_HASHES: true + CHECKSUM_FUNCTION: eth + MICROSERVICE_SC_VERIFIER_ENABLED: true + # MICROSERVICE_SC_VERIFIER_URL: http://sc-verifier:8050/ + # MICROSERVICE_SC_VERIFIER_TYPE: sc_verifier + # MICROSERVICE_SC_VERIFIER_URL: https://eth-bytecode-db.services.blockscout.com/ + # MICROSERVICE_SC_VERIFIER_TYPE: eth_bytecode_db + MICROSERVICE_SC_VERIFIER_URL: http://blockscout-verifier-smart-contract-verifier:8050/ + MICROSERVICE_SC_VERIFIER_TYPE: sc_verifier + MICROSERVICE_ETH_BYTECODE_DB_INTERVAL_BETWEEN_LOOKUPS: "10m" + MICROSERVICE_ETH_BYTECODE_DB_MAX_LOOKUPS_CONCURRENCY: "10" + MICROSERVICE_VISUALIZE_SOL2UML_ENABLED: false + MICROSERVICE_VISUALIZE_SOL2UML_URL: http://visualizer:8050/ + MICROSERVICE_SIG_PROVIDER_ENABLED: true + MICROSERVICE_SIG_PROVIDER_URL: http://blockscout-verifier-sig-provider:8043/ + # MICROSERVICE_BENS_URL: + # MICROSERVICE_BENS_ENABLED: + MICROSERVICE_ACCOUNT_ABSTRACTION_ENABLED: false + MICROSERVICE_ACCOUNT_ABSTRACTION_URL: http://user-ops-indexer:8050/ + # MICROSERVICE_METADATA_URL: + # MICROSERVICE_METADATA_ENABLED: + # MICROSERVICE_STYLUS_VERIFIER_URL: + DECODE_NOT_A_CONTRACT_CALLS: true + + # NAME: VALUE + # Refer to an existing Secret/ConfigMap + extraEnv: [] + # - name: DATABASE_URL + # valueFrom: + # secretKeyRef: + # name: blockscout-secret + # key: DATABASE_URL + ## Set ENV vars via secret, this can be useful for DB connection params, api keys, etc. 
+ ## + # This will create a Secret with the specified data + envFromSecret: [] + # NAME: VALUE + # Refer to an existing Secret/ConfigMap + envFrom: [] + # - secretRef: + # name: blockscout-secret + # - configMapRef: + # name: blockscout-config + ## Command to start blockscout instance + ## + command: + - /bin/sh + args: + - -c + - bin/blockscout start + ## Annotations to add to blockscout pod + podAnnotations: {} + + ## Annotations to add to blockscout deployment + annotations: {} + + podSecurityContext: {} + ## SecurityContext holds pod-level security attributes and common container settings. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: {} + terminationGracePeriodSeconds: 300 + ## Liveness probe + ## + livenessProbe: + enabled: true + path: /api/health/liveness + params: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + ## Readiness probe + ## + readinessProbe: + enabled: true + path: /api/health/readiness + params: + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 60 + + service: + type: ClusterIP + port: 80 + ## Configure ingress resource that allow you to access the blockscout installation. + ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + enabled: true + className: "public" + annotations: {} + hostname: blockscout-backend.valset-02.testnet.fi + tls: + enabled: false + #secretName: + paths: + - path: /api + pathType: Prefix + - path: /socket + pathType: Prefix + - path: /public-metrics + pathType: Prefix + - path: /auth/auth0 + pathType: Exact + - path: /auth/auth0/callback + pathType: Exact + - path: /auth/logout + pathType: Exact + + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 2 + memory: 4Gi + # Additional volumes on the output Blockscout Deployment definition. 
+ volumes: [] + # - name: foo + # secret: + # secretName: mysecret + # optional: false + + # Additional volumeMounts on the output Blockscout Deployment definition. + volumeMounts: [] + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true +## Configuration options for frontend +## +frontend: + enabled: true + ## Image parametes + image: + repository: ghcr.io/blockscout/frontend + tag: latest + pullPolicy: IfNotPresent + + replicaCount: 1 + ## Annotations to add to frontend pod + podAnnotations: {} + + ## Annotations to add to frontend deployment + annotations: {} + + podSecurityContext: {} + ## SecurityContext holds pod-level security attributes and common container settings. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: {} + + service: + type: ClusterIP + port: 80 + ## Configure ingress resource that allow you to access the frontend installation. + ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + enabled: true + className: "public" + annotations: {} + hostname: blockscout.valset-02.testnet.fi + tls: + enabled: false + #secretName: + paths: + - path: / + + resources: + limits: + memory: "1Gi" + cpu: "500m" + requests: + memory: "256Mi" + cpu: "250m" + ## Liveness probe + ## + livenessProbe: + enabled: true + path: /api/healthz + params: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + ## Readiness probe + ## + readinessProbe: + enabled: true + path: /api/healthz + params: + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 30 + ## Frontend ENV vars + ## ref: https://github.com/blockscout/frontend/blob/main/docs/ENVS.md + ## + env: + NEXT_PUBLIC_API_PROTOCOL: "http" + NEXT_PUBLIC_API_WEBSOCKET_PROTOCOL: "ws" + NEXT_PUBLIC_APP_PROTOCOL: "http" + # NAME: VALUE + # Refer to an existing Secret/ConfigMap + extraEnv: [] + # - name: FAVICON_GENERATOR_API_KEY + # valueFrom: + # secretKeyRef: + # name: blockscout-frontend-secret + # key: 
FAVICON_GENERATOR_API_KEY + # This will create a Secret with the specified data + envFromSecret: [] + # NAME: VALUE + # Refer to an existing Secret/ConfigMap + envFrom: [] + # - secretRef: + # name: blockscout-frontend-secret + # - configMapRef: + # name: blockscout-frontend-config + +stats: + enabled: false + ## Image parametes + ## + image: + repository: ghcr.io/blockscout/stats + tag: v2.4.0 + pullPolicy: IfNotPresent + + replicaCount: 1 + service: + type: ClusterIP + port: 80 + metricsPort: 6060 + + podAnnotations: {} + + ## Annotations to add to stats deployment + annotations: {} + + podSecurityContext: {} + + securityContext: {} + + basePath: "/" + + ## Configure ingress resource that allow you to access the stats installation. + ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + enabled: false + className: "public" + annotations: {} + hostname: blockscout-stats.valset-02.testnet.fi + tls: + enabled: false + #secretName: + paths: + - path: / + pathType: Prefix + + resources: + limits: + memory: "512Mi" + cpu: 250m + requests: + memory: 512Mi + cpu: 250m + ## Files to mount to stats pod + ## + files: + enabled: false + list: {} + # file.txt: | + # test + mountPath: /tmp/path + + ## Liveness probe + ## + livenessProbe: + enabled: true + path: /health + params: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + ## Readiness probe + ## + readinessProbe: + enabled: true + path: /health + params: + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 60 + ## Stats ENV vars + ## ref: https://github.com/blockscout/blockscout-rs/tree/main/stats#env + env: [] + # NAME: VALUE + envFromSecret: [] + # NAME: VALUE + extraEnv: [] + # - name: STATS__DB_URL + # valueFrom: + # secretKeyRef: + # name: blockscout-stats-secret + # key: STATS__DB_URL + +userOpsIndexer: + enabled: false + ## Image parametes + ## + image: + repository: ghcr.io/blockscout/user-ops-indexer + tag: latest + pullPolicy: IfNotPresent + + replicaCount: 1 + 
service: + type: ClusterIP + port: 80 + grpc: + enabled: true + port: 8051 + metricsPort: 6060 + + podAnnotations: {} + + ## Annotations to add to user-ops-indexer deployment + annotations: {} + + podSecurityContext: {} + + securityContext: {} + + ## Configure ingress resource that allow you to access the stats installation. + ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + enabled: false + className: "" + annotations: {} + hostname: chart-example-stats.local + tls: + enabled: false + #secretName: + paths: + - path: / + pathType: Prefix + + resources: + limits: + memory: "512Mi" + cpu: 250m + requests: + memory: 512Mi + cpu: 250m + + ## Liveness probe + ## + livenessProbe: + enabled: false + path: /health + params: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + ## Readiness probe + ## + readinessProbe: + enabled: false + path: /health + params: + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 60 + ## Stats ENV vars + ## ref: https://github.com/blockscout/blockscout-rs/tree/main/stats#env + env: [] + # NAME: VALUE + envFromSecret: [] + # NAME: VALUE + extraEnv: [] + # - name: USER_OPS_INDEXER__DATABASE__CONNECT__URL + # valueFrom: + # secretKeyRef: + # name: blockscout-userops-indexer-secret + # key: USER_OPS_INDEXER__DATABASE__CONNECT__URL + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" +## Node labels for blockscout-stack pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +tolerations: [] + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} diff --git a/workspaces/blockscout/docker-compose.yml b/workspaces/blockscout/docker-compose.yml deleted file mode 100644 index 97c04ed1..00000000 --- a/workspaces/blockscout/docker-compose.yml +++ /dev/null @@ -1,102 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - db: - depends_on: - db-init: - condition: service_completed_successfully - extends: - file: ./services/db.yml - service: db - - backend: - depends_on: - - db - - redis-db - extends: - file: ./services/backend.yml - service: backend - build: - context: .. 
- dockerfile: ./docker/Dockerfile - args: - CACHE_EXCHANGE_RATES_PERIOD: "" - API_V1_READ_METHODS_DISABLED: "false" - DISABLE_WEBAPP: "false" - API_V1_WRITE_METHODS_DISABLED: "false" - CACHE_TOTAL_GAS_USAGE_COUNTER_ENABLED: "" - CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL: "" - ADMIN_PANEL_ENABLED: "" - RELEASE_VERSION: 6.9.2 - links: - - db:database - environment: - ETHEREUM_JSONRPC_HTTP_URL: http://execution:8545/ - ETHEREUM_JSONRPC_TRACE_URL: http://execution:8545/ - ETHEREUM_JSONRPC_WS_URL: ws://execution:8546/ - CHAIN_ID: '1337' - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - - frontend: - depends_on: - - backend - extends: - file: ./services/frontend.yml - service: frontend - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - - backend - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - depends_on: - - db - - backend - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - backend - - frontend - - stats - extends: - file: ./services/nginx.yml - service: proxy diff --git a/workspaces/blockscout/envs/common-blockscout.env b/workspaces/blockscout/envs/common-blockscout.env deleted file mode 100644 index 6fadc340..00000000 --- a/workspaces/blockscout/envs/common-blockscout.env +++ /dev/null @@ -1,491 +0,0 @@ -ETHEREUM_JSONRPC_VARIANT=geth -ETHEREUM_JSONRPC_HTTP_URL=http://execution:8545/ -DATABASE_URL=postgresql://blockscout:ceWb1MeLBEeOIfk65gU8EjF8@db:5432/blockscout - -# DATABASE_EVENT_URL= -# DATABASE_QUEUE_TARGET -# TEST_DATABASE_URL= -# TEST_DATABASE_READ_ONLY_API_URL= - -ETHEREUM_JSONRPC_TRANSPORT=http 
-ETHEREUM_JSONRPC_DISABLE_ARCHIVE_BALANCES=false -# ETHEREUM_JSONRPC_FALLBACK_HTTP_URL= -ETHEREUM_JSONRPC_TRACE_URL=http://execution:8545/ -# ETHEREUM_JSONRPC_FALLBACK_TRACE_URL= -# ETHEREUM_JSONRPC_ETH_CALL_URL= -# ETHEREUM_JSONRPC_FALLBACK_ETH_CALL_URL= -ETHEREUM_JSONRPC_WS_URL=http://execution:8546/ -# ETHEREUM_JSONRPC_FALLBACK_WS_URL= -# ETHEREUM_JSONRPC_WS_RETRY_INTERVAL= -# ETHEREUM_JSONRPC_ARCHIVE_BALANCES_WINDOW=200 -# ETHEREUM_JSONRPC_HTTP_TIMEOUT= -# ETHEREUM_JSONRPC_HTTP_HEADERS= -# ETHEREUM_JSONRPC_HTTP_GZIP_ENABLED= -# ETHEREUM_JSONRPC_WAIT_PER_TIMEOUT= -# ETHEREUM_JSONRPC_GETH_TRACE_BY_BLOCK= -# ETHEREUM_JSONRPC_GETH_ALLOW_EMPTY_TRACES= -# ETHEREUM_JSONRPC_DEBUG_TRACE_TRANSACTION_TIMEOUT= -# ETHEREUM_JSONRPC_HTTP_URLS= -# ETHEREUM_JSONRPC_FALLBACK_HTTP_URLS= -# ETHEREUM_JSONRPC_TRACE_URLS= -# ETHEREUM_JSONRPC_FALLBACK_TRACE_URLS= -# ETHEREUM_JSONRPC_ETH_CALL_URLS= -# ETHEREUM_JSONRPC_FALLBACK_ETH_CALL_URLS= - -# CHAIN_TYPE= -NETWORK= -SUBNETWORK=Awesome chain -LOGO=/images/blockscout_logo.svg -IPC_PATH= -NETWORK_PATH=/ -BLOCKSCOUT_HOST= -BLOCKSCOUT_PROTOCOL= -SECRET_KEY_BASE=56NtB48ear7+wMSf0IQuWDAAazhpb31qyc7GiyspBP2vh7t5zlCsF5QDv76chXeN -# CHECK_ORIGIN= -PORT=4000 -COIN_NAME= -# METADATA_CONTRACT= -# VALIDATORS_CONTRACT= -# KEYS_MANAGER_CONTRACT= -# REWARDS_CONTRACT= -# TOKEN_BRIDGE_CONTRACT= -EMISSION_FORMAT=DEFAULT -# CHAIN_SPEC_PATH= -# SUPPLY_MODULE= -COIN= -EXCHANGE_RATES_COIN= -# EXCHANGE_RATES_SOURCE= -# EXCHANGE_RATES_SECONDARY_COIN_SOURCE= -# EXCHANGE_RATES_MARKET_CAP_SOURCE= -# EXCHANGE_RATES_TVL_SOURCE= -# EXCHANGE_RATES_PRICE_SOURCE= -# EXCHANGE_RATES_COINGECKO_COIN_ID= -# EXCHANGE_RATES_COINGECKO_SECONDARY_COIN_ID= -# EXCHANGE_RATES_COINGECKO_API_KEY= -# EXCHANGE_RATES_COINGECKO_BASE_URL= -# EXCHANGE_RATES_COINGECKO_BASE_PRO_URL= -# EXCHANGE_RATES_COINMARKETCAP_BASE_URL= -# EXCHANGE_RATES_COINMARKETCAP_API_KEY= -# EXCHANGE_RATES_COINMARKETCAP_COIN_ID= -# EXCHANGE_RATES_COINMARKETCAP_SECONDARY_COIN_ID= -# 
EXCHANGE_RATES_CRYPTOCOMPARE_SECONDARY_COIN_SYMBOL= -# EXCHANGE_RATES_CRYPTORANK_SECONDARY_COIN_ID= -# EXCHANGE_RATES_CRYPTORANK_PLATFORM_ID= -# EXCHANGE_RATES_CRYPTORANK_BASE_URL= -# EXCHANGE_RATES_CRYPTORANK_API_KEY= -# EXCHANGE_RATES_CRYPTORANK_COIN_ID= -# EXCHANGE_RATES_CRYPTORANK_LIMIT= -# TOKEN_EXCHANGE_RATES_SOURCE= -# EXCHANGE_RATES_COINGECKO_PLATFORM_ID= -# TOKEN_EXCHANGE_RATE_INTERVAL= -# TOKEN_EXCHANGE_RATE_REFETCH_INTERVAL= -# TOKEN_EXCHANGE_RATE_MAX_BATCH_SIZE= -# DISABLE_TOKEN_EXCHANGE_RATE= -POOL_SIZE=80 -POOL_SIZE_API=10 -ECTO_USE_SSL=false -# DATADOG_HOST= -# DATADOG_PORT= -# SPANDEX_BATCH_SIZE= -# SPANDEX_SYNC_THRESHOLD= -HEART_BEAT_TIMEOUT=30 -# HEART_COMMAND= -# BLOCKSCOUT_VERSION= -RELEASE_LINK= -BLOCK_TRANSFORMER=base -# BLOCK_RANGES= -# FIRST_BLOCK= -# LAST_BLOCK= -# TRACE_BLOCK_RANGES= -# TRACE_FIRST_BLOCK= -# TRACE_LAST_BLOCK= -# FOOTER_CHAT_LINK= -# FOOTER_FORUM_LINK_ENABLED= -# FOOTER_FORUM_LINK= -# FOOTER_TELEGRAM_LINK_ENABLED= -# FOOTER_TELEGRAM_LINK= -# FOOTER_GITHUB_LINK= -FOOTER_LOGO=/images/blockscout_logo.svg -FOOTER_LINK_TO_OTHER_EXPLORERS=false -FOOTER_OTHER_EXPLORERS={} -SUPPORTED_CHAINS={} -CACHE_BLOCK_COUNT_PERIOD=7200 -CACHE_TXS_COUNT_PERIOD=7200 -CACHE_ADDRESS_SUM_PERIOD=3600 -CACHE_TOTAL_GAS_USAGE_PERIOD=3600 -CACHE_ADDRESS_TRANSACTIONS_GAS_USAGE_COUNTER_PERIOD=1800 -CACHE_TOKEN_HOLDERS_COUNTER_PERIOD=3600 -CACHE_TOKEN_TRANSFERS_COUNTER_PERIOD=3600 -CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL=1800 -CACHE_AVERAGE_BLOCK_PERIOD=1800 -CACHE_MARKET_HISTORY_PERIOD=21600 -CACHE_ADDRESS_TRANSACTIONS_COUNTER_PERIOD=1800 -CACHE_ADDRESS_TOKENS_USD_SUM_PERIOD=3600 -CACHE_ADDRESS_TOKEN_TRANSFERS_COUNTER_PERIOD=1800 -# CACHE_TRANSACTIONS_24H_STATS_PERIOD= -# CACHE_FRESH_PENDING_TRANSACTIONS_COUNTER_PERIOD= -# TOKEN_BALANCE_ON_DEMAND_FETCHER_THRESHOLD= -# COIN_BALANCE_ON_DEMAND_FETCHER_THRESHOLD= -# CONTRACT_CODE_ON_DEMAND_FETCHER_THRESHOLD= -# TOKEN_INSTANCE_METADATA_REFETCH_ON_DEMAND_FETCHER_THRESHOLD= 
-TOKEN_METADATA_UPDATE_INTERVAL=172800 -CONTRACT_VERIFICATION_ALLOWED_SOLIDITY_EVM_VERSIONS=homestead,tangerineWhistle,spuriousDragon,byzantium,constantinople,petersburg,istanbul,berlin,london,paris,shanghai,cancun,default -CONTRACT_VERIFICATION_ALLOWED_VYPER_EVM_VERSIONS=byzantium,constantinople,petersburg,istanbul,berlin,paris,shanghai,cancun,default -# CONTRACT_VERIFICATION_MAX_LIBRARIES=10 -CONTRACT_MAX_STRING_LENGTH_WITHOUT_TRIMMING=2040 -# CONTRACT_DISABLE_INTERACTION= -# CONTRACT_AUDIT_REPORTS_AIRTABLE_URL= -# CONTRACT_AUDIT_REPORTS_AIRTABLE_API_KEY= -# CONTRACT_CERTIFIED_LIST= -UNCLES_IN_AVERAGE_BLOCK_TIME=false -DISABLE_WEBAPP=false -API_V2_ENABLED=true -API_V1_READ_METHODS_DISABLED=false -API_V1_WRITE_METHODS_DISABLED=false -# API_RATE_LIMIT_DISABLED=true -# API_SENSITIVE_ENDPOINTS_KEY= -API_RATE_LIMIT_TIME_INTERVAL=1s -API_RATE_LIMIT_BY_IP_TIME_INTERVAL=5m -API_RATE_LIMIT=50 -API_RATE_LIMIT_BY_KEY=50 -API_RATE_LIMIT_BY_WHITELISTED_IP=50 -API_RATE_LIMIT_WHITELISTED_IPS= -API_RATE_LIMIT_STATIC_API_KEY= -API_RATE_LIMIT_UI_V2_WITH_TOKEN=5 -API_RATE_LIMIT_BY_IP=3000 -API_NO_RATE_LIMIT_API_KEY= -# API_GRAPHQL_ENABLED= -# API_GRAPHQL_MAX_COMPLEXITY= -# API_GRAPHQL_TOKEN_LIMIT= -# API_GRAPHQL_DEFAULT_TRANSACTION_HASH= -# API_GRAPHQL_RATE_LIMIT_DISABLED= -# API_GRAPHQL_RATE_LIMIT= -# API_GRAPHQL_RATE_LIMIT_BY_KEY= -# API_GRAPHQL_RATE_LIMIT_TIME_INTERVAL= -# API_GRAPHQL_RATE_LIMIT_BY_IP= -# API_GRAPHQL_RATE_LIMIT_BY_IP_TIME_INTERVAL= -# API_GRAPHQL_RATE_LIMIT_STATIC_API_KEY= -DISABLE_INDEXER=false -DISABLE_REALTIME_INDEXER=false -DISABLE_CATCHUP_INDEXER=false -INDEXER_DISABLE_ADDRESS_COIN_BALANCE_FETCHER=false -INDEXER_DISABLE_TOKEN_INSTANCE_REALTIME_FETCHER=false -INDEXER_DISABLE_TOKEN_INSTANCE_RETRY_FETCHER=false -INDEXER_DISABLE_TOKEN_INSTANCE_SANITIZE_FETCHER=false -INDEXER_DISABLE_TOKEN_INSTANCE_LEGACY_SANITIZE_FETCHER=false -INDEXER_DISABLE_PENDING_TRANSACTIONS_FETCHER=false -INDEXER_DISABLE_INTERNAL_TRANSACTIONS_FETCHER=false -# 
INDEXER_DISABLE_CATALOGED_TOKEN_UPDATER_FETCHER= -# INDEXER_DISABLE_BLOCK_REWARD_FETCHER= -# INDEXER_DISABLE_EMPTY_BLOCKS_SANITIZER= -# INDEXER_DISABLE_WITHDRAWALS_FETCHER= -# INDEXER_DISABLE_REPLACED_TRANSACTION_FETCHER= -# INDEXER_CATCHUP_BLOCKS_BATCH_SIZE= -# INDEXER_CATCHUP_BLOCKS_CONCURRENCY= -# INDEXER_CATCHUP_BLOCK_INTERVAL= -# INDEXER_EMPTY_BLOCKS_SANITIZER_INTERVAL= -# INDEXER_INTERNAL_TRANSACTIONS_BATCH_SIZE= -# INDEXER_INTERNAL_TRANSACTIONS_CONCURRENCY= -# INDEXER_BLOCK_REWARD_BATCH_SIZE= -# INDEXER_BLOCK_REWARD_CONCURRENCY= -# INDEXER_TOKEN_INSTANCE_USE_BASE_URI_RETRY= -# INDEXER_TOKEN_INSTANCE_RETRY_REFETCH_INTERVAL= -# INDEXER_TOKEN_INSTANCE_RETRY_BATCH_SIZE=10 -# INDEXER_TOKEN_INSTANCE_RETRY_CONCURRENCY= -# INDEXER_TOKEN_INSTANCE_REALTIME_BATCH_SIZE=1 -# INDEXER_TOKEN_INSTANCE_REALTIME_CONCURRENCY= -# INDEXER_TOKEN_INSTANCE_SANITIZE_BATCH_SIZE=10 -# INDEXER_TOKEN_INSTANCE_SANITIZE_CONCURRENCY= -# INDEXER_TOKEN_INSTANCE_LEGACY_SANITIZE_BATCH_SIZE=10 -# INDEXER_TOKEN_INSTANCE_LEGACY_SANITIZE_CONCURRENCY=10 -# INDEXER_DISABLE_TOKEN_INSTANCE_ERC_1155_SANITIZE_FETCHER=false -# INDEXER_DISABLE_TOKEN_INSTANCE_ERC_721_SANITIZE_FETCHER=false -# INDEXER_TOKEN_INSTANCE_ERC_1155_SANITIZE_CONCURRENCY=2 -# INDEXER_TOKEN_INSTANCE_ERC_1155_SANITIZE_BATCH_SIZE=10 -# INDEXER_TOKEN_INSTANCE_ERC_721_SANITIZE_CONCURRENCY=2 -# INDEXER_TOKEN_INSTANCE_ERC_721_SANITIZE_BATCH_SIZE=10 -# INDEXER_TOKEN_INSTANCE_ERC_721_SANITIZE_TOKENS_BATCH_SIZE=100 -# TOKEN_INSTANCE_OWNER_MIGRATION_CONCURRENCY=5 -# TOKEN_INSTANCE_OWNER_MIGRATION_BATCH_SIZE=50 -# INDEXER_COIN_BALANCES_BATCH_SIZE= -# INDEXER_COIN_BALANCES_CONCURRENCY= -# INDEXER_RECEIPTS_BATCH_SIZE= -# INDEXER_RECEIPTS_CONCURRENCY= -# INDEXER_TOKEN_CONCURRENCY= -# INDEXER_TOKEN_BALANCES_BATCH_SIZE= -# INDEXER_TOKEN_BALANCES_CONCURRENCY= -# INDEXER_TOKEN_BALANCES_MAX_REFETCH_INTERVAL= -# INDEXER_TOKEN_BALANCES_EXPONENTIAL_TIMEOUT_COEFF= -# INDEXER_TX_ACTIONS_ENABLE= -# INDEXER_TX_ACTIONS_MAX_TOKEN_CACHE_SIZE= -# 
INDEXER_TX_ACTIONS_REINDEX_FIRST_BLOCK= -# INDEXER_TX_ACTIONS_REINDEX_LAST_BLOCK= -# INDEXER_TX_ACTIONS_REINDEX_PROTOCOLS= -# INDEXER_TX_ACTIONS_AAVE_V3_POOL_CONTRACT= -# INDEXER_POLYGON_EDGE_L1_RPC= -# INDEXER_POLYGON_EDGE_L1_EXIT_HELPER_CONTRACT= -# INDEXER_POLYGON_EDGE_L1_WITHDRAWALS_START_BLOCK= -# INDEXER_POLYGON_EDGE_L1_STATE_SENDER_CONTRACT= -# INDEXER_POLYGON_EDGE_L1_DEPOSITS_START_BLOCK= -# INDEXER_POLYGON_EDGE_L2_STATE_SENDER_CONTRACT= -# INDEXER_POLYGON_EDGE_L2_WITHDRAWALS_START_BLOCK= -# INDEXER_POLYGON_EDGE_L2_STATE_RECEIVER_CONTRACT= -# INDEXER_POLYGON_EDGE_L2_DEPOSITS_START_BLOCK= -# INDEXER_POLYGON_EDGE_ETH_GET_LOGS_RANGE_SIZE= -# INDEXER_POLYGON_ZKEVM_BATCHES_ENABLED= -# INDEXER_POLYGON_ZKEVM_BATCHES_CHUNK_SIZE= -# INDEXER_POLYGON_ZKEVM_BATCHES_RECHECK_INTERVAL= -# INDEXER_POLYGON_ZKEVM_L1_RPC= -# INDEXER_POLYGON_ZKEVM_L1_BRIDGE_START_BLOCK= -# INDEXER_POLYGON_ZKEVM_L1_BRIDGE_CONTRACT= -# INDEXER_POLYGON_ZKEVM_L1_BRIDGE_NATIVE_SYMBOL= -# INDEXER_POLYGON_ZKEVM_L1_BRIDGE_NATIVE_DECIMALS= -# INDEXER_POLYGON_ZKEVM_L1_BRIDGE_NETWORK_ID= -# INDEXER_POLYGON_ZKEVM_L1_BRIDGE_ROLLUP_INDEX= -# INDEXER_POLYGON_ZKEVM_L2_BRIDGE_START_BLOCK= -# INDEXER_POLYGON_ZKEVM_L2_BRIDGE_CONTRACT= -# INDEXER_POLYGON_ZKEVM_L2_BRIDGE_NETWORK_ID= -# INDEXER_POLYGON_ZKEVM_L2_BRIDGE_ROLLUP_INDEX= -# INDEXER_ZKSYNC_BATCHES_ENABLED= -# INDEXER_ZKSYNC_BATCHES_CHUNK_SIZE= -# INDEXER_ZKSYNC_NEW_BATCHES_MAX_RANGE= -# INDEXER_ZKSYNC_NEW_BATCHES_RECHECK_INTERVAL= -# INDEXER_ZKSYNC_L1_RPC= -# INDEXER_ZKSYNC_BATCHES_STATUS_RECHECK_INTERVAL= -# INDEXER_ARBITRUM_ARBSYS_CONTRACT= -# INDEXER_ARBITRUM_NODE_INTERFACE_CONTRACT= -# INDEXER_ARBITRUM_L1_RPC= -# INDEXER_ARBITRUM_L1_RPC_CHUNK_SIZE= -# INDEXER_ARBITRUM_L1_RPC_HISTORICAL_BLOCKS_RANGE= -# INDEXER_ARBITRUM_L1_ROLLUP_CONTRACT= -# INDEXER_ARBITRUM_L1_ROLLUP_INIT_BLOCK= -# INDEXER_ARBITRUM_L1_COMMON_START_BLOCK= -# INDEXER_ARBITRUM_L1_FINALIZATION_THRESHOLD= -# INDEXER_ARBITRUM_ROLLUP_CHUNK_SIZE= -# INDEXER_ARBITRUM_BATCHES_TRACKING_ENABLED= 
-# INDEXER_ARBITRUM_BATCHES_TRACKING_RECHECK_INTERVAL= -# INDEXER_ARBITRUM_NEW_BATCHES_LIMIT= -# INDEXER_ARBITRUM_MISSING_BATCHES_RANGE= -# INDEXER_ARBITRUM_BATCHES_TRACKING_MESSAGES_TO_BLOCKS_SHIFT= -# INDEXER_ARBITRUM_CONFIRMATIONS_TRACKING_FINALIZED= -# INDEXER_ARBITRUM_BATCHES_TRACKING_L1_FINALIZATION_CHECK_ENABLED= -# INDEXER_ARBITRUM_BRIDGE_MESSAGES_TRACKING_ENABLED= -# INDEXER_ARBITRUM_TRACKING_MESSAGES_ON_L1_RECHECK_INTERVAL= -# INDEXER_ARBITRUM_MISSED_MESSAGES_RECHECK_INTERVAL= -# INDEXER_ARBITRUM_MISSED_MESSAGES_BLOCKS_DEPTH= -# CELO_CORE_CONTRACTS= -# INDEXER_CELO_VALIDATOR_GROUP_VOTES_BATCH_SIZE=200000 -# INDEXER_DISABLE_CELO_EPOCH_FETCHER=false -# INDEXER_DISABLE_CELO_VALIDATOR_GROUP_VOTES_FETCHER=false -# BERYX_API_TOKEN= -# BERYX_API_BASE_URL= -# FILECOIN_NETWORK_PREFIX=f -# FILECOIN_PENDING_ADDRESS_OPERATIONS_MIGRATION_BATCH_SIZE= -# FILECOIN_PENDING_ADDRESS_OPERATIONS_MIGRATION_CONCURRENCY= -# INDEXER_DISABLE_FILECOIN_ADDRESS_INFO_FETCHER=false -# INDEXER_FILECOIN_ADDRESS_INFO_CONCURRENCY=1 -# INDEXER_REALTIME_FETCHER_MAX_GAP= -# INDEXER_FETCHER_INIT_QUERY_LIMIT= -# INDEXER_TOKEN_BALANCES_FETCHER_INIT_QUERY_LIMIT= -# INDEXER_COIN_BALANCES_FETCHER_INIT_QUERY_LIMIT= -# INDEXER_GRACEFUL_SHUTDOWN_PERIOD= -# INDEXER_INTERNAL_TRANSACTIONS_FETCH_ORDER= -# INDEXER_SYSTEM_MEMORY_PERCENTAGE= -# WITHDRAWALS_FIRST_BLOCK= -# INDEXER_OPTIMISM_L1_RPC= -# INDEXER_OPTIMISM_L1_SYSTEM_CONFIG_CONTRACT= -# INDEXER_OPTIMISM_L1_PORTAL_CONTRACT= -# INDEXER_OPTIMISM_L1_START_BLOCK= -# INDEXER_OPTIMISM_L1_BATCH_INBOX= -# INDEXER_OPTIMISM_L1_BATCH_SUBMITTER= -# INDEXER_OPTIMISM_L1_BATCH_BLOCKS_CHUNK_SIZE= -# INDEXER_OPTIMISM_L2_BATCH_GENESIS_BLOCK_NUMBER= -# INDEXER_OPTIMISM_BLOCK_DURATION= -# INDEXER_OPTIMISM_L1_OUTPUT_ORACLE_CONTRACT= -# INDEXER_OPTIMISM_L2_WITHDRAWALS_START_BLOCK= -# INDEXER_OPTIMISM_L2_MESSAGE_PASSER_CONTRACT= -# INDEXER_OPTIMISM_L1_DEPOSITS_TRANSACTION_TYPE= -# INDEXER_OPTIMISM_L1_ETH_GET_LOGS_RANGE_SIZE= -# INDEXER_OPTIMISM_L2_ETH_GET_LOGS_RANGE_SIZE= 
-# INDEXER_SCROLL_L1_RPC= -# INDEXER_SCROLL_L1_MESSENGER_CONTRACT= -# INDEXER_SCROLL_L1_MESSENGER_START_BLOCK= -# INDEXER_SCROLL_L1_CHAIN_CONTRACT= -# INDEXER_SCROLL_L1_BATCH_START_BLOCK= -# INDEXER_SCROLL_L2_MESSENGER_CONTRACT= -# INDEXER_SCROLL_L2_MESSENGER_START_BLOCK= -# INDEXER_SCROLL_L2_GAS_ORACLE_CONTRACT= -# INDEXER_SCROLL_L1_ETH_GET_LOGS_RANGE_SIZE= -# INDEXER_SCROLL_L2_ETH_GET_LOGS_RANGE_SIZE= -# SCROLL_L2_CURIE_UPGRADE_BLOCK= -# SCROLL_L1_SCALAR_INIT= -# SCROLL_L1_OVERHEAD_INIT= -# SCROLL_L1_COMMIT_SCALAR_INIT= -# SCROLL_L1_BLOB_SCALAR_INIT= -# SCROLL_L1_BASE_FEE_INIT= -# SCROLL_L1_BLOB_BASE_FEE_INIT= -# ROOTSTOCK_REMASC_ADDRESS= -# ROOTSTOCK_BRIDGE_ADDRESS= -# ROOTSTOCK_LOCKED_BTC_CACHE_PERIOD= -# ROOTSTOCK_LOCKING_CAP= -# INDEXER_DISABLE_ROOTSTOCK_DATA_FETCHER= -# INDEXER_ROOTSTOCK_DATA_FETCHER_INTERVAL= -# INDEXER_ROOTSTOCK_DATA_FETCHER_BATCH_SIZE= -# INDEXER_ROOTSTOCK_DATA_FETCHER_CONCURRENCY= -# INDEXER_ROOTSTOCK_DATA_FETCHER_DB_BATCH_SIZE= -# INDEXER_BEACON_RPC_URL=http://localhost:5052 -# INDEXER_DISABLE_BEACON_BLOB_FETCHER= -# INDEXER_BEACON_BLOB_FETCHER_SLOT_DURATION=12 -# INDEXER_BEACON_BLOB_FETCHER_REFERENCE_SLOT=8000000 -# INDEXER_BEACON_BLOB_FETCHER_REFERENCE_TIMESTAMP=1702824023 -# INDEXER_BEACON_BLOB_FETCHER_START_BLOCK=19200000 -# INDEXER_BEACON_BLOB_FETCHER_END_BLOCK=0 -# TOKEN_ID_MIGRATION_FIRST_BLOCK= -# TOKEN_ID_MIGRATION_CONCURRENCY= -# TOKEN_ID_MIGRATION_BATCH_SIZE= -# MISSING_BALANCE_OF_TOKENS_WINDOW_SIZE= -# INDEXER_INTERNAL_TRANSACTIONS_TRACER_TYPE= -# WEBAPP_URL= -# API_URL= -SHOW_ADDRESS_MARKETCAP_PERCENTAGE=true -CHECKSUM_ADDRESS_HASHES=true -CHECKSUM_FUNCTION=eth -DISABLE_EXCHANGE_RATES=true -TXS_STATS_ENABLED=true -SHOW_PRICE_CHART=false -SHOW_PRICE_CHART_LEGEND=false -SHOW_TXS_CHART=true -TXS_HISTORIAN_INIT_LAG=0 -TXS_STATS_DAYS_TO_COMPILE_AT_INIT=10 -COIN_BALANCE_HISTORY_DAYS=90 -APPS_MENU=true -EXTERNAL_APPS=[] -# GAS_PRICE= -# GAS_PRICE_ORACLE_CACHE_PERIOD= -# GAS_PRICE_ORACLE_SIMPLE_TRANSACTION_GAS= -# 
GAS_PRICE_ORACLE_NUM_OF_BLOCKS= -# GAS_PRICE_ORACLE_SAFELOW_PERCENTILE= -# GAS_PRICE_ORACLE_AVERAGE_PERCENTILE= -# GAS_PRICE_ORACLE_FAST_PERCENTILE= -# GAS_PRICE_ORACLE_SAFELOW_TIME_COEFFICIENT= -# GAS_PRICE_ORACLE_AVERAGE_TIME_COEFFICIENT= -# GAS_PRICE_ORACLE_FAST_TIME_COEFFICIENT= -# RESTRICTED_LIST= -# RESTRICTED_LIST_KEY= -SHOW_MAINTENANCE_ALERT=false -MAINTENANCE_ALERT_MESSAGE= -CHAIN_ID=32382 -MAX_SIZE_UNLESS_HIDE_ARRAY=50 -HIDE_BLOCK_MINER=false -# HIDE_SCAM_ADDRESSES= -DISPLAY_TOKEN_ICONS=false -RE_CAPTCHA_SECRET_KEY= -RE_CAPTCHA_CLIENT_KEY= -RE_CAPTCHA_V3_SECRET_KEY= -RE_CAPTCHA_V3_CLIENT_KEY= -RE_CAPTCHA_DISABLED=false -# RE_CAPTCHA_CHECK_HOSTNAME -# RE_CAPTCHA_SCORE_THRESHOLD -JSON_RPC= -# API_RATE_LIMIT_HAMMER_REDIS_URL=redis://redis-db:6379/1 -# API_RATE_LIMIT_IS_BLOCKSCOUT_BEHIND_PROXY=false -API_RATE_LIMIT_UI_V2_TOKEN_TTL_IN_SECONDS=18000 -FETCH_REWARDS_WAY=trace_block -MICROSERVICE_SC_VERIFIER_ENABLED=true -# MICROSERVICE_SC_VERIFIER_URL=http://sc-verifier:8050/ -# MICROSERVICE_SC_VERIFIER_TYPE=sc_verifier -# MICROSERVICE_SC_VERIFIER_URL=https://eth-bytecode-db.services.blockscout.com/ -# MICROSERVICE_SC_VERIFIER_TYPE=eth_bytecode_db -MICROSERVICE_SC_VERIFIER_URL=http://sc-verifier:8050/ -MICROSERVICE_SC_VERIFIER_TYPE=sc_verifier -MICROSERVICE_ETH_BYTECODE_DB_INTERVAL_BETWEEN_LOOKUPS=10m -MICROSERVICE_ETH_BYTECODE_DB_MAX_LOOKUPS_CONCURRENCY=10 -MICROSERVICE_VISUALIZE_SOL2UML_ENABLED=true -MICROSERVICE_VISUALIZE_SOL2UML_URL=http://visualizer:8050/ -MICROSERVICE_SIG_PROVIDER_ENABLED=true -MICROSERVICE_SIG_PROVIDER_URL=http://sig-provider:8050/ -# MICROSERVICE_BENS_URL= -# MICROSERVICE_BENS_ENABLED= -MICROSERVICE_ACCOUNT_ABSTRACTION_ENABLED=false -MICROSERVICE_ACCOUNT_ABSTRACTION_URL=http://user-ops-indexer:8050/ -# MICROSERVICE_METADATA_URL= -# MICROSERVICE_METADATA_ENABLED= -# MICROSERVICE_STYLUS_VERIFIER_URL= -DECODE_NOT_A_CONTRACT_CALLS=true -# DATABASE_READ_ONLY_API_URL= -# ACCOUNT_DATABASE_URL= -# ACCOUNT_POOL_SIZE= -# ACCOUNT_AUTH0_DOMAIN= -# 
ACCOUNT_AUTH0_CLIENT_ID= -# ACCOUNT_AUTH0_CLIENT_SECRET= -# ACCOUNT_PUBLIC_TAGS_AIRTABLE_URL= -# ACCOUNT_PUBLIC_TAGS_AIRTABLE_API_KEY= -# ACCOUNT_SENDGRID_API_KEY= -# ACCOUNT_SENDGRID_SENDER= -# ACCOUNT_SENDGRID_TEMPLATE= -# ACCOUNT_VERIFICATION_EMAIL_RESEND_INTERVAL= -# ACCOUNT_OTP_RESEND_INTERVAL= -# ACCOUNT_PRIVATE_TAGS_LIMIT=2000 -# ACCOUNT_WATCHLIST_ADDRESSES_LIMIT=15 -# ACCOUNT_SIWE_MESSAGE= -ACCOUNT_CLOAK_KEY= -ACCOUNT_ENABLED=false -ACCOUNT_REDIS_URL=redis://redis-db:6379 -EIP_1559_ELASTICITY_MULTIPLIER=2 -# MIXPANEL_TOKEN= -# MIXPANEL_URL= -# AMPLITUDE_API_KEY= -# AMPLITUDE_URL= -# IPFS_GATEWAY_URL= -# IPFS_GATEWAY_URL_PARAM_KEY= -# IPFS_GATEWAY_URL_PARAM_VALUE= -# IPFS_GATEWAY_URL_PARAM_LOCATION= -# IPFS_PUBLIC_GATEWAY_URL= -# ADDRESSES_TABS_COUNTERS_TTL=10m -# DENORMALIZATION_MIGRATION_BATCH_SIZE= -# DENORMALIZATION_MIGRATION_CONCURRENCY= -# TOKEN_TRANSFER_TOKEN_TYPE_MIGRATION_BATCH_SIZE= -# TOKEN_TRANSFER_TOKEN_TYPE_MIGRATION_CONCURRENCY= -# SANITIZE_INCORRECT_NFT_BATCH_SIZE= -# SANITIZE_INCORRECT_NFT_CONCURRENCY= -# MIGRATION_RESTORE_OMITTED_WETH_TOKEN_TRANSFERS_CONCURRENCY= -# MIGRATION_RESTORE_OMITTED_WETH_TOKEN_TRANSFERS_BATCH_SIZE= -# MIGRATION_RESTORE_OMITTED_WETH_TOKEN_TRANSFERS_TIMEOUT= -# MIGRATION_SANITIZE_DUPLICATED_LOG_INDEX_LOGS_CONCURRENCY= -# MIGRATION_SANITIZE_DUPLICATED_LOG_INDEX_LOGS_BATCH_SIZE= -# MIGRATION_REFETCH_CONTRACT_CODES_BATCH_SIZE= -# MIGRATION_REFETCH_CONTRACT_CODES_CONCURRENCY= -SOURCIFY_INTEGRATION_ENABLED=false -SOURCIFY_SERVER_URL= -SOURCIFY_REPO_URL= -SHOW_TENDERLY_LINK=false -TENDERLY_CHAIN_PATH= -# SOLIDITYSCAN_PLATFORM_ID= -# SOLIDITYSCAN_CHAIN_ID= -# SOLIDITYSCAN_API_TOKEN= -# NOVES_FI_BASE_API_URL= -# NOVES_FI_CHAIN_NAME= -# NOVES_FI_API_TOKEN= -# ZERION_BASE_API_URL= -# ZERION_API_TOKEN= -# XNAME_BASE_API_URL= -# XNAME_API_TOKEN= -# BRIDGED_TOKENS_ENABLED= -# BRIDGED_TOKENS_ETH_OMNI_BRIDGE_MEDIATOR= -# BRIDGED_TOKENS_BSC_OMNI_BRIDGE_MEDIATOR= -# BRIDGED_TOKENS_POA_OMNI_BRIDGE_MEDIATOR= -# 
BRIDGED_TOKENS_AMB_BRIDGE_MEDIATORS -# BRIDGED_TOKENS_FOREIGN_JSON_RPC -# MUD_INDEXER_ENABLED= -# MUD_DATABASE_URL= -# MUD_POOL_SIZE=50 -# WETH_TOKEN_TRANSFERS_FILTERING_ENABLED=false -# WHITELISTED_WETH_CONTRACTS= -# SANITIZE_INCORRECT_WETH_BATCH_SIZE=100 -# SANITIZE_INCORRECT_WETH_CONCURRENCY=1 -# PUBLIC_METRICS_ENABLED= -# PUBLIC_METRICS_UPDATE_PERIOD_HOURS= -# CSV_EXPORT_LIMIT= -# SHRINK_INTERNAL_TRANSACTIONS_ENABLED= -# SHRINK_INTERNAL_TRANSACTIONS_BATCH_SIZE= -# SHRINK_INTERNAL_TRANSACTIONS_CONCURRENCY= diff --git a/workspaces/blockscout/envs/common-frontend.env b/workspaces/blockscout/envs/common-frontend.env deleted file mode 100644 index 291e7fd9..00000000 --- a/workspaces/blockscout/envs/common-frontend.env +++ /dev/null @@ -1,26 +0,0 @@ -NEXT_PUBLIC_API_HOST=localhost:3080 -NEXT_PUBLIC_API_PROTOCOL=http -NEXT_PUBLIC_STATS_API_HOST=http://localhost:8080 -NEXT_PUBLIC_NETWORK_NAME=Lido DevNet -NEXT_PUBLIC_NETWORK_SHORT_NAME=Lido DevNet - -NEXT_PUBLIC_NETWORK_LOGO=https://raw.githubusercontent.com/gist/eddort/84ad372669f75fbf63ac01aae8fdf420/raw/ac5a0b6bb196bd4abc8e79a671e8626b13beb310/logo.svg -NEXT_PUBLIC_NETWORK_LOGO_DARK=https://raw.githubusercontent.com/gist/eddort/c2017e199179b1d1756532e8ea14af76/raw/ef0c2cf96624f46c43d4874551d96d4bcae35e57/logo-light.svg - - -NEXT_PUBLIC_NETWORK_ICON=https://docs.lido.fi/img/logo.svg -NEXT_PUBLIC_NETWORK_ICON_DARK=https://raw.githubusercontent.com/gist/eddort/728ba9b98718a44480498028f78adfcd/raw/bf761e8c45ff0a393e1c127bba7787885ffdf0a8/icon-light.svg - -NEXT_PUBLIC_OG_IMAGE_URL=https://avatars.githubusercontent.com/u/68384064?s=200&v=4 -NEXT_PUBLIC_NETWORK_ID=5 -NEXT_PUBLIC_NETWORK_CURRENCY_NAME=Ether -NEXT_PUBLIC_NETWORK_CURRENCY_SYMBOL=ETH -NEXT_PUBLIC_NETWORK_CURRENCY_DECIMALS=18 -NEXT_PUBLIC_API_BASE_PATH=/ -NEXT_PUBLIC_APP_HOST=localhost:3080 -NEXT_PUBLIC_APP_PROTOCOL=http -NEXT_PUBLIC_HOMEPAGE_CHARTS=['daily_txs'] -NEXT_PUBLIC_VISUALIZE_API_HOST=http://localhost:8081 -NEXT_PUBLIC_IS_TESTNET=true 
-NEXT_PUBLIC_API_WEBSOCKET_PROTOCOL=ws -NEXT_PUBLIC_API_SPEC_URL=https://raw.githubusercontent.com/blockscout/blockscout-api-v2-swagger/main/swagger.yaml diff --git a/workspaces/blockscout/envs/common-smart-contract-verifier.env b/workspaces/blockscout/envs/common-smart-contract-verifier.env deleted file mode 100644 index 9455f796..00000000 --- a/workspaces/blockscout/envs/common-smart-contract-verifier.env +++ /dev/null @@ -1,40 +0,0 @@ -# Those are examples of existing configuration variables and their default values. -# When uncommented, they would overwrite corresponding values from `base.toml` -# configuration file. - -SMART_CONTRACT_VERIFIER__SERVER__HTTP__ENABLED=true -SMART_CONTRACT_VERIFIER__SERVER__HTTP__ADDR=0.0.0.0:8050 -SMART_CONTRACT_VERIFIER__SERVER__HTTP__MAX_BODY_SIZE=2097152 - -SMART_CONTRACT_VERIFIER__SERVER__GRPC__ENABLED=false -SMART_CONTRACT_VERIFIER__SERVER__GRPC__ADDR=0.0.0.0:8051 - -SMART_CONTRACT_VERIFIER__SOLIDITY__ENABLED=true -SMART_CONTRACT_VERIFIER__SOLIDITY__COMPILERS_DIR=/tmp/solidity-compilers -SMART_CONTRACT_VERIFIER__SOLIDITY__REFRESH_VERSIONS_SCHEDULE=0 0 * * * * * - -# It depends on the OS you are running the service on -SMART_CONTRACT_VERIFIER__SOLIDITY__FETCHER__LIST__LIST_URL=https://solc-bin.ethereum.org/linux-amd64/list.json -#SMART_CONTRACT_VERIFIER__SOLIDITY__FETCHER__LIST__LIST_URL=https://solc-bin.ethereum.org/macosx-amd64/list.json -#SMART_CONTRACT_VERIFIER__SOLIDITY__FETCHER__LIST__LIST_URL=https://solc-bin.ethereum.org/windows-amd64/list.json - -SMART_CONTRACT_VERIFIER__VYPER__ENABLED=true -SMART_CONTRACT_VERIFIER__VYPER__COMPILERS_DIR=/tmp/vyper-compilers -SMART_CONTRACT_VERIFIER__VYPER__REFRESH_VERSIONS_SCHEDULE=0 0 * * * * * - -# It depends on the OS you are running the service on -SMART_CONTRACT_VERIFIER__VYPER__FETCHER__LIST__LIST_URL=https://raw.githubusercontent.com/blockscout/solc-bin/main/vyper.list.json 
-#SMART_CONTRACT_VERIFIER__VYPER__FETCHER__LIST__LIST_URL=https://raw.githubusercontent.com/blockscout/solc-bin/main/vyper.macos.list.json - -SMART_CONTRACT_VERIFIER__SOURCIFY__ENABLED=false -SMART_CONTRACT_VERIFIER__SOURCIFY__API_URL=https://sourcify.dev/server/ -SMART_CONTRACT_VERIFIER__SOURCIFY__VERIFICATION_ATTEMPTS=3 -SMART_CONTRACT_VERIFIER__SOURCIFY__REQUEST_TIMEOUT=10 - -SMART_CONTRACT_VERIFIER__METRICS__ENABLED=false -SMART_CONTRACT_VERIFIER__METRICS__ADDR=0.0.0.0:6060 -SMART_CONTRACT_VERIFIER__METRICS__ROUTE=/metrics - -SMART_CONTRACT_VERIFIER__JAEGER__ENABLED=false -SMART_CONTRACT_VERIFIER__JAEGER__AGENT_ENDPOINT=localhost:6831 -MICROSERVICE_SC_VERIFIER_API_KEY=kek \ No newline at end of file diff --git a/workspaces/blockscout/envs/common-stats.env b/workspaces/blockscout/envs/common-stats.env deleted file mode 100644 index ae6b1f7a..00000000 --- a/workspaces/blockscout/envs/common-stats.env +++ /dev/null @@ -1,29 +0,0 @@ -# Those are examples of existing configuration variables and their default values. -# When uncommented, they would overwrite corresponding values from `base.toml` -# configuration file. 
- -STATS__SERVER__HTTP__ENABLED=true -STATS__SERVER__HTTP__ADDR=0.0.0.0:8050 -STATS__SERVER__HTTP__MAX_BODY_SIZE=2097152 - -STATS__SERVER__GRPC__ENABLED=false -STATS__SERVER__GRPC__ADDR=0.0.0.0:8051 - -STATS__DB_URL= -STATS__BLOCKSCOUT_DB_URL= -STATS__CREATE_DATABASE=false -STATS__RUN_MIGRATIONS=false -STATS__DEFAULT_SCHEDULE=0 0 1 * * * * -STATS__FORCE_UPDATE_ON_START=false - -STATS__METRICS__ENABLED=false -STATS__METRICS__ADDR=0.0.0.0:6060 -STATS__METRICS__ROUTE=/metrics - -STATS__JAEGER__ENABLED=false -STATS__JAEGER__AGENT_ENDPOINT=localhost:6831 - -STATS__TRACING__ENABLED=true -STATS__TRACING__FORMAT=default - -STATS__BLOCKSCOUT_API_URL=http://proxy diff --git a/workspaces/blockscout/envs/common-user-ops-indexer.env b/workspaces/blockscout/envs/common-user-ops-indexer.env deleted file mode 100644 index 3345cba4..00000000 --- a/workspaces/blockscout/envs/common-user-ops-indexer.env +++ /dev/null @@ -1,48 +0,0 @@ -## Those are examples of existing configuration variables and their default values. -## When uncommented, they would overwrite corresponding values from `base.toml` -## configuration file. 
- -USER_OPS_INDEXER__SERVER__HTTP__ENABLED=true -USER_OPS_INDEXER__SERVER__HTTP__ADDR=0.0.0.0:8050 -USER_OPS_INDEXER__SERVER__HTTP__MAX_BODY_SIZE=2097152 -USER_OPS_INDEXER__SERVER__GRPC__ENABLED=false -USER_OPS_INDEXER__SERVER__GRPC__ADDR=0.0.0.0:8051 - -USER_OPS_INDEXER__API__MAX_PAGE_SIZE=100 - -## (required) no default value available -USER_OPS_INDEXER__INDEXER__RPC_URL="" -USER_OPS_INDEXER__INDEXER__CONCURRENCY=20 -USER_OPS_INDEXER__INDEXER__ENTRYPOINTS__V06=true -USER_OPS_INDEXER__INDEXER__ENTRYPOINTS__V07=true - -USER_OPS_INDEXER__INDEXER__REALTIME__ENABLED=true - -USER_OPS_INDEXER__INDEXER__PAST_RPC_LOGS_INDEXER__ENABLED=true -USER_OPS_INDEXER__INDEXER__PAST_RPC_LOGS_INDEXER__BLOCK_RANGE=1000 - -USER_OPS_INDEXER__INDEXER__PAST_DB_LOGS_INDEXER__ENABLED=true -USER_OPS_INDEXER__INDEXER__PAST_DB_LOGS_INDEXER__START_BLOCK=-100000 -USER_OPS_INDEXER__INDEXER__PAST_DB_LOGS_INDEXER__END_BLOCK=0 - -## (required) no default value available -USER_OPS_INDEXER__DATABASE__CONNECT__URL="" -# OR -#USER_OPS_INDEXER__DATABASE__CONNECT__KV__HOST= -#USER_OPS_INDEXER__DATABASE__CONNECT__KV__PORT= -#USER_OPS_INDEXER__DATABASE__CONNECT__KV__USER= -#USER_OPS_INDEXER__DATABASE__CONNECT__KV__PASSWORD= -#USER_OPS_INDEXER__DATABASE__CONNECT__KV__DBNAME= - -USER_OPS_INDEXER__DATABASE__CREATE_DATABASE=false -USER_OPS_INDEXER__DATABASE__RUN_MIGRATIONS=false - -USER_OPS_INDEXER__METRICS__ENABLED=true -USER_OPS_INDEXER__METRICS__ADDR=0.0.0.0:6060 -USER_OPS_INDEXER__METRICS__ROUTE=/metrics - -USER_OPS_INDEXER__JAEGER__ENABLED=false -USER_OPS_INDEXER__JAEGER__AGENT_ENDPOINT=localhost:6831 - -USER_OPS_INDEXER__TRACING__ENABLED=true -USER_OPS_INDEXER__TRACING__FORMAT=default diff --git a/workspaces/blockscout/envs/common-visualizer.env b/workspaces/blockscout/envs/common-visualizer.env deleted file mode 100644 index b4fd4708..00000000 --- a/workspaces/blockscout/envs/common-visualizer.env +++ /dev/null @@ -1 +0,0 @@ -VISUALIZER__SERVER__GRPC__ENABLED=false diff --git 
a/workspaces/blockscout/erigon.yml b/workspaces/blockscout/erigon.yml deleted file mode 100644 index c91739d0..00000000 --- a/workspaces/blockscout/erigon.yml +++ /dev/null @@ -1,87 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - db: - depends_on: - db-init: - condition: service_completed_successfully - extends: - file: ./services/db.yml - service: db - - backend: - depends_on: - - db - - redis-db - extends: - file: ./services/backend.yml - service: backend - links: - - db:database - environment: - ETHEREUM_JSONRPC_VARIANT: 'erigon' - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - - frontend: - depends_on: - - backend - extends: - file: ./services/frontend.yml - service: frontend - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - - backend - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - depends_on: - - db - - backend - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - backend - - frontend - - stats - extends: - file: ./services/nginx.yml - service: proxy diff --git a/workspaces/blockscout/external-backend.yml b/workspaces/blockscout/external-backend.yml deleted file mode 100644 index 4dd9e285..00000000 --- a/workspaces/blockscout/external-backend.yml +++ /dev/null @@ -1,70 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - db: - depends_on: - db-init: - condition: 
service_completed_successfully - extends: - file: ./services/db.yml - service: db - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - - frontend: - extends: - file: ./services/frontend.yml - service: frontend - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - depends_on: - - db - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - frontend - - stats - extends: - file: ./services/nginx.yml - service: proxy diff --git a/workspaces/blockscout/external-db.yml b/workspaces/blockscout/external-db.yml deleted file mode 100644 index 082927be..00000000 --- a/workspaces/blockscout/external-db.yml +++ /dev/null @@ -1,70 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - backend: - depends_on: - - redis-db - extends: - file: ./services/backend.yml - service: backend - environment: - ETHEREUM_JSONRPC_VARIANT: 'geth' - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - - frontend: - depends_on: - - backend - extends: - file: ./services/frontend.yml - service: frontend - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - - backend - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - 
depends_on: - - backend - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - backend - - frontend - - stats - extends: - file: ./services/nginx.yml - service: proxy diff --git a/workspaces/blockscout/external-frontend.yml b/workspaces/blockscout/external-frontend.yml deleted file mode 100644 index 81f4a5c7..00000000 --- a/workspaces/blockscout/external-frontend.yml +++ /dev/null @@ -1,83 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - db: - depends_on: - db-init: - condition: service_completed_successfully - extends: - file: ./services/db.yml - service: db - - backend: - depends_on: - - db - - redis-db - extends: - file: ./services/backend.yml - service: backend - links: - - db:database - environment: - ETHEREUM_JSONRPC_VARIANT: 'ganache' - ETHEREUM_JSONRPC_WS_URL: ws://execution:8545/ - INDEXER_DISABLE_INTERNAL_TRANSACTIONS_FETCHER: 'true' - INDEXER_DISABLE_PENDING_TRANSACTIONS_FETCHER: 'true' - CHAIN_ID: '1337' - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - - backend - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - depends_on: - - db - - backend - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - backend - - stats - extends: - file: ./services/nginx.yml - service: proxy diff --git a/workspaces/blockscout/ganache.yml b/workspaces/blockscout/ganache.yml deleted file mode 100644 index c9dbcc00..00000000 
--- a/workspaces/blockscout/ganache.yml +++ /dev/null @@ -1,94 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - db: - depends_on: - db-init: - condition: service_completed_successfully - extends: - file: ./services/db.yml - service: db - - backend: - depends_on: - - db - - redis-db - extends: - file: ./services/backend.yml - service: backend - links: - - db:database - environment: - ETHEREUM_JSONRPC_VARIANT: 'ganache' - ETHEREUM_JSONRPC_WS_URL: ws://execution:8545/ - INDEXER_DISABLE_INTERNAL_TRANSACTIONS_FETCHER: 'true' - INDEXER_DISABLE_PENDING_TRANSACTIONS_FETCHER: 'true' - CHAIN_ID: '1337' - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - - frontend: - depends_on: - - backend - extends: - file: ./services/frontend.yml - service: frontend - environment: - NEXT_PUBLIC_NETWORK_ID: '1337' - NEXT_PUBLIC_NETWORK_RPC_URL: http://execution:8545/ - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - - backend - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - depends_on: - - db - - backend - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - backend - - frontend - - stats - extends: - file: ./services/nginx.yml - service: proxy diff --git a/workspaces/blockscout/geth-clique-consensus.yml b/workspaces/blockscout/geth-clique-consensus.yml deleted file mode 100644 index 75a0b457..00000000 --- a/workspaces/blockscout/geth-clique-consensus.yml +++ /dev/null @@ -1,88 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: 
- file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - db: - depends_on: - db-init: - condition: service_completed_successfully - extends: - file: ./services/db.yml - service: db - - backend: - depends_on: - - db - - redis-db - extends: - file: ./services/backend.yml - service: backend - links: - - db:database - environment: - ETHEREUM_JSONRPC_VARIANT: 'geth' - BLOCK_TRANSFORMER: 'clique' - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - - frontend: - depends_on: - - backend - extends: - file: ./services/frontend.yml - service: frontend - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - - backend - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - depends_on: - - db - - backend - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - backend - - frontend - - stats - extends: - file: ./services/nginx.yml - service: proxy diff --git a/workspaces/blockscout/geth-frontend.yml b/workspaces/blockscout/geth-frontend.yml deleted file mode 100644 index 1f5c10da..00000000 --- a/workspaces/blockscout/geth-frontend.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - driver: bridge - -services: - frontend: - extends: - file: ./services/frontend.yml - service: frontend - environment: - NEXT_PUBLIC_API_HOST: ${NEXT_PUBLIC_API_HOST} - NEXT_PUBLIC_APP_HOST: ${NEXT_PUBLIC_API_HOST} - -volumes: - redis-data: - blockscout-db-data: - stats-db-data: - logs-data: diff --git 
a/workspaces/blockscout/geth.yml b/workspaces/blockscout/geth.yml deleted file mode 100644 index 654a59f7..00000000 --- a/workspaces/blockscout/geth.yml +++ /dev/null @@ -1,107 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - driver: bridge - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - sc-verifier: - extends: - file: ./services/smart-contract-verifier.yml - service: smart-contract-verifier - - db: - depends_on: - db-init: - condition: service_completed_successfully - extends: - file: ./services/db.yml - service: db - - backend: - depends_on: - - db - - redis-db - extends: - file: ./services/backend.yml - service: backend - links: - - db:database - environment: - ETHEREUM_JSONRPC_VARIANT: 'geth' - ETHEREUM_JSONRPC_WS_URL: ${BLOCKSCOUT_WS_RPC_URL} - ETHEREUM_JSONRPC_TRACE_URL: ${BLOCKSCOUT_RPC_URL} - ETHEREUM_JSONRPC_HTTP_URL: ${BLOCKSCOUT_RPC_URL} - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - frontend: - extends: - file: ./services/frontend.yml - service: frontend - environment: - NEXT_PUBLIC_API_HOST: ${NEXT_PUBLIC_API_HOST} - NEXT_PUBLIC_APP_HOST: ${NEXT_PUBLIC_API_HOST} - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - - backend - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - depends_on: - - db - - backend - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - backend - - stats - extends: - file: ./services/nginx.yml - 
service: proxy - -volumes: - redis-data: - blockscout-db-data: - stats-db-data: - logs-data: diff --git a/workspaces/blockscout/hardhat-network.yml b/workspaces/blockscout/hardhat-network.yml deleted file mode 100644 index 49d5c10f..00000000 --- a/workspaces/blockscout/hardhat-network.yml +++ /dev/null @@ -1,94 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - db: - depends_on: - db-init: - condition: service_completed_successfully - extends: - file: ./services/db.yml - service: db - - backend: - depends_on: - - db - - redis-db - extends: - file: ./services/backend.yml - service: backend - links: - - db:database - environment: - ETHEREUM_JSONRPC_VARIANT: 'geth' - ETHEREUM_JSONRPC_WS_URL: ws://execution:8546/ - INDEXER_DISABLE_PENDING_TRANSACTIONS_FETCHER: 'true' - INDEXER_INTERNAL_TRANSACTIONS_TRACER_TYPE: 'opcode' - CHAIN_ID: '31337' - - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - - frontend: - depends_on: - - backend - extends: - file: ./services/frontend.yml - service: frontend - environment: - NEXT_PUBLIC_NETWORK_ID: '31337' - NEXT_PUBLIC_NETWORK_RPC_URL: http://execution:8545/ - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - - backend - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - depends_on: - - db - - backend - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - backend - - frontend - - stats - extends: - file: ./services/nginx.yml - service: proxy diff --git 
a/workspaces/blockscout/microservices.yml b/workspaces/blockscout/microservices.yml deleted file mode 100644 index 58a52232..00000000 --- a/workspaces/blockscout/microservices.yml +++ /dev/null @@ -1,57 +0,0 @@ -version: '3.9' - -services: - visualizer: - extends: - file: ./services/visualizer.yml - service: visualizer - - sig-provider: - extends: - file: ./services/sig-provider.yml - service: sig-provider - ports: - - 8083:8050 - - - sc-verifier: - extends: - file: ./services/smart-contract-verifier.yml - service: smart-contract-verifier - ports: - - 0:8050 - - stats-db-init: - extends: - file: ./services/stats.yml - service: stats-db-init - - stats-db: - depends_on: - stats-db-init: - condition: service_completed_successfully - extends: - file: ./services/stats.yml - service: stats-db - - stats: - depends_on: - - stats-db - extends: - file: ./services/stats.yml - service: stats - - user-ops-indexer: - extends: - file: ./services/user-ops-indexer.yml - service: user-ops-indexer - - proxy: - depends_on: - - visualizer - - stats - extends: - file: ./services/nginx.yml - service: proxy - volumes: - - "./proxy/microservices.conf.template:/etc/nginx/templates/default.conf.template" diff --git a/workspaces/blockscout/no-services.yml b/workspaces/blockscout/no-services.yml deleted file mode 100644 index 410ba67c..00000000 --- a/workspaces/blockscout/no-services.yml +++ /dev/null @@ -1,66 +0,0 @@ -version: '3.9' - -services: - redis-db: - extends: - file: ./services/redis.yml - service: redis-db - - db-init: - extends: - file: ./services/db.yml - service: db-init - - db: - depends_on: - db-init: - condition: service_completed_successfully - extends: - file: ./services/db.yml - service: db - - backend: - depends_on: - - db - - redis-db - extends: - file: ./services/backend.yml - service: backend - build: - context: .. 
- dockerfile: ./docker/Dockerfile - args: - CACHE_EXCHANGE_RATES_PERIOD: "" - API_V1_READ_METHODS_DISABLED: "false" - DISABLE_WEBAPP: "false" - API_V1_WRITE_METHODS_DISABLED: "false" - CACHE_TOTAL_GAS_USAGE_COUNTER_ENABLED: "" - CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL: "" - ADMIN_PANEL_ENABLED: "" - RELEASE_VERSION: 6.9.0 - links: - - db:database - environment: - ETHEREUM_JSONRPC_HTTP_URL: http://execution:8545/ - ETHEREUM_JSONRPC_TRACE_URL: http://execution:8545/ - ETHEREUM_JSONRPC_WS_URL: ws://execution:8546/ - CHAIN_ID: '1337' - - frontend: - depends_on: - - backend - extends: - file: ./services/frontend.yml - service: frontend - environment: - NEXT_PUBLIC_STATS_API_HOST: - - proxy: - depends_on: - - backend - - frontend - extends: - file: ./services/nginx-explorer.yml - service: proxy - volumes: - - "./proxy/explorer.conf.template:/etc/nginx/templates/default.conf.template" diff --git a/workspaces/blockscout/proxy/default.conf.template b/workspaces/blockscout/proxy/default.conf.template deleted file mode 100644 index cebe8a83..00000000 --- a/workspaces/blockscout/proxy/default.conf.template +++ /dev/null @@ -1,93 +0,0 @@ -map $http_upgrade $connection_upgrade { - - default upgrade; - '' close; -} - -server { - listen 80; - server_name localhost; - proxy_http_version 1.1; - - location ~ ^/(api(?!-docs$)|socket|sitemap.xml|auth/auth0|auth/auth0/callback|auth/logout) { - proxy_pass ${BACK_PROXY_PASS}; - proxy_http_version 1.1; - proxy_set_header Host "$host"; - proxy_set_header X-Real-IP "$remote_addr"; - proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for"; - proxy_set_header X-Forwarded-Proto "$scheme"; - proxy_set_header Upgrade "$http_upgrade"; - proxy_set_header Connection $connection_upgrade; - proxy_cache_bypass $http_upgrade; - } - location / { - proxy_pass ${FRONT_PROXY_PASS}; - proxy_http_version 1.1; - proxy_set_header Host "$host"; - proxy_set_header X-Real-IP "$remote_addr"; - proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for"; 
- proxy_set_header X-Forwarded-Proto "$scheme"; - proxy_set_header Upgrade "$http_upgrade"; - proxy_set_header Connection $connection_upgrade; - proxy_cache_bypass $http_upgrade; - } -} -server { - listen 8080; - server_name localhost; - proxy_http_version 1.1; - proxy_hide_header Access-Control-Allow-Origin; - proxy_hide_header Access-Control-Allow-Methods; - add_header 'Access-Control-Allow-Origin' 'http://localhost' always; - add_header 'Access-Control-Allow-Credentials' 'true' always; - add_header 'Access-Control-Allow-Methods' 'PUT, GET, POST, OPTIONS, DELETE, PATCH' always; - - location / { - proxy_pass http://stats:8050/; - proxy_http_version 1.1; - proxy_set_header Host "$host"; - proxy_set_header X-Real-IP "$remote_addr"; - proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for"; - proxy_set_header X-Forwarded-Proto "$scheme"; - proxy_set_header Upgrade "$http_upgrade"; - proxy_set_header Connection $connection_upgrade; - proxy_cache_bypass $http_upgrade; - } -} -server { - listen 8081; - server_name localhost; - proxy_http_version 1.1; - proxy_hide_header Access-Control-Allow-Origin; - proxy_hide_header Access-Control-Allow-Methods; - add_header 'Access-Control-Allow-Origin' 'http://localhost' always; - add_header 'Access-Control-Allow-Credentials' 'true' always; - add_header 'Access-Control-Allow-Methods' 'PUT, GET, POST, OPTIONS, DELETE, PATCH' always; - add_header 'Access-Control-Allow-Headers' 'DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,x-csrf-token' always; - - location / { - proxy_pass http://visualizer:8050/; - proxy_http_version 1.1; - proxy_buffering off; - proxy_set_header Host "$host"; - proxy_set_header X-Real-IP "$remote_addr"; - proxy_connect_timeout 30m; - proxy_read_timeout 30m; - proxy_send_timeout 30m; - proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for"; - proxy_set_header X-Forwarded-Proto "$scheme"; - proxy_set_header Upgrade "$http_upgrade"; - 
proxy_set_header Connection $connection_upgrade; - proxy_cache_bypass $http_upgrade; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' 'http://localhost' always; - add_header 'Access-Control-Allow-Credentials' 'true' always; - add_header 'Access-Control-Allow-Methods' 'PUT, GET, POST, OPTIONS, DELETE, PATCH' always; - add_header 'Access-Control-Allow-Headers' 'DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,x-csrf-token' always; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - } -} \ No newline at end of file diff --git a/workspaces/blockscout/proxy/explorer.conf.template b/workspaces/blockscout/proxy/explorer.conf.template deleted file mode 100644 index 7cc794ed..00000000 --- a/workspaces/blockscout/proxy/explorer.conf.template +++ /dev/null @@ -1,34 +0,0 @@ -map $http_upgrade $connection_upgrade { - - default upgrade; - '' close; -} - -server { - listen 80; - server_name localhost; - proxy_http_version 1.1; - - location ~ ^/(api(?!-docs$)|socket|sitemap.xml|auth/auth0|auth/auth0/callback|auth/logout) { - proxy_pass ${BACK_PROXY_PASS}; - proxy_http_version 1.1; - proxy_set_header Host "$host"; - proxy_set_header X-Real-IP "$remote_addr"; - proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for"; - proxy_set_header X-Forwarded-Proto "$scheme"; - proxy_set_header Upgrade "$http_upgrade"; - proxy_set_header Connection $connection_upgrade; - proxy_cache_bypass $http_upgrade; - } - location / { - proxy_pass ${FRONT_PROXY_PASS}; - proxy_http_version 1.1; - proxy_set_header Host "$host"; - proxy_set_header X-Real-IP "$remote_addr"; - proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for"; - proxy_set_header X-Forwarded-Proto "$scheme"; - proxy_set_header Upgrade "$http_upgrade"; - proxy_set_header Connection $connection_upgrade; - proxy_cache_bypass $http_upgrade; 
- } -} \ No newline at end of file diff --git a/workspaces/blockscout/proxy/microservices.conf.template b/workspaces/blockscout/proxy/microservices.conf.template deleted file mode 100644 index 708812f5..00000000 --- a/workspaces/blockscout/proxy/microservices.conf.template +++ /dev/null @@ -1,65 +0,0 @@ -map $http_upgrade $connection_upgrade { - - default upgrade; - '' close; -} - -server { - listen 8080; - server_name localhost; - proxy_http_version 1.1; - proxy_hide_header Access-Control-Allow-Origin; - proxy_hide_header Access-Control-Allow-Methods; - add_header 'Access-Control-Allow-Origin' 'http://localhost:3000' always; - add_header 'Access-Control-Allow-Credentials' 'true' always; - add_header 'Access-Control-Allow-Methods' 'PUT, GET, POST, OPTIONS, DELETE, PATCH' always; - - location / { - proxy_pass http://stats:8050/; - proxy_http_version 1.1; - proxy_set_header Host "$host"; - proxy_set_header X-Real-IP "$remote_addr"; - proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for"; - proxy_set_header X-Forwarded-Proto "$scheme"; - proxy_set_header Upgrade "$http_upgrade"; - proxy_set_header Connection $connection_upgrade; - proxy_cache_bypass $http_upgrade; - } -} -server { - listen 8081; - server_name localhost; - proxy_http_version 1.1; - proxy_hide_header Access-Control-Allow-Origin; - proxy_hide_header Access-Control-Allow-Methods; - add_header 'Access-Control-Allow-Origin' 'http://localhost:3000' always; - add_header 'Access-Control-Allow-Credentials' 'true' always; - add_header 'Access-Control-Allow-Methods' 'PUT, GET, POST, OPTIONS, DELETE, PATCH' always; - add_header 'Access-Control-Allow-Headers' 'DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,x-csrf-token' always; - - location / { - proxy_pass http://visualizer:8050/; - proxy_http_version 1.1; - proxy_buffering off; - proxy_set_header Host "$host"; - proxy_set_header X-Real-IP "$remote_addr"; - proxy_connect_timeout 30m; - 
proxy_read_timeout 30m; - proxy_send_timeout 30m; - proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for"; - proxy_set_header X-Forwarded-Proto "$scheme"; - proxy_set_header Upgrade "$http_upgrade"; - proxy_set_header Connection $connection_upgrade; - proxy_cache_bypass $http_upgrade; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' 'http://localhost:3000' always; - add_header 'Access-Control-Allow-Credentials' 'true' always; - add_header 'Access-Control-Allow-Methods' 'PUT, GET, POST, OPTIONS, DELETE, PATCH' always; - add_header 'Access-Control-Allow-Headers' 'DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,x-csrf-token' always; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - } -} \ No newline at end of file diff --git a/workspaces/blockscout/services/backend.yml b/workspaces/blockscout/services/backend.yml deleted file mode 100644 index 7d1f8971..00000000 --- a/workspaces/blockscout/services/backend.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - backend: - image: blockscout/${DOCKER_REPO:-blockscout}:${DOCKER_TAG:-latest} - - restart: always - stop_grace_period: 5m - command: sh -c "bin/blockscout eval \"Elixir.Explorer.ReleaseTasks.create_and_migrate()\" && bin/blockscout start" - env_file: - - ../envs/common-blockscout.env - networks: - - blockscout - - devnet - volumes: - - logs-data:/app/logs/ diff --git a/workspaces/blockscout/services/db.yml b/workspaces/blockscout/services/db.yml deleted file mode 100644 index a19bdbeb..00000000 --- a/workspaces/blockscout/services/db.yml +++ /dev/null @@ -1,44 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - 
blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - db-init: - image: postgres:15 - volumes: - - blockscout-db-data:/var/lib/postgresql/data - entrypoint: - - sh - - -c - - | - chown -R 2000:2000 /var/lib/postgresql/data - - db: - image: postgres:15 - user: 2000:2000 - shm_size: 256m - restart: always - command: postgres -c 'max_connections=200' -c 'client_connection_check_interval=60000' - environment: - POSTGRES_DB: 'blockscout' - POSTGRES_USER: 'blockscout' - POSTGRES_PASSWORD: 'ceWb1MeLBEeOIfk65gU8EjF8' - ports: - - target: 5432 - published: 0 - networks: - - blockscout - volumes: - - blockscout-db-data:/var/lib/postgresql/data - healthcheck: - test: ["CMD-SHELL", "pg_isready -U blockscout -d blockscout"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s diff --git a/workspaces/blockscout/services/frontend.yml b/workspaces/blockscout/services/frontend.yml deleted file mode 100644 index 5f017e7c..00000000 --- a/workspaces/blockscout/services/frontend.yml +++ /dev/null @@ -1,20 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - frontend: - image: ghcr.io/blockscout/frontend:${FRONTEND_DOCKER_TAG:-latest} - - platform: linux/amd64 - restart: always - networks: - - blockscout - env_file: - - ../envs/common-frontend.env diff --git a/workspaces/blockscout/services/nginx-explorer.yml b/workspaces/blockscout/services/nginx-explorer.yml deleted file mode 100644 index 0c06cc56..00000000 --- a/workspaces/blockscout/services/nginx-explorer.yml +++ /dev/null @@ -1,22 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - proxy: - image: nginx - networks: - - blockscout - - devnet - environment: - BACK_PROXY_PASS: ${BACK_PROXY_PASS:-http://backend:4000} - FRONT_PROXY_PASS: 
${FRONT_PROXY_PASS:-http://frontend:3000} - ports: - - target: 80 - published: 80 diff --git a/workspaces/blockscout/services/nginx.yml b/workspaces/blockscout/services/nginx.yml deleted file mode 100644 index 9801610c..00000000 --- a/workspaces/blockscout/services/nginx.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - proxy: - image: nginx - restart: unless-stopped - volumes: - - "../proxy:/etc/nginx/templates" - environment: - BACK_PROXY_PASS: ${BACK_PROXY_PASS:-http://backend:4000} - FRONT_PROXY_PASS: ${FRONT_PROXY_PASS:-http://frontend:3000} - networks: - - blockscout - - devnet - ports: - - target: 80 - published: 0 - # - target: 8080 - # published: 0 - # - target: 8081 - # published: 8781 diff --git a/workspaces/blockscout/services/redis.yml b/workspaces/blockscout/services/redis.yml deleted file mode 100644 index 0d04ee83..00000000 --- a/workspaces/blockscout/services/redis.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - redis-db: - image: 'redis:alpine' - command: redis-server - networks: - - blockscout - volumes: - - redis-data:/data diff --git a/workspaces/blockscout/services/sig-provider.yml b/workspaces/blockscout/services/sig-provider.yml deleted file mode 100644 index fee97ff3..00000000 --- a/workspaces/blockscout/services/sig-provider.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - sig-provider: - image: ghcr.io/blockscout/sig-provider:${SIG_PROVIDER_DOCKER_TAG:-latest} - - platform: linux/amd64 - restart: always - networks: - - blockscout diff --git 
a/workspaces/blockscout/services/smart-contract-verifier.yml b/workspaces/blockscout/services/smart-contract-verifier.yml deleted file mode 100644 index a43d9ece..00000000 --- a/workspaces/blockscout/services/smart-contract-verifier.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - smart-contract-verifier: - image: ghcr.io/blockscout/smart-contract-verifier:${SMART_CONTRACT_VERIFIER_DOCKER_TAG:-latest} - - platform: linux/amd64 - restart: always - networks: - - devnet - - blockscout - env_file: - - ../envs/common-smart-contract-verifier.env diff --git a/workspaces/blockscout/services/stats.yml b/workspaces/blockscout/services/stats.yml deleted file mode 100644 index 10240a15..00000000 --- a/workspaces/blockscout/services/stats.yml +++ /dev/null @@ -1,60 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - stats-db-init: - image: postgres:15 - volumes: - - stats-db-data:/var/lib/postgresql/data - entrypoint: - - sh - - -c - - | - chown -R 2000:2000 /var/lib/postgresql/data - - stats-db: - image: postgres:15 - user: 2000:2000 - shm_size: 256m - restart: always - command: postgres -c 'max_connections=200' - environment: - POSTGRES_DB: 'stats' - POSTGRES_USER: 'stats' - POSTGRES_PASSWORD: 'n0uejXPl61ci6ldCuE2gQU5Y' - ports: - - target: 5433 - published: 0 - networks: - - blockscout - volumes: - - stats-db-data:/var/lib/postgresql/data - healthcheck: - test: ["CMD-SHELL", "pg_isready -U stats -d stats"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - - stats: - image: ghcr.io/blockscout/stats:${STATS_DOCKER_TAG:-latest} - - platform: linux/amd64 - restart: always - networks: - - blockscout - - devnet - env_file: - - ../envs/common-stats.env - environment: - - 
STATS__DB_URL=${STATS__DB_URL:-postgres://stats:n0uejXPl61ci6ldCuE2gQU5Y@stats-db:5433/stats} - - STATS__BLOCKSCOUT_DB_URL=${STATS__BLOCKSCOUT_DB_URL:-postgresql://blockscout:ceWb1MeLBEeOIfk65gU8EjF8@db:5433/blockscout} - - STATS__CREATE_DATABASE=${STATS__CREATE_DATABASE:-true} - - STATS__RUN_MIGRATIONS=${STATS__RUN_MIGRATIONS:-true} diff --git a/workspaces/blockscout/services/user-ops-indexer.yml b/workspaces/blockscout/services/user-ops-indexer.yml deleted file mode 100644 index aaafaaf7..00000000 --- a/workspaces/blockscout/services/user-ops-indexer.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - user-ops-indexer: - image: ghcr.io/blockscout/user-ops-indexer:${USER_OPS_INDEXER_DOCKER_TAG:-latest} - - platform: linux/amd64 - restart: always - networks: - - blockscout - - devnet - env_file: - - ../envs/common-user-ops-indexer.env - environment: - - USER_OPS_INDEXER__INDEXER__RPC_URL=${USER_OPS_INDEXER__INDEXER__RPC_URL:-ws://execution:8546/} - - USER_OPS_INDEXER__DATABASE__CONNECT__URL=${USER_OPS_INDEXER__DATABASE__CONNECT__URL:-postgresql://blockscout:ceWb1MeLBEeOIfk65gU8EjF8@db:5432/blockscout} - - USER_OPS_INDEXER__DATABASE__RUN_MIGRATIONS=true diff --git a/workspaces/blockscout/services/visualizer.yml b/workspaces/blockscout/services/visualizer.yml deleted file mode 100644 index 0ff0424b..00000000 --- a/workspaces/blockscout/services/visualizer.yml +++ /dev/null @@ -1,20 +0,0 @@ -version: '3.9' - -networks: - devnet: - name: ${DOCKER_NETWORK_NAME} - external: true - blockscout: - name: blockscout-${DOCKER_NETWORK_NAME} - external: true - -services: - visualizer: - image: ghcr.io/blockscout/visualizer:${VISUALIZER_DOCKER_TAG:-latest} - - platform: linux/amd64 - restart: always - networks: - - blockscout - env_file: - - ../envs/common-visualizer.env diff --git 
a/workspaces/blockscout/verification/Makefile b/workspaces/blockscout/verification/Makefile new file mode 100644 index 00000000..4dd5ffc6 --- /dev/null +++ b/workspaces/blockscout/verification/Makefile @@ -0,0 +1,103 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/vendor/blockscout-verification +HELM_VALUES_PATH ?= ./values.yaml + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE = blockscout-verifier + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= false +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +# - + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + +endef + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template 
$(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/blockscout/verification/values.yaml b/workspaces/blockscout/verification/values.yaml new file mode 100644 index 00000000..31c4e29c --- /dev/null +++ b/workspaces/blockscout/verification/values.yaml @@ -0,0 +1,288 @@ +# Default values for sig-provider. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +imagePullSecrets: [] +nameOverride: "blockscout" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +sigProvider: + replicaCount: 1 + + image: + repository: ghcr.io/blockscout/sig-provider + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "latest" + + podAnnotations: {} + podLabels: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 8043 + + ingress: + enabled: false + className: "public" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hostname: verifier-devnet.testnet.fi + tls: + enabled: false + + env: [] + # NAME: VALUE + + resources: + limits: + memory: 256Mi + cpu: 250m + requests: + memory: 128Mi + cpu: 100m + + livenessProbe: + httpGet: + path: /health?service= + port: http + readinessProbe: + httpGet: + path: /health?service= + port: http + + # Additional volumes on the output Deployment definition. + volumes: [] + # - name: foo + # secret: + # secretName: mysecret + # optional: false + + # Additional volumeMounts on the output Deployment definition. + volumeMounts: [] + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true + +smartContractVerifier: + replicaCount: 1 + + image: + repository: ghcr.io/blockscout/smart-contract-verifier + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "latest" + + podAnnotations: {} + podLabels: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 8050 + grpc: + enabled: false + port: 8051 + #GRPC ingress configuration + ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hostname: hr6vb82d1ndsx-blockscout-verifier.fusaka-verification-test.valset-03.testnet.fi + tls: + enabled: false + ingress: + enabled: true + className: "public" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hostname: smart-contract-verifier.local + tls: + enabled: false + + env: + SMART_CONTRACT_VERIFIER__SERVER__HTTP__MAX_BODY_SIZE: "2097152" + SMART_CONTRACT_VERIFIER__SERVER__GRPC__ENABLED: false + SMART_CONTRACT_VERIFIER__SERVER__GRPC__ADDR: "0.0.0.0:8051" + SMART_CONTRACT_VERIFIER__SOLIDITY__ENABLED: true + SMART_CONTRACT_VERIFIER__SOLIDITY__COMPILERS_DIR: "/tmp/solidity-compilers" + SMART_CONTRACT_VERIFIER__SOLIDITY__REFRESH_VERSIONS_SCHEDULE: "0 0 * * * * *" + # It depends on the OS you are running the service on + SMART_CONTRACT_VERIFIER__SOLIDITY__FETCHER__LIST__LIST_URL: https://solc-bin.ethereum.org/linux-amd64/list.json + #SMART_CONTRACT_VERIFIER__SOLIDITY__FETCHER__LIST__LIST_URL: https://solc-bin.ethereum.org/macosx-amd64/list.json + #SMART_CONTRACT_VERIFIER__SOLIDITY__FETCHER__LIST__LIST_URL: https://solc-bin.ethereum.org/windows-amd64/list.json + SMART_CONTRACT_VERIFIER__VYPER__ENABLED: true + SMART_CONTRACT_VERIFIER__VYPER__COMPILERS_DIR: "/tmp/vyper-compilers" + SMART_CONTRACT_VERIFIER__VYPER__REFRESH_VERSIONS_SCHEDULE: 0 0 * * * * * + # It depends on the OS you are running the service on + SMART_CONTRACT_VERIFIER__VYPER__FETCHER__LIST__LIST_URL: 
"https://raw.githubusercontent.com/blockscout/solc-bin/main/vyper.list.json" + #SMART_CONTRACT_VERIFIER__VYPER__FETCHER__LIST__LIST_URL: https://raw.githubusercontent.com/blockscout/solc-bin/main/vyper.macos.list.json + SMART_CONTRACT_VERIFIER__SOURCIFY__ENABLED: false + SMART_CONTRACT_VERIFIER__SOURCIFY__API_URL: "https://sourcify.dev/server/" + SMART_CONTRACT_VERIFIER__SOURCIFY__VERIFICATION_ATTEMPTS: 3 + SMART_CONTRACT_VERIFIER__SOURCIFY__REQUEST_TIMEOUT: 10 + SMART_CONTRACT_VERIFIER__METRICS__ENABLED: false + SMART_CONTRACT_VERIFIER__METRICS__ADDR: "0.0.0.0:6060" + SMART_CONTRACT_VERIFIER__METRICS__ROUTE: /metrics + SMART_CONTRACT_VERIFIER__JAEGER__ENABLED: false + SMART_CONTRACT_VERIFIER__JAEGER__AGENT_ENDPOINT: "localhost:6831" + MICROSERVICE_SC_VERIFIER_API_KEY: kek + # NAME: VALUE + envFromSecret: [] + # NAME: VALUE + + resources: + limits: + memory: "8Gi" + cpu: "1" + requests: + memory: "1Gi" + cpu: "250m" + + livenessProbe: + httpGet: + path: /health?service= + port: http + readinessProbe: + httpGet: + path: /health?service= + port: http + + # Additional volumes on the output Deployment definition. + volumes: [] + # - name: foo + # secret: + # secretName: mysecret + # optional: false + + # Additional volumeMounts on the output Deployment definition. + volumeMounts: [] + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true +ethBytecodeDb: + enabled: false + replicaCount: 1 + + image: + repository: ghcr.io/blockscout/eth-bytecode-db + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "latest" + + podAnnotations: {} + podLabels: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 8050 + + ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hostname: eth-bytecode-db.local + tls: + enabled: false + + env: [] + # NAME: VALUE + envFromSecret: [] + # NAME: VALUE + resources: + limits: + memory: 1Gi + cpu: 500m + requests: + memory: 512Mi + cpu: 250m + + livenessProbe: + httpGet: + path: /health?service= + port: http + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: /health?service= + port: http + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 30 + + # Additional volumes on the output Deployment definition. + volumes: [] + # - name: foo + # secret: + # secretName: mysecret + # optional: false + + # Additional volumeMounts on the output Deployment definition. 
+ volumeMounts: [] + # - name: foo + # mountPath: "/etc/foo" + # readOnly: true + + +metrics: + enabled: false + port: 6060 + prometheus: + enabled: false + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/workspaces/council/Makefile b/workspaces/council/Makefile new file mode 100644 index 00000000..b4b68a9d --- /dev/null +++ b/workspaces/council/Makefile @@ -0,0 +1,118 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-council +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif +# Release names +HELM_RELEASE ?= council-1 + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set lido-app.image.repository="${IMAGE}" \ + --set lido-app.image.tag="${TAG}" \ + --set lido-app.image.registry="${REGISTRY_HOSTNAME}" \ + --set lido-app.env.variables.PORT="${PORT}" \ + --set lido-app.env.variables.LOG_LEVEL="${LOG_LEVEL}" \ + --set lido-app.env.variables.LOG_FORMAT="${LOG_FORMAT}" \ + --set lido-app.env.variables.RPC_URL="${RPC_URL}" \ + --set lido-app.env.variables.WALLET_PRIVATE_KEY="${WALLET_PRIVATE_KEY}" \ + --set lido-app.env.variables.KEYS_API_HOST="${KEYS_API_HOST}" \ + --set 
lido-app.env.variables.KEYS_API_PORT="${KEYS_API_PORT}" \ + --set lido-app.env.variables.PUBSUB_SERVICE="${PUBSUB_SERVICE}" \ + --set lido-app.env.variables.EVM_CHAIN_DATA_BUS_ADDRESS="${EVM_CHAIN_DATA_BUS_ADDRESS}" \ + --set lido-app.env.variables.EVM_CHAIN_DATA_BUS_PROVIDER_URL="${EVM_CHAIN_DATA_BUS_PROVIDER_URL}" \ + --set lido-app.env.variables.LOCATOR_DEVNET_ADDRESS="${LOCATOR_DEVNET_ADDRESS}" \ + --set lido-app.env.variables.RABBITMQ_PASSCODE="${RABBITMQ_PASSCODE}" \ + --set lido-app.env.variables.RABBITMQ_LOGIN="${RABBITMQ_LOGIN}" \ + --set lido-app.env.variables.RABBITMQ_URL="${RABBITMQ_URL}" +endef + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + 
$(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/csm-prover-tool/Makefile b/workspaces/csm-prover-tool/Makefile new file mode 100644 index 00000000..957972db --- /dev/null +++ b/workspaces/csm-prover-tool/Makefile @@ -0,0 +1,120 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-csm-prover-tool +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE ?= csm-prover-tool + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +IMAGE ?= lidofinance/csm-prover-tool +TAG ?= dev +REGISTRY_HOSTNAME ?= docker.io +EL_RPC_URLS ?= http://execution +CL_API_URLS ?= http://consensus +CHAIN_ID ?= 32382 + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set lido-app.image.repository="${IMAGE}" \ + --set lido-app.image.tag="${TAG}" \ + --set lido-app.image.registry="${REGISTRY_HOSTNAME}" \ + --set lido-app.env.variables.EL_RPC_URLS="${EL_RPC_URLS}" \ + --set lido-app.env.variables.CL_API_URLS="${CL_API_URLS}" \ + --set lido-app.env.variables.KEYSAPI_API_URLS="${KEYSAPI_API_URLS}" \ + --set 
lido-app.env.variables.CHAIN_ID="${CHAIN_ID}" \ + --set lido-app.env.variables.CSM_ADDRESS="${CSM_ADDRESS}" \ + --set lido-app.env.variables.VERIFIER_ADDRESS="${VERIFIER_ADDRESS}" \ + --set lido-app.env.variables.TX_SIGNER_PRIVATE_KEY="${TX_SIGNER_PRIVATE_KEY}" \ + --set lido-app.env.variables.START_ROOT="${START_ROOT}" +endef + + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/docker-registry/Makefile b/workspaces/docker-registry/Makefile new file mode 100644 index 00000000..eb5c52ed 
--- /dev/null +++ b/workspaces/docker-registry/Makefile @@ -0,0 +1,100 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-docker-registry +HELM_VALUES_PATH = ./values.yaml + +# Release names +HELM_RELEASE = lido-docker-registry + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +DOCKER_REGISTRY_INGRESS_HOSTNAME ?= docker-registry.devnet.testnet.fi +DOCKER_REGISTRY_UI_INGRESS_HOSTNAME ?= docker-registry-ui.devnet.testnet.fi + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set global.lido.registryUiHostname="$(DOCKER_REGISTRY_UI_INGRESS_HOSTNAME)" \ + --set global.lido.registryHostname="$(DOCKER_REGISTRY_INGRESS_HOSTNAME)" +endef + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) 
$(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) -f $(HELM_VALUES_PATH) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + -f $(HELM_VALUES_PATH) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + -f $(HELM_VALUES_PATH) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/docker-registry/values.yaml b/workspaces/docker-registry/values.yaml new file mode 100644 index 00000000..f81d67fd --- /dev/null +++ b/workspaces/docker-registry/values.yaml @@ -0,0 +1,5 @@ +global: + # Lido specific values + lido: + registryHostname: "registry.example.com" + registryUiHostname: "registry-ui.example.com" diff --git a/workspaces/dsm-bots/Makefile b/workspaces/dsm-bots/Makefile new file mode 100644 index 00000000..82fff6c1 --- /dev/null +++ b/workspaces/dsm-bots/Makefile @@ -0,0 +1,123 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-dsm-bot +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE ?= dsm-bot + +# Default Helm timeout 
+TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +IMAGE ?= lidofinance/depositor-bot +TAG ?= dev +REGISTRY_HOSTNAME ?= docker.io +WEB3_RPC_ENDPOINTS ?= http://execution + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set lido-app.image.repository="${IMAGE}" \ + --set lido-app.image.tag="${TAG}" \ + --set lido-app.image.registry="${REGISTRY_HOSTNAME}" \ + --set "lido-app.args={${COMMAND}}" \ + --set lido-app.env.variables.WEB3_RPC_ENDPOINTS="${WEB3_RPC_ENDPOINTS}" \ + --set lido-app.env.variables.WALLET_PRIVATE_KEY="${WALLET_PRIVATE_KEY}" \ + --set lido-app.env.variables.LIDO_LOCATOR="${LIDO_LOCATOR}" \ + --set lido-app.env.variables.DEPOSIT_CONTRACT="${DEPOSIT_CONTRACT}" \ + --set lido-app.env.variables.MESSAGE_TRANSPORTS="${MESSAGE_TRANSPORTS}" \ + --set lido-app.env.variables.ONCHAIN_TRANSPORT_ADDRESS="${ONCHAIN_TRANSPORT_ADDRESS}" \ + --set lido-app.env.variables.ONCHAIN_TRANSPORT_RPC_ENDPOINTS="${ONCHAIN_TRANSPORT_RPC_ENDPOINTS}" \ + --set lido-app.env.variables.RABBIT_MQ_URL="${RABBIT_MQ_URL}" \ + --set lido-app.env.variables.RABBIT_MQ_USERNAME="${RABBIT_MQ_USERNAME}" \ + --set lido-app.env.variables.RABBIT_MQ_PASSWORD="${RABBIT_MQ_PASSWORD}" \ + --set lido-app.env.variables.CREATE_TRANSACTIONS="${CREATE_TRANSACTIONS}" \ + --set lido-app.env.variables.DEPOSIT_MODULES_WHITELIST="${DEPOSIT_MODULES_WHITELIST}" \ + --set lido-app.env.variables.PROMETHEUS_PREFIX="${PROMETHEUS_PREFIX}" +endef + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo 
"HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/kapi/Makefile b/workspaces/kapi/Makefile new file mode 100644 index 00000000..7306c48e --- /dev/null +++ b/workspaces/kapi/Makefile @@ -0,0 +1,131 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-kapi +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f 
$(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE ?= lido-kapi + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +KAPI_INGRESS_HOSTNAME ?= kapi.devnet.testnet.fi +IMAGE ?= lidofinance/lido-keys-api +TAG ?= dev +REGISTRY_HOSTNAME ?= docker.io +PROVIDERS_URLS ?= http://execution +CL_API_URLS ?= http://consensus +CHAIN_ID ?= 32382 + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set lido-app.image.repository="${IMAGE}" \ + --set lido-app.image.tag="${TAG}" \ + --set lido-app.image.registry="${REGISTRY_HOSTNAME}" \ + --set lido-app.env.variables.DB_HOST="${DB_HOST}" \ + --set lido-app.env.variables.CORS_WHITELIST_REGEXP="${CORS_WHITELIST_REGEXP}" \ + --set lido-app.env.variables.GLOBAL_THROTTLE_TTL="${GLOBAL_THROTTLE_TTL}" \ + --set lido-app.env.variables.GLOBAL_THROTTLE_LIMIT="${GLOBAL_THROTTLE_LIMIT}" \ + --set lido-app.env.variables.GLOBAL_CACHE_TTL="${GLOBAL_CACHE_TTL}" \ + --set lido-app.env.variables.PROVIDERS_URLS="${PROVIDERS_URLS}" \ + --set lido-app.env.variables.CL_API_URLS="${CL_API_URLS}" \ + --set lido-app.env.variables.IS_DEVNET_MODE="${IS_DEVNET_MODE}" \ + --set lido-app.env.variables.CHAIN_ID="${CHAIN_ID}" \ + --set lido-app.env.variables.JOB_INTERVAL_REGISTRY="${JOB_INTERVAL_REGISTRY}" \ + --set lido-app.env.variables.VALIDATOR_REGISTRY_ENABLE="${VALIDATOR_REGISTRY_ENABLE}" \ + --set lido-app.env.variables.JOB_INTERVAL_VALIDATORS_REGISTRY="${JOB_INTERVAL_VALIDATORS_REGISTRY}" \ + --set 
lido-app.env.variables.LIDO_LOCATOR_DEVNET_ADDRESS="${LIDO_LOCATOR_DEVNET_ADDRESS}" \ + --set lido-app.env.variables.CURATED_MODULE_DEVNET_ADDRESS="${CURATED_MODULE_DEVNET_ADDRESS}" \ + --set lido-app.env.variables.CSM_MODULE_DEVNET_ADDRESS="${CSM_MODULE_DEVNET_ADDRESS}" \ + --set lido-app.env.variables.STAKING_ROUTER_DEVNET_ADDRESS="${STAKING_ROUTER_DEVNET_ADDRESS}" \ + --set-json lido-app.ingress.hosts='[{ "host": "${INGRESS_HOSTNAME}", "paths": [{ "path": "/", "pathType": "Prefix", "port": "http"}]}]' +endef + + + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall 
$(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/kubo/Makefile b/workspaces/kubo/Makefile new file mode 100644 index 00000000..0e4f8f41 --- /dev/null +++ b/workspaces/kubo/Makefile @@ -0,0 +1,110 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-kubo +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE ?= lido-kubo + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +CHAIN ?= artifact + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set lido-app.image.repository="${IMAGE}" \ + --set lido-app.image.tag="${TAG}" \ + --set lido-app.image.registry="${REGISTRY_HOSTNAME}" \ + --set lido-app.env.variables.CHAIN="${CHAIN}" \ + --set-json lido-app.ingress.hosts='[{ "host": "${KUBO_INGRESS_HOSTNAME}", "paths": [{ "path": "/", "pathType": "Prefix", "port": "http-api"}]}]' +endef + + + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint 
$(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/kurtosis/fusaka-devnet2.yml b/workspaces/kurtosis/fusaka-devnet2.yml new file mode 100644 index 00000000..62a8be16 --- /dev/null +++ b/workspaces/kurtosis/fusaka-devnet2.yml @@ -0,0 +1,97 @@ +participants: + + - el_type: geth + el_image: ethereum/client-go:v1.16.7 + el_extra_params: [ --syncmode=full --rpc.allow-unprotected-txs --gcmode=archive ] + el_min_cpu: 0 + el_max_cpu: 0 + el_min_mem: 0 + el_max_mem: 0 + el_volume_size: 51200 + # CL + cl_type: lighthouse + cl_image: sigp/lighthouse:v8.0.0 + cl_extra_params: [ '--hierarchy-exponents=5,7,11', --reconstruct-historic-states ] + supernode: true + cl_volume_size: 51200 + count: 1 + + - el_type: geth + el_image: ethereum/client-go:v1.16.7 + el_extra_params: [ --syncmode=full 
--rpc.allow-unprotected-txs --gcmode=archive ] + el_min_cpu: 0 + el_max_cpu: 0 + el_min_mem: 0 + el_max_mem: 0 + el_volume_size: 51200 + # CL + cl_type: teku + cl_image: consensys/teku:25.11.0 + cl_extra_params: [ --reconstruct-historic-states=true ] + cl_extra_env_vars: + JAVA_OPTS: "-Xmx32g" + supernode: true + use_separate_vc: true + cl_min_cpu: 4000 + cl_max_cpu: 8000 + cl_min_mem: 16384 + cl_max_mem: 32768 + cl_volume_size: 51200 + count: 1 + + - el_type: erigon + el_image: erigontech/erigon:v3.2.2 + el_extra_params: [ --prune.mode=archive ] + el_min_cpu: 0 + el_max_cpu: 0 + el_min_mem: 0 + el_max_mem: 0 + el_volume_size: 51200 + # CL + cl_type: lighthouse + cl_image: sigp/lighthouse:v8.0.0 + cl_extra_params: [ '--hierarchy-exponents=5,7,11', --reconstruct-historic-states ] + supernode: true + cl_volume_size: 51200 + count: 1 + +network_params: + network_id: "32382" + withdrawal_type: "0x02" + preset: mainnet + shard_committee_period: 1 + fulu_fork_epoch: 1 + bpo_1_epoch: 32 + bpo_1_max_blobs: 12 + bpo_1_target_blobs: 9 + bpo_2_epoch: 33 + bpo_2_max_blobs: 6 + bpo_2_target_blobs: 4 + bpo_3_epoch: 34 + bpo_3_max_blobs: 9 + bpo_3_target_blobs: 6 + bpo_4_epoch: 35 + bpo_4_max_blobs: 18 + bpo_4_target_blobs: 12 + bpo_5_epoch: 36 + bpo_5_max_blobs: 9 + bpo_5_target_blobs: 6 + +persistent: true + +additional_services: + - dora + - spamoor + +keymanager_enabled: true + +spamoor_params: + image: ethpandaops/spamoor:master + max_mem: 8000 + spammers: + - scenario: eoatx + config: + throughput: 5 + - scenario: blobs + config: + throughput: 5 diff --git a/workspaces/kurtosis/fusaka-zk-test.yml b/workspaces/kurtosis/fusaka-zk-test.yml new file mode 100644 index 00000000..a3982ff5 --- /dev/null +++ b/workspaces/kurtosis/fusaka-zk-test.yml @@ -0,0 +1,65 @@ +participants: + + - el_type: geth + el_image: ethereum/client-go:v1.16.4 + el_extra_params: [ --syncmode=full --rpc.allow-unprotected-txs --gcmode=archive ] + el_min_cpu: 0 + el_max_cpu: 0 + el_min_mem: 0 + el_max_mem: 0 
+ el_volume_size: 51200 + # CL + cl_type: teku + cl_image: consensys/teku:25.9.3 + cl_extra_params: [ --reconstruct-historic-states=true ] + cl_extra_env_vars: + JAVA_OPTS: "-Xmx32g" + supernode: true + use_separate_vc: true + cl_min_cpu: 4000 + cl_max_cpu: 8000 + cl_min_mem: 16384 + cl_max_mem: 32768 + cl_volume_size: 51200 + count: 1 + + +network_params: + network_id: "32382" + fulu_fork_epoch: 1 + bpo_1_epoch: 32 + bpo_1_max_blobs: 12 + bpo_1_target_blobs: 9 + bpo_2_epoch: 33 + bpo_2_max_blobs: 6 + bpo_2_target_blobs: 4 + bpo_3_epoch: 34 + bpo_3_max_blobs: 9 + bpo_3_target_blobs: 6 + bpo_4_epoch: 35 + bpo_4_max_blobs: 18 + bpo_4_target_blobs: 12 + bpo_5_epoch: 36 + bpo_5_max_blobs: 9 + bpo_5_target_blobs: 6 + withdrawal_type: "0x02" + preset: mainnet + +persistent: true + +additional_services: + - dora + - spamoor + +keymanager_enabled: true + +spamoor_params: + image: ethpandaops/spamoor:master + max_mem: 8000 + spammers: + - scenario: eoatx + config: + throughput: 5 + - scenario: blobs + config: + throughput: 5 diff --git a/workspaces/late-prover-bot/Makefile b/workspaces/late-prover-bot/Makefile new file mode 100644 index 00000000..871f35ea --- /dev/null +++ b/workspaces/late-prover-bot/Makefile @@ -0,0 +1,117 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-late-prover-bot +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE ?= late-prover-bot + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add 
if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +IMAGE ?= lidofinance/late-prover-bot +TAG ?= dev +REGISTRY_HOSTNAME ?= docker.io +EL_RPC_URLS ?= http://execution +CL_API_URLS ?= http://consensus +CHAIN_ID ?= 32382 + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set lido-app.image.repository="${IMAGE}" \ + --set lido-app.image.tag="${TAG}" \ + --set lido-app.image.registry="${REGISTRY_HOSTNAME}" \ + --set lido-app.env.variables.EL_RPC_URLS="${EL_RPC_URLS}" \ + --set lido-app.env.variables.CL_API_URLS="${CL_API_URLS}" \ + --set lido-app.env.variables.CHAIN_ID="${CHAIN_ID}" \ + --set lido-app.env.variables.LIDO_LOCATOR_ADDRESS="${LIDO_LOCATOR_ADDRESS}" \ + --set lido-app.env.variables.TX_SIGNER_PRIVATE_KEY="${TX_SIGNER_PRIVATE_KEY}" +endef + + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + 
--namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/no-widget-backend/Makefile b/workspaces/no-widget-backend/Makefile new file mode 100644 index 00000000..a211d9bc --- /dev/null +++ b/workspaces/no-widget-backend/Makefile @@ -0,0 +1,145 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-no-widget-backend +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE ?= lido-no-widget-backend + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +API_INGRESS_HOSTNAME ?= no-widget-backend.devnet.testnet.fi +IMAGE ?= lidofinance/lido-no-widget-backend +TAG ?= dev +REGISTRY_HOSTNAME ?= docker.io +CHAIN_ID ?= 32382 + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define 
HELM_CHART_VALUES_OVERRIDES + --set api.image.repository="${IMAGE}" \ + --set api.image.tag="${TAG}" \ + --set api.image.registry="${REGISTRY_HOSTNAME}" \ + --set api.env.variables.PG_HOST="${PG_HOST}" \ + --set api.env.variables.NODE_ENV="${NODE_ENV}" \ + --set api.env.variables.PORT="${PORT}" \ + --set api.env.variables.CORS_WHITELIST_REGEXP="${CORS_WHITELIST_REGEXP}" \ + --set api.env.variables.GLOBAL_THROTTLE_TTL="${GLOBAL_THROTTLE_TTL}" \ + --set api.env.variables.GLOBAL_THROTTLE_LIMIT="${GLOBAL_THROTTLE_LIMIT}" \ + --set api.env.variables.GLOBAL_CACHE_TTL="${GLOBAL_CACHE_TTL}" \ + --set api.env.variables.SENTRY_DSN="${SENTRY_DSN}" \ + --set api.env.variables.LOG_LEVEL="${LOG_LEVEL}" \ + --set api.env.variables.LOG_FORMAT="${LOG_FORMAT}" \ + --set api.env.variables.KEYS_API_HOST="${KEYS_API_HOST}" \ + --set api.env.variables.EL_API_URLS="${EL_API_URLS}" \ + --set api.env.variables.CHAIN_ID="${CHAIN_ID}" \ + --set api.env.variables.DEVNET_GENESIS_FORK_VERSION="${DEVNET_GENESIS_FORK_VERSION}" \ + --set api.env.variables.LIDO_DEVNET_ADDRESS="${LIDO_DEVNET_ADDRESS}" \ + --set-json api.ingress.hosts='[{ "host": "${INGRESS_HOSTNAME}", "paths": [{ "path": "/", "pathType": "Prefix", "port": "http"}]}]' \ + --set worker.image.repository="${IMAGE}" \ + --set worker.image.tag="${TAG}" \ + --set worker.image.registry="${REGISTRY_HOSTNAME}" \ + --set worker.env.variables.PG_HOST="${PG_HOST}" \ + --set worker.env.variables.NODE_ENV="${NODE_ENV}" \ + --set worker.env.variables.PORT="${PORT}" \ + --set worker.env.variables.CORS_WHITELIST_REGEXP="${CORS_WHITELIST_REGEXP}" \ + --set worker.env.variables.GLOBAL_THROTTLE_TTL="${GLOBAL_THROTTLE_TTL}" \ + --set worker.env.variables.GLOBAL_THROTTLE_LIMIT="${GLOBAL_THROTTLE_LIMIT}" \ + --set worker.env.variables.GLOBAL_CACHE_TTL="${GLOBAL_CACHE_TTL}" \ + --set worker.env.variables.SENTRY_DSN="${SENTRY_DSN}" \ + --set worker.env.variables.LOG_LEVEL="${LOG_LEVEL}" \ + --set worker.env.variables.LOG_FORMAT="${LOG_FORMAT}" \ + --set 
worker.env.variables.KEYS_API_HOST="${KEYS_API_HOST}" \ + --set worker.env.variables.EL_API_URLS="${EL_API_URLS}" \ + --set worker.env.variables.CHAIN_ID="${CHAIN_ID}" \ + --set worker.env.variables.DEVNET_GENESIS_FORK_VERSION="${DEVNET_GENESIS_FORK_VERSION}" \ + --set worker.env.variables.LIDO_DEVNET_ADDRESS="${LIDO_DEVNET_ADDRESS}" +endef + + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/no-widget/Makefile b/workspaces/no-widget/Makefile new file mode 100644 index 
00000000..952549c5 --- /dev/null +++ b/workspaces/no-widget/Makefile @@ -0,0 +1,118 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-no-widget +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE ?= lido-no-widget + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +INGRESS_HOSTNAME ?= no-widget.devnet.testnet.fi +IMAGE ?= lido/lido-no-widget +TAG ?= dev +REGISTRY_HOSTNAME ?= docker.io +CHAIN_ID ?= 32382 + +# HELM overrides +# Provides a easy way to override values in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set web.image.repository="${IMAGE}" \ + --set web.image.tag="${TAG}" \ + --set web.image.registry="${REGISTRY_HOSTNAME}" \ + --set web.env.variables.NODE_ENV="${NODE_ENV}" \ + --set web.env.variables.EL_RPC_URLS_17000="${EL_RPC_URLS_17000}" \ + --set web.env.variables.BACKEND_URL_17000="${BACKEND_URL_17000}" \ + --set web.env.variables.SUPPORTED_CHAINS="${SUPPORTED_CHAINS}" \ + --set web.env.variables.DEFAULT_CHAIN="${DEFAULT_CHAIN}" \ + --set-json web.ingress.hosts='[{ "host": "${INGRESS_HOSTNAME}", "paths": [{ "path": "/", "pathType": "Prefix", "port": "http"}]}]' \ + +endef + + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo 
"HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install +install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/oracle-v6/Dockerfile b/workspaces/oracle-v6/Dockerfile deleted file mode 100644 index 36d6f12c..00000000 --- a/workspaces/oracle-v6/Dockerfile +++ /dev/null @@ -1,71 +0,0 @@ -# Based on branch feat/oracle-v6 -# https://github.com/lidofinance/lido-oracle/commit/e5f44e619116ded46dc7e240ebbc57fdbb0a28cb -# -# Changes: -# 1. Removed dev-stage -# 2. 
Added debugpy (+ 5678 port exposion) - -FROM python:3.12.4-slim AS base - -ARG POETRY_VERSION=1.3.2 -ARG SOURCE_DATE_EPOCH - -RUN apt-get update && apt-get install -y --no-install-recommends -qq \ - libffi-dev=3.4.4-1 \ - g++=4:12.2.0-3 \ - curl=7.88.1-10+deb12u12 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && rm -rf /var/cache/* \ - && rm -rf /var/log/* - -ENV PYTHONUNBUFFERED=1 \ - PYTHONDONTWRITEBYTECODE=1 \ - PIP_NO_CACHE_DIR=off \ - PIP_DISABLE_PIP_VERSION_CHECK=on \ - PIP_DEFAULT_TIMEOUT=100 \ - POETRY_VIRTUALENVS_IN_PROJECT=false \ - POETRY_NO_INTERACTION=1 \ - VENV_PATH="/opt/venv" \ - # Building reproducible .so files by enforcing consistent CFLAGS across builds - CFLAGS="-g0 -O2 -ffile-prefix-map=/src=." - -ENV PATH="$VENV_PATH/bin:$PATH" - -FROM base AS builder - -ARG POETRY_VERSION -RUN pip install --no-cache-dir poetry==${POETRY_VERSION} - -WORKDIR / -COPY pyproject.toml poetry.lock ./ - -RUN python3 -m venv "$VENV_PATH" && \ - VIRTUAL_ENV="$VENV_PATH" poetry install --only main --no-root --no-cache && \ - VIRTUAL_ENV="$VENV_PATH" poetry add debugpy==1.8.13 --no-cache && \ - find "$VENV_PATH" -type d -name '.git' -exec rm -rf {} + && \ - find "$VENV_PATH" -name '*.dist-info' -exec rm -rf {}/RECORD \; && \ - find "$VENV_PATH" -name '*.dist-info' -exec rm -rf {}/WHEEL \; && \ - find "$VENV_PATH" -name '__pycache__' -exec rm -rf {} + - -FROM base AS production - -COPY --from=builder $VENV_PATH $VENV_PATH -WORKDIR /app -COPY . . 
- -RUN apt-get clean && find /var/lib/apt/lists/ -type f -delete && chown -R www-data /app/ - -ENV PROMETHEUS_PORT 9000 -ENV HEALTHCHECK_SERVER_PORT 9010 - -EXPOSE $PROMETHEUS_PORT -EXPOSE 5678 -USER www-data - -HEALTHCHECK --interval=10s --timeout=3s \ - CMD curl -f http://localhost:$HEALTHCHECK_SERVER_PORT/healthcheck || exit 1 - -WORKDIR /app/ - -ENTRYPOINT ["/opt/venv/bin/python3", "-Xfrozen_modules=off", "-m", "debugpy", "--listen", "0.0.0.0:5678", "-m", "src.main"] diff --git a/workspaces/oracle-v6/DEBUG.md b/workspaces/oracle/DEBUG.md similarity index 100% rename from workspaces/oracle-v6/DEBUG.md rename to workspaces/oracle/DEBUG.md diff --git a/workspaces/oracle/Makefile b/workspaces/oracle/Makefile new file mode 100644 index 00000000..80cd1775 --- /dev/null +++ b/workspaces/oracle/Makefile @@ -0,0 +1,123 @@ +# Makefile for Helm Operations +# This Makefile provides convenient commands for managing Helm charts + +# Default namespace for Helm operations +NAMESPACE ?= test-namespace + +# Helm chart paths +HELM_CHART_ROOT_PATH ?= ../../helm +HELM_CHART_PATH = $(HELM_CHART_ROOT_PATH)/lido/lido-oracle +HELM_VALUES_PATH ?= + +ifneq ($(HELM_VALUES_PATH),) + HELM_VALUES = -f $(HELM_VALUES_PATH) +else + HELM_VALUES = +endif + +# Release names +HELM_RELEASE ?= oracle-accounting-1 + +# Default Helm timeout +TIMEOUT ?= 5m + +# Default Helm flags +HELM_DEBUG ?= true +ifeq ($(HELM_DEBUG), true) + HELM_DEBUG_FLAG = --debug +else + HELM_DEBUG_FLAG = +endif + +# Default values for chart versions (empty means latest) +HELM_CHART_VERSION ?= + +# Version flags (only add if version is specified) +ifneq ($(HELM_CHART_VERSION),) + HELM_VERSION_FLAG = --version $(HELM_CHART_VERSION) +else + HELM_VERSION_FLAG = +endif + +# Default ENV Variables specific to chart +IMAGE ?= lidofinance/oracle +TAG ?= dev +REGISTRY_HOSTNAME ?= docker.io +PROVIDERS_URLS ?= http://execution +CL_API_URLS ?= http://consensus +CHAIN_ID ?= 32382 + +# HELM overrides +# Provides a easy way to override values 
in helm chart by they path in YAML file +define HELM_CHART_VALUES_OVERRIDES + --set lido-app.image.repository="${IMAGE}" \ + --set lido-app.image.tag="${TAG}" \ + --set lido-app.image.registry="${REGISTRY_HOSTNAME}" \ + --set "lido-app.args={${COMMAND}}" \ + --set lido-app.env.variables.EXECUTION_CLIENT_URI="${EXECUTION_CLIENT_URI}" \ + --set lido-app.env.variables.CONSENSUS_CLIENT_URI="${CONSENSUS_CLIENT_URI}" \ + --set lido-app.env.variables.KEYS_API_URI="${KEYS_API_URI}" \ + --set lido-app.env.variables.MEMBER_PRIV_KEY="${MEMBER_PRIV_KEY}" \ + --set lido-app.env.variables.LIDO_LOCATOR_ADDRESS="${LIDO_LOCATOR_ADDRESS}" \ + --set lido-app.env.variables.CSM_MODULE_ADDRESS="${CSM_MODULE_ADDRESS}" \ + --set lido-app.env.variables.CSM_ORACLE_MAX_CONCURRENCY="${CSM_ORACLE_MAX_CONCURRENCY}" \ + --set lido-app.env.variables.SUBMIT_DATA_DELAY_IN_SLOTS="${SUBMIT_DATA_DELAY_IN_SLOTS}" \ + --set lido-app.env.variables.ALLOW_REPORTING_IN_BUNKER_MODE="${ALLOW_REPORTING_IN_BUNKER_MODE}" \ + --set lido-app.env.variables.PINATA_JWT="${PINATA_JWT}" \ + --set lido-app.env.variables.KUBO_HOST="${KUBO_HOST}" +endef + +# Lint Helm chart +.PHONY: debug +debug: + echo "\n" \ + echo ${pwd} \ + echo "HELM_CHART_PATH=[$(HELM_CHART_PATH)]\n" && \ + echo "HELM_VALUES_PATH=[$(HELM_VALUES_PATH)]\n" && \ + echo "HELM_CHART_VALUES_OVERRIDES=[$(HELM_CHART_VALUES_OVERRIDES)]\n" + +# Lint Helm chart +.PHONY: lint +lint: + helm lint $(HELM_CHART_PATH) $(HELM_VALUES) $(HELM_CHART_VALUES_OVERRIDES) + +# Print rendered Helm chart templates to stdout +.PHONY: template +template: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) $(HELM_DEBUG_FLAG) + +# Dry-run Helm chart install into K8s +.PHONY: dry-run +dry-run: + helm template $(HELM_RELEASE) $(HELM_CHART_PATH) $(HELM_VALUES) --namespace $(NAMESPACE) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + | kubectl apply --dry-run=client -f - + +# Helm chart install into K8s +.PHONY: install 
+install: + helm install $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --create-namespace \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart upgrade existing installation +.PHONY: upgrade +upgrade: + helm upgrade $(HELM_RELEASE) $(HELM_CHART_PATH) \ + $(HELM_VALUES) \ + $(HELM_CHART_VALUES_OVERRIDES) \ + --namespace $(NAMESPACE) \ + --timeout $(TIMEOUT) \ + $(HELM_VERSION_FLAG) \ + $(HELM_DEBUG_FLAG) + +# Helm chart uninstall +.PHONY: uninstall +uninstall: + helm uninstall $(HELM_RELEASE) --namespace $(NAMESPACE) --ignore-not-found diff --git a/workspaces/oracle-v6/docker-compose.devnet.yml b/workspaces/oracle/docker-compose.devnet.yml similarity index 100% rename from workspaces/oracle-v6/docker-compose.devnet.yml rename to workspaces/oracle/docker-compose.devnet.yml diff --git a/yarn.lock b/yarn.lock index adfbd043..514875c9 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12,6 +12,13 @@ __metadata: languageName: node linkType: hard +"@adraffy/ens-normalize@npm:^1.11.0": + version: 1.11.0 + resolution: "@adraffy/ens-normalize@npm:1.11.0" + checksum: 10c0/5111d0f1a273468cb5661ed3cf46ee58de8f32f84e2ebc2365652e66c1ead82649df94c736804e2b9cfa831d30ef24e1cc3575d970dbda583416d3a98d8870a6 + languageName: node + linkType: hard + "@aws-crypto/crc32@npm:5.2.0": version: 5.2.0 resolution: "@aws-crypto/crc32@npm:5.2.0" @@ -951,14 +958,18 @@ __metadata: resolution: "@devnet/command@workspace:packages/command" dependencies: "@devnet/cl-client": "workspace:*" + "@devnet/logger": "workspace:*" "@devnet/service": "workspace:*" "@devnet/state": "workspace:*" + "@devnet/types": "workspace:*" + "@devnet/ui": "workspace:*" "@fastify/swagger": "npm:^9.4.2" "@fastify/swagger-ui": "npm:^5.2.1" "@oclif/core": "npm:^4.0.37" "@oclif/plugin-help": "npm:^6.2.19" "@types/dockerode": "npm:^3.3.34" "@types/node": "npm:^22.10.5" + "@types/ws": "npm:^8.18.0" dockerode: "npm:^4.0.4" eslint: 
"npm:^8" eslint-config-oclif: "npm:^5" @@ -975,6 +986,84 @@ __metadata: languageName: unknown linkType: soft +"@devnet/docker@workspace:*, @devnet/docker@workspace:packages/docker": + version: 0.0.0-use.local + resolution: "@devnet/docker@workspace:packages/docker" + dependencies: + "@devnet/logger": "workspace:*" + "@devnet/utils": "workspace:*" + "@oclif/core": "npm:^4.0.37" + "@oclif/plugin-help": "npm:^6.2.19" + "@types/node": "npm:^22.10.5" + dockerode: "npm:^4.0.4" + dotenv: "npm:^17.2.1" + eslint: "npm:^8" + eslint-config-oclif: "npm:^5" + eslint-config-oclif-typescript: "npm:^3" + eslint-config-prettier: "npm:^9" + eslint-plugin-prettier: "npm:^5.2.2" + execa: "npm:^9.5.2" + prettier: "npm:^3.4.2" + typescript: "npm:^5" + languageName: unknown + linkType: soft + +"@devnet/fp@workspace:*, @devnet/fp@workspace:packages/fp": + version: 0.0.0-use.local + resolution: "@devnet/fp@workspace:packages/fp" + dependencies: + "@oclif/core": "npm:^4.0.37" + "@oclif/plugin-help": "npm:^6.2.19" + "@types/node": "npm:^22.10.5" + eslint: "npm:^8" + eslint-config-oclif: "npm:^5" + eslint-config-oclif-typescript: "npm:^3" + eslint-config-prettier: "npm:^9" + eslint-plugin-prettier: "npm:^5.2.2" + fp-ts: "npm:^2.16.11" + prettier: "npm:^3.4.2" + typescript: "npm:^5" + languageName: unknown + linkType: soft + +"@devnet/helm@workspace:packages/helm": + version: 0.0.0-use.local + resolution: "@devnet/helm@workspace:packages/helm" + dependencies: + "@oclif/core": "npm:^4.0.37" + "@oclif/plugin-help": "npm:^6.2.19" + "@types/node": "npm:^22.10.5" + dotenv: "npm:^17.2.1" + eslint: "npm:^8" + eslint-config-oclif: "npm:^5" + eslint-config-oclif-typescript: "npm:^3" + eslint-config-prettier: "npm:^9" + eslint-plugin-prettier: "npm:^5.2.2" + prettier: "npm:^3.4.2" + typescript: "npm:^5" + languageName: unknown + linkType: soft + +"@devnet/k8s@workspace:*, @devnet/k8s@workspace:packages/k8s": + version: 0.0.0-use.local + resolution: "@devnet/k8s@workspace:packages/k8s" + dependencies: + 
"@devnet/command": "workspace:*" + "@kubernetes/client-node": "npm:^1.3.0" + "@oclif/core": "npm:^4.0.37" + "@oclif/plugin-help": "npm:^6.2.19" + "@types/node": "npm:^22.10.5" + dotenv: "npm:^17.2.1" + eslint: "npm:^8" + eslint-config-oclif: "npm:^5" + eslint-config-oclif-typescript: "npm:^3" + eslint-config-prettier: "npm:^9" + eslint-plugin-prettier: "npm:^5.2.2" + prettier: "npm:^3.4.2" + typescript: "npm:^5" + languageName: unknown + linkType: soft + "@devnet/key-manager-api@workspace:*, @devnet/key-manager-api@workspace:packages/key-manager-api": version: 0.0.0-use.local resolution: "@devnet/key-manager-api@workspace:packages/key-manager-api" @@ -1011,10 +1100,32 @@ __metadata: languageName: unknown linkType: soft +"@devnet/logger@workspace:*, @devnet/logger@workspace:packages/logger": + version: 0.0.0-use.local + resolution: "@devnet/logger@workspace:packages/logger" + dependencies: + "@devnet/ui": "workspace:*" + "@oclif/core": "npm:^4.0.37" + "@oclif/plugin-help": "npm:^6.2.19" + "@types/node": "npm:^22.10.5" + eslint: "npm:^8" + eslint-config-oclif: "npm:^5" + eslint-config-oclif-typescript: "npm:^3" + eslint-config-prettier: "npm:^9" + eslint-plugin-prettier: "npm:^5.2.2" + prettier: "npm:^3.4.2" + typescript: "npm:^5" + languageName: unknown + linkType: soft + "@devnet/service@workspace:*, @devnet/service@workspace:packages/services": version: 0.0.0-use.local resolution: "@devnet/service@workspace:packages/services" dependencies: + "@devnet/docker": "workspace:*" + "@devnet/logger": "workspace:*" + "@devnet/ui": "workspace:*" + "@devnet/utils": "workspace:*" "@fastify/swagger": "npm:^9.4.2" "@fastify/swagger-ui": "npm:^5.2.1" "@oclif/core": "npm:^4.0.37" @@ -1025,8 +1136,10 @@ __metadata: eslint-config-oclif-typescript: "npm:^3" eslint-config-prettier: "npm:^9" eslint-plugin-prettier: "npm:^5.2.2" + execa: "npm:^9.5.2" prettier: "npm:^3.4.2" typescript: "npm:^5" + viem: "npm:^2.36.0" languageName: unknown linkType: soft @@ -1035,6 +1148,8 @@ __metadata: 
resolution: "@devnet/state@workspace:packages/state" dependencies: "@devnet/keygen": "workspace:*" + "@devnet/types": "workspace:*" + "@devnet/utils": "workspace:*" "@types/dockerode": "npm:^3.3.34" "@types/node": "npm:^22.10.5" eslint: "npm:^8" @@ -1048,6 +1163,59 @@ __metadata: languageName: unknown linkType: soft +"@devnet/types@workspace:*, @devnet/types@workspace:packages/types": + version: 0.0.0-use.local + resolution: "@devnet/types@workspace:packages/types" + dependencies: + "@oclif/core": "npm:^4.0.37" + "@oclif/plugin-help": "npm:^6.2.19" + "@types/node": "npm:^22.10.5" + dotenv: "npm:^17.2.1" + eslint: "npm:^8" + eslint-config-oclif: "npm:^5" + eslint-config-oclif-typescript: "npm:^3" + eslint-config-prettier: "npm:^9" + eslint-plugin-prettier: "npm:^5.2.2" + prettier: "npm:^3.4.2" + typescript: "npm:^5" + languageName: unknown + linkType: soft + +"@devnet/ui@workspace:*, @devnet/ui@workspace:packages/ui": + version: 0.0.0-use.local + resolution: "@devnet/ui@workspace:packages/ui" + dependencies: + "@oclif/core": "npm:^4.0.37" + "@oclif/plugin-help": "npm:^6.2.19" + "@types/node": "npm:^22.10.5" + eslint: "npm:^8" + eslint-config-oclif: "npm:^5" + eslint-config-oclif-typescript: "npm:^3" + eslint-config-prettier: "npm:^9" + eslint-plugin-prettier: "npm:^5.2.2" + prettier: "npm:^3.4.2" + typescript: "npm:^5" + languageName: unknown + linkType: soft + +"@devnet/utils@workspace:*, @devnet/utils@workspace:packages/utils": + version: 0.0.0-use.local + resolution: "@devnet/utils@workspace:packages/utils" + dependencies: + "@oclif/core": "npm:^4.0.37" + "@oclif/plugin-help": "npm:^6.2.19" + "@types/node": "npm:^22.10.5" + dotenv: "npm:^17.2.1" + eslint: "npm:^8" + eslint-config-oclif: "npm:^5" + eslint-config-oclif-typescript: "npm:^3" + eslint-config-prettier: "npm:^9" + eslint-plugin-prettier: "npm:^5.2.2" + prettier: "npm:^3.4.2" + typescript: "npm:^5" + languageName: unknown + linkType: soft + "@eslint-community/eslint-utils@npm:^4.2.0, 
@eslint-community/eslint-utils@npm:^4.4.0": version: 4.4.1 resolution: "@eslint-community/eslint-utils@npm:4.4.1" @@ -1602,6 +1770,48 @@ __metadata: languageName: node linkType: hard +"@jsep-plugin/assignment@npm:^1.3.0": + version: 1.3.0 + resolution: "@jsep-plugin/assignment@npm:1.3.0" + peerDependencies: + jsep: ^0.4.0||^1.0.0 + checksum: 10c0/d749554dc691798116eb068eebe2d9bcb0b0d89ef6c7cc7c2a9f37d03da15fdbf8053407e97008090cd1bd6f256ea6c26abbada7399cf79f0b6b502e164b084b + languageName: node + linkType: hard + +"@jsep-plugin/regex@npm:^1.0.4": + version: 1.0.4 + resolution: "@jsep-plugin/regex@npm:1.0.4" + peerDependencies: + jsep: ^0.4.0||^1.0.0 + checksum: 10c0/bec7eb7ea6ab453a2672edc808644c5be3dc06b2a9d77182e18cd595b37deba6dcdb3760849d8684afc5779a86b7d2604dd525cb612a548f9ed9f31a8032ec24 + languageName: node + linkType: hard + +"@kubernetes/client-node@npm:^1.3.0": + version: 1.3.0 + resolution: "@kubernetes/client-node@npm:1.3.0" + dependencies: + "@types/js-yaml": "npm:^4.0.1" + "@types/node": "npm:^22.0.0" + "@types/node-fetch": "npm:^2.6.9" + "@types/stream-buffers": "npm:^3.0.3" + form-data: "npm:^4.0.0" + hpagent: "npm:^1.2.0" + isomorphic-ws: "npm:^5.0.0" + js-yaml: "npm:^4.1.0" + jsonpath-plus: "npm:^10.3.0" + node-fetch: "npm:^2.6.9" + openid-client: "npm:^6.1.3" + rfc4648: "npm:^1.3.0" + socks-proxy-agent: "npm:^8.0.4" + stream-buffers: "npm:^3.0.2" + tar-fs: "npm:^3.0.8" + ws: "npm:^8.18.2" + checksum: 10c0/9cecb4d585594554fd98775fee793c60a8c157112872d921ca94e9f598a3909aedcf05ce9478c8692f46324006b2ef908d96bff3f810cf736feea78b08621bcc + languageName: node + linkType: hard + "@lukeed/csprng@npm:^1.0.0": version: 1.1.0 resolution: "@lukeed/csprng@npm:1.1.0" @@ -1676,6 +1886,13 @@ __metadata: languageName: node linkType: hard +"@noble/ciphers@npm:^1.3.0": + version: 1.3.0 + resolution: "@noble/ciphers@npm:1.3.0" + checksum: 
10c0/3ba6da645ce45e2f35e3b2e5c87ceba86b21dfa62b9466ede9edfb397f8116dae284f06652c0cd81d99445a2262b606632e868103d54ecc99fd946ae1af8cd37 + languageName: node + linkType: hard + "@noble/curves@npm:1.2.0": version: 1.2.0 resolution: "@noble/curves@npm:1.2.0" @@ -1694,6 +1911,24 @@ __metadata: languageName: node linkType: hard +"@noble/curves@npm:1.9.6": + version: 1.9.6 + resolution: "@noble/curves@npm:1.9.6" + dependencies: + "@noble/hashes": "npm:1.8.0" + checksum: 10c0/e462875ad752d2cdffc3c7b27b6de3adcff5fae0731e94138bd9e452c5f9b7aaf4c01ea6c62d3c0544b4e7419662535bb2ef1103311de48d51885c053206e118 + languageName: node + linkType: hard + +"@noble/curves@npm:^1.9.1, @noble/curves@npm:~1.9.0": + version: 1.9.7 + resolution: "@noble/curves@npm:1.9.7" + dependencies: + "@noble/hashes": "npm:1.8.0" + checksum: 10c0/150014751ebe8ca06a8654ca2525108452ea9ee0be23430332769f06808cddabfe84f248b6dbf836916bc869c27c2092957eec62c7506d68a1ed0a624017c2a3 + languageName: node + linkType: hard + "@noble/hashes@npm:1.3.2": version: 1.3.2 resolution: "@noble/hashes@npm:1.3.2" @@ -1708,6 +1943,13 @@ __metadata: languageName: node linkType: hard +"@noble/hashes@npm:1.8.0, @noble/hashes@npm:^1.8.0, @noble/hashes@npm:~1.8.0": + version: 1.8.0 + resolution: "@noble/hashes@npm:1.8.0" + checksum: 10c0/06a0b52c81a6fa7f04d67762e08b2c476a00285858150caeaaff4037356dd5e119f45b2a530f638b77a5eeca013168ec1b655db41bae3236cb2e9d511484fc77 + languageName: node + linkType: hard + "@noble/hashes@npm:^1.0.0, @noble/hashes@npm:^1.3.0, @noble/hashes@npm:~1.7.1": version: 1.7.1 resolution: "@noble/hashes@npm:1.7.1" @@ -2043,6 +2285,13 @@ __metadata: languageName: node linkType: hard +"@scure/base@npm:~1.2.5": + version: 1.2.6 + resolution: "@scure/base@npm:1.2.6" + checksum: 10c0/49bd5293371c4e062cb6ba689c8fe3ea3981b7bb9c000400dc4eafa29f56814cdcdd27c04311c2fec34de26bc373c593a1d6ca6d754398a488d587943b7c128a + languageName: node + linkType: hard + "@scure/bip32@npm:1.4.0": version: 1.4.0 resolution: 
"@scure/bip32@npm:1.4.0" @@ -2054,6 +2303,17 @@ __metadata: languageName: node linkType: hard +"@scure/bip32@npm:1.7.0, @scure/bip32@npm:^1.7.0": + version: 1.7.0 + resolution: "@scure/bip32@npm:1.7.0" + dependencies: + "@noble/curves": "npm:~1.9.0" + "@noble/hashes": "npm:~1.8.0" + "@scure/base": "npm:~1.2.5" + checksum: 10c0/e3d4c1f207df16abcd79babcdb74d36f89bdafc90bf02218a5140cc5cba25821d80d42957c6705f35210cc5769714ea9501d4ae34732cdd1c26c9ff182a219f7 + languageName: node + linkType: hard + "@scure/bip39@npm:1.3.0": version: 1.3.0 resolution: "@scure/bip39@npm:1.3.0" @@ -2064,6 +2324,16 @@ __metadata: languageName: node linkType: hard +"@scure/bip39@npm:1.6.0, @scure/bip39@npm:^1.6.0": + version: 1.6.0 + resolution: "@scure/bip39@npm:1.6.0" + dependencies: + "@noble/hashes": "npm:~1.8.0" + "@scure/base": "npm:~1.2.5" + checksum: 10c0/73a54b5566a50a3f8348a5cfd74d2092efeefc485efbed83d7a7374ffd9a75defddf446e8e5ea0385e4adb49a94b8ae83c5bad3e16333af400e932f7da3aaff8 + languageName: node + linkType: hard + "@scure/bip39@npm:^1.0.0": version: 1.5.4 resolution: "@scure/bip39@npm:1.5.4" @@ -2735,6 +3005,15 @@ __metadata: languageName: node linkType: hard +"@types/bcryptjs@npm:^3.0.0": + version: 3.0.0 + resolution: "@types/bcryptjs@npm:3.0.0" + dependencies: + bcryptjs: "npm:*" + checksum: 10c0/5d61ce381736f8252627cf32f2bbc17003c0361c6cf63ac23034a651b9c1edfdbf8f786730816e0b5257a927ffa369658562183a68495eb07ca2ddc77fbb171c + languageName: node + linkType: hard + "@types/docker-modem@npm:*": version: 3.0.6 resolution: "@types/docker-modem@npm:3.0.6" @@ -2763,6 +3042,13 @@ __metadata: languageName: node linkType: hard +"@types/js-yaml@npm:^4.0.1": + version: 4.0.9 + resolution: "@types/js-yaml@npm:4.0.9" + checksum: 10c0/24de857aa8d61526bbfbbaa383aa538283ad17363fcd5bb5148e2c7f604547db36646440e739d78241ed008702a8920665d1add5618687b6743858fae00da211 + languageName: node + linkType: hard + "@types/json-schema@npm:^7.0.12": version: 7.0.15 resolution: 
"@types/json-schema@npm:7.0.15" @@ -2786,6 +3072,16 @@ __metadata: languageName: node linkType: hard +"@types/node-fetch@npm:^2.6.9": + version: 2.6.13 + resolution: "@types/node-fetch@npm:2.6.13" + dependencies: + "@types/node": "npm:*" + form-data: "npm:^4.0.4" + checksum: 10c0/6313c89f62c50bd0513a6839cdff0a06727ac5495ccbb2eeda51bb2bbbc4f3c0a76c0393a491b7610af703d3d2deb6cf60e37e59c81ceeca803ffde745dbf309 + languageName: node + linkType: hard + "@types/node@npm:*, @types/node@npm:>=13.7.0, @types/node@npm:^22.10.5": version: 22.10.7 resolution: "@types/node@npm:22.10.7" @@ -2813,6 +3109,15 @@ __metadata: languageName: node linkType: hard +"@types/node@npm:^22.0.0": + version: 22.18.0 + resolution: "@types/node@npm:22.18.0" + dependencies: + undici-types: "npm:~6.21.0" + checksum: 10c0/02cce4493eee8408e66e76fcad164f33c0600ed0854ad08e5519a76a06402da5b589b278cf71bc975c9e014f2668bdf758bc3be7fed63bdbfd0900495372797c + languageName: node + linkType: hard + "@types/node@npm:^22.5.5": version: 22.13.4 resolution: "@types/node@npm:22.13.4" @@ -2845,6 +3150,15 @@ __metadata: languageName: node linkType: hard +"@types/stream-buffers@npm:^3.0.3": + version: 3.0.7 + resolution: "@types/stream-buffers@npm:3.0.7" + dependencies: + "@types/node": "npm:*" + checksum: 10c0/c27f2b698a63aa6c0a4023ac47aad174bd601963d65a419f7422970ec7f946e65ed6920c71540fc8a9c1044642e5da769ea51e370abbc5d614f3ab16ff08529f + languageName: node + linkType: hard + "@types/wrap-ansi@npm:^3.0.0": version: 3.0.0 resolution: "@types/wrap-ansi@npm:3.0.0" @@ -2852,6 +3166,15 @@ __metadata: languageName: node linkType: hard +"@types/ws@npm:^8.18.0": + version: 8.18.1 + resolution: "@types/ws@npm:8.18.1" + dependencies: + "@types/node": "npm:*" + checksum: 10c0/61aff1129143fcc4312f083bc9e9e168aa3026b7dd6e70796276dcfb2c8211c4292603f9c4864fae702f2ed86e4abd4d38aa421831c2fd7f856c931a481afbab + languageName: node + linkType: hard + "@typescript-eslint/eslint-plugin@npm:^6.21.0": version: 6.21.0 resolution: 
"@typescript-eslint/eslint-plugin@npm:6.21.0" @@ -3049,6 +3372,36 @@ __metadata: languageName: node linkType: hard +"abitype@npm:1.0.8": + version: 1.0.8 + resolution: "abitype@npm:1.0.8" + peerDependencies: + typescript: ">=5.0.4" + zod: ^3 >=3.22.0 + peerDependenciesMeta: + typescript: + optional: true + zod: + optional: true + checksum: 10c0/d3393f32898c1f0f6da4eed2561da6830dcd0d5129a160fae9517214236ee6a6c8e5a0380b8b960c5bc1b949320bcbd015ec7f38b5d7444f8f2b854a1b5dd754 + languageName: node + linkType: hard + +"abitype@npm:^1.0.8": + version: 1.0.9 + resolution: "abitype@npm:1.0.9" + peerDependencies: + typescript: ">=5.0.4" + zod: ^3 >=3.22.0 + peerDependenciesMeta: + typescript: + optional: true + zod: + optional: true + checksum: 10c0/8707fcc80d3edaea14717b0c9ebe15cef1f11825a9c5ffcdbd3ac8e53ae34aaa4c5eb9fb4c352d598d0013fcf71370eeda8eac2fc575f04fa0699a43477ae52b + languageName: node + linkType: hard + "abstract-logging@npm:^2.0.1": version: 2.0.1 resolution: "abstract-logging@npm:2.0.1" @@ -3364,6 +3717,13 @@ __metadata: languageName: node linkType: hard +"b4a@npm:^1.6.4": + version: 1.6.7 + resolution: "b4a@npm:1.6.7" + checksum: 10c0/ec2f004d1daae04be8c5a1f8aeb7fea213c34025e279db4958eb0b82c1729ee25f7c6e89f92a5f65c8a9cf2d017ce27e3dda912403341d1781bd74528a4849d4 + languageName: node + linkType: hard + "balanced-match@npm:^1.0.0": version: 1.0.2 resolution: "balanced-match@npm:1.0.2" @@ -3371,6 +3731,62 @@ __metadata: languageName: node linkType: hard +"bare-events@npm:^2.2.0, bare-events@npm:^2.5.4": + version: 2.6.1 + resolution: "bare-events@npm:2.6.1" + checksum: 10c0/948aabf7380120445f7f7b01bd3911c28ad72a8eaa08f6e308bd470b303593d3639309d1a4e5e5c1ab99503a45a18152f474f065be3698bfe68a27ca21f64e37 + languageName: node + linkType: hard + +"bare-fs@npm:^4.0.1": + version: 4.2.1 + resolution: "bare-fs@npm:4.2.1" + dependencies: + bare-events: "npm:^2.5.4" + bare-path: "npm:^3.0.0" + bare-stream: "npm:^2.6.4" + peerDependencies: + bare-buffer: "*" + 
peerDependenciesMeta: + bare-buffer: + optional: true + checksum: 10c0/16cb6593b69d277bceb03710533682e8677dd8598ebc757cf406faa1f6178446f534726d845519fc77469ad8d86265e8c9f5b419fd93a8c7e30aacc1722ee05d + languageName: node + linkType: hard + +"bare-os@npm:^3.0.1": + version: 3.6.2 + resolution: "bare-os@npm:3.6.2" + checksum: 10c0/7d917bc202b7efbb6b78658403fac04ae4e91db98d38cbd24037f896a2b1b4f4571d8cd408d12bed6a4c406d6abaf8d03836eacbcc4c75a0b6974e268574fc5a + languageName: node + linkType: hard + +"bare-path@npm:^3.0.0": + version: 3.0.0 + resolution: "bare-path@npm:3.0.0" + dependencies: + bare-os: "npm:^3.0.1" + checksum: 10c0/56a3ca82a9f808f4976cb1188640ac206546ce0ddff582afafc7bd2a6a5b31c3bd16422653aec656eeada2830cfbaa433c6cbf6d6b4d9eba033d5e06d60d9a68 + languageName: node + linkType: hard + +"bare-stream@npm:^2.6.4": + version: 2.7.0 + resolution: "bare-stream@npm:2.7.0" + dependencies: + streamx: "npm:^2.21.0" + peerDependencies: + bare-buffer: "*" + bare-events: "*" + peerDependenciesMeta: + bare-buffer: + optional: true + bare-events: + optional: true + checksum: 10c0/3acd840b7b288dc066226c36446ff605fba2ecce98f1a0ce6aa611b81aabbcd204046a3209bce172373d17eaeaa5b7d35a85649c18ffcb9f2c783242854e99bd + languageName: node + linkType: hard + "base64-js@npm:^1.3.1": version: 1.5.1 resolution: "base64-js@npm:1.5.1" @@ -3394,6 +3810,15 @@ __metadata: languageName: node linkType: hard +"bcryptjs@npm:*, bcryptjs@npm:^3.0.2": + version: 3.0.2 + resolution: "bcryptjs@npm:3.0.2" + bin: + bcrypt: bin/bcrypt + checksum: 10c0/a0923cac99f83e913f8f4e4f42df6a27c6593b24d509900331d1280c4050b1544e602a0ac67b43f7bb5c969991c3ed77fd72f19b7dc873be8ee794da3d925c7e + languageName: node + linkType: hard + "bl@npm:^4.0.3, bl@npm:^4.1.0": version: 4.1.0 resolution: "bl@npm:4.1.0" @@ -3575,6 +4000,19 @@ __metadata: languageName: node linkType: hard +"cdk8s@npm:^2.70.15": + version: 2.70.15 + resolution: "cdk8s@npm:2.70.15" + dependencies: + fast-json-patch: "npm:^3.1.1" + follow-redirects: 
"npm:^1.15.11" + yaml: "npm:2.8.1" + peerDependencies: + constructs: ^10 + checksum: 10c0/02aef3f434402abe00b219614915a40d9fb9e89caf33e0631e21831efa5488466d3d46916712a89bb4a21a392e5c3952132eb96de401749d29b68bfd6b058954 + languageName: node + linkType: hard + "chalk@npm:4.1.2, chalk@npm:^4, chalk@npm:^4.0.0, chalk@npm:^4.0.2, chalk@npm:^4.1.0, chalk@npm:^4.1.1": version: 4.1.2 resolution: "chalk@npm:4.1.2" @@ -3825,6 +4263,13 @@ __metadata: languageName: node linkType: hard +"constructs@npm:^10.4.2": + version: 10.4.2 + resolution: "constructs@npm:10.4.2" + checksum: 10c0/dcd5edd631c7313964f89fffb7365e1eebaede16cbc9ae69eab5337710353913684b860ccc4b2a3dfaf147656f48f0ae7853ca94cb51833e152b46047ac7a4ff + languageName: node + linkType: hard + "content-disposition@npm:^0.5.4": version: 0.5.4 resolution: "content-disposition@npm:0.5.4" @@ -4118,6 +4563,13 @@ __metadata: languageName: node linkType: hard +"dotenv@npm:^17.2.1": + version: 17.2.1 + resolution: "dotenv@npm:17.2.1" + checksum: 10c0/918dd2f9d8b8f86b0afabad9534793d51de3718c437f9e7b6525628cf68c1d4ae768cc37a5afff38c066f58a8ecf549f4ac6cd5617485bd328e826112cc2650a + languageName: node + linkType: hard + "dunder-proto@npm:^1.0.0, dunder-proto@npm:^1.0.1": version: 1.0.1 resolution: "dunder-proto@npm:1.0.1" @@ -4815,6 +5267,13 @@ __metadata: languageName: node linkType: hard +"eventemitter3@npm:5.0.1": + version: 5.0.1 + resolution: "eventemitter3@npm:5.0.1" + checksum: 10c0/4ba5c00c506e6c786b4d6262cfbce90ddc14c10d4667e5c83ae993c9de88aa856033994dd2b35b83e8dc1170e224e66a319fa80adc4c32adcd2379bbc75da814 + languageName: node + linkType: hard + "execa@npm:^9.5.2": version: 9.5.2 resolution: "execa@npm:9.5.2" @@ -4874,6 +5333,13 @@ __metadata: languageName: node linkType: hard +"fast-fifo@npm:^1.2.0, fast-fifo@npm:^1.3.2": + version: 1.3.2 + resolution: "fast-fifo@npm:1.3.2" + checksum: 10c0/d53f6f786875e8b0529f784b59b4b05d4b5c31c651710496440006a398389a579c8dbcd2081311478b5bf77f4b0b21de69109c5a4eabea9d8e8783d1eb864e4c + 
languageName: node + linkType: hard + "fast-glob@npm:^3.2.9, fast-glob@npm:^3.3.2": version: 3.3.3 resolution: "fast-glob@npm:3.3.3" @@ -4887,6 +5353,13 @@ __metadata: languageName: node linkType: hard +"fast-json-patch@npm:^3.1.1": + version: 3.1.1 + resolution: "fast-json-patch@npm:3.1.1" + checksum: 10c0/8a0438b4818bb53153275fe5b38033610e8c9d9eb11869e6a7dc05eb92fa70f3caa57015e344eb3ae1e71c7a75ad4cc6bc2dc9e0ff281d6ed8ecd44505210ca8 + languageName: node + linkType: hard + "fast-json-stable-stringify@npm:^2.0.0": version: 2.1.0 resolution: "fast-json-stable-stringify@npm:2.1.0" @@ -5126,6 +5599,16 @@ __metadata: languageName: node linkType: hard +"follow-redirects@npm:^1.15.11": + version: 1.15.11 + resolution: "follow-redirects@npm:1.15.11" + peerDependenciesMeta: + debug: + optional: true + checksum: 10c0/d301f430542520a54058d4aeeb453233c564aaccac835d29d15e050beb33f339ad67d9bddbce01739c5dc46a6716dbe3d9d0d5134b1ca203effa11a7ef092343 + languageName: node + linkType: hard + "follow-redirects@npm:^1.15.6": version: 1.15.9 resolution: "follow-redirects@npm:1.15.9" @@ -5173,6 +5656,26 @@ __metadata: languageName: node linkType: hard +"form-data@npm:^4.0.4": + version: 4.0.4 + resolution: "form-data@npm:4.0.4" + dependencies: + asynckit: "npm:^0.4.0" + combined-stream: "npm:^1.0.8" + es-set-tostringtag: "npm:^2.1.0" + hasown: "npm:^2.0.2" + mime-types: "npm:^2.1.12" + checksum: 10c0/373525a9a034b9d57073e55eab79e501a714ffac02e7a9b01be1c820780652b16e4101819785e1e18f8d98f0aee866cc654d660a435c378e16a72f2e7cac9695 + languageName: node + linkType: hard + +"fp-ts@npm:^2.16.11": + version: 2.16.11 + resolution: "fp-ts@npm:2.16.11" + checksum: 10c0/9a263a577964adbb754221c48449a64a6962c91fa6af136e73b043ee70ee41c7b856fe1e6cc21fb4f0264dd9a75c53bd381b1bb486b6caa99a71f33d1bb24c2a + languageName: node + linkType: hard + "fs-constants@npm:^1.0.0": version: 1.0.0 resolution: "fs-constants@npm:1.0.0" @@ -5596,6 +6099,13 @@ __metadata: languageName: node linkType: hard 
+"hpagent@npm:^1.2.0": + version: 1.2.0 + resolution: "hpagent@npm:1.2.0" + checksum: 10c0/505ef42e5e067dba701ea21e7df9fa73f6f5080e59d53680829827d34cd7040f1ecf7c3c8391abe9df4eb4682ef4a4321608836b5b70a61b88c1b3a03d77510b + languageName: node + linkType: hard + "http-cache-semantics@npm:^4.1.1": version: 4.1.1 resolution: "http-cache-semantics@npm:4.1.1" @@ -6149,6 +6659,24 @@ __metadata: languageName: node linkType: hard +"isomorphic-ws@npm:^5.0.0": + version: 5.0.0 + resolution: "isomorphic-ws@npm:5.0.0" + peerDependencies: + ws: "*" + checksum: 10c0/a058ac8b5e6efe9e46252cb0bc67fd325005d7216451d1a51238bc62d7da8486f828ef017df54ddf742e0fffcbe4b1bcc2a66cc115b027ed0180334cd18df252 + languageName: node + linkType: hard + +"isows@npm:1.0.7": + version: 1.0.7 + resolution: "isows@npm:1.0.7" + peerDependencies: + ws: "*" + checksum: 10c0/43c41fe89c7c07258d0be3825f87e12da8ac9023c5b5ae6741ec00b2b8169675c04331ea73ef8c172d37a6747066f4dc93947b17cd369f92828a3b3e741afbda + languageName: node + linkType: hard + "iterare@npm:1.2.1": version: 1.2.1 resolution: "iterare@npm:1.2.1" @@ -6192,6 +6720,13 @@ __metadata: languageName: node linkType: hard +"jose@npm:^6.0.12": + version: 6.0.13 + resolution: "jose@npm:6.0.13" + checksum: 10c0/e33510e784a0772718ec83e2a724e3f0f7cf79a8000267819b11c4c7981e2a49e6a2a5aea5261cf23697f4531f7e0dfaecd112e5e95ae3fa2b995fb54b1edc73 + languageName: node + linkType: hard + "js-tokens@npm:^4.0.0": version: 4.0.0 resolution: "js-tokens@npm:4.0.0" @@ -6217,6 +6752,13 @@ __metadata: languageName: node linkType: hard +"jsep@npm:^1.4.0": + version: 1.4.0 + resolution: "jsep@npm:1.4.0" + checksum: 10c0/fe60adf47e050e22eadced42514a51a15a3cf0e2d147896584486acd8ee670fc16641101b9aeb81f4aaba382043d29744b7aac41171e8106515b14f27e0c7116 + languageName: node + linkType: hard + "jsesc@npm:^3.0.2": version: 3.1.0 resolution: "jsesc@npm:3.1.0" @@ -6333,6 +6875,20 @@ __metadata: languageName: node linkType: hard +"jsonpath-plus@npm:^10.3.0": + version: 10.3.0 + resolution: 
"jsonpath-plus@npm:10.3.0" + dependencies: + "@jsep-plugin/assignment": "npm:^1.3.0" + "@jsep-plugin/regex": "npm:^1.0.4" + jsep: "npm:^1.4.0" + bin: + jsonpath: bin/jsonpath-cli.js + jsonpath-plus: bin/jsonpath-cli.js + checksum: 10c0/f5ff53078ecab98e8afd1dcdb4488e528653fa5a03a32d671f52db1ae9c3236e6e072d75e1949a80929fd21b07603924a586f829b40ad35993fa0247fa4f7506 + languageName: node + linkType: hard + "keyv@npm:^4.5.3": version: 4.5.4 resolution: "keyv@npm:4.5.4" @@ -6357,15 +6913,22 @@ __metadata: resolution: "lido-local-devnet@workspace:." dependencies: "@devnet/command": "workspace:*" + "@devnet/fp": "workspace:*" + "@devnet/k8s": "workspace:*" "@devnet/key-manager-api": "workspace:*" "@devnet/keygen": "workspace:*" + "@devnet/utils": "workspace:*" "@fastify/swagger": "npm:^9.4.2" "@fastify/swagger-ui": "npm:^5.2.1" "@oclif/core": "npm:^4.0.37" "@oclif/plugin-help": "npm:^6.2.19" + "@types/bcryptjs": "npm:^3.0.0" "@types/dockerode": "npm:^3.3.34" "@types/node": "npm:^22.10.5" + bcryptjs: "npm:^3.0.2" + cdk8s: "npm:^2.70.15" chalk: "npm:^5.4.1" + constructs: "npm:^10.4.2" dockerode: "npm:^4.0.4" eslint: "npm:^8" eslint-config-oclif: "npm:^5" @@ -6377,6 +6940,7 @@ __metadata: fastify: "npm:^5.2.1" oclif: "npm:^4.17.30" prettier: "npm:^3.4.2" + ps-list: "npm:^7.2.0" ts-node: "npm:^10.9.2" typescript: "npm:^5" yaml: "npm:^2.6.1" @@ -6843,7 +7407,7 @@ __metadata: languageName: node linkType: hard -"node-fetch@npm:^2.6.1": +"node-fetch@npm:^2.6.1, node-fetch@npm:^2.6.9": version: 2.7.0 resolution: "node-fetch@npm:2.7.0" dependencies: @@ -6928,6 +7492,13 @@ __metadata: languageName: node linkType: hard +"oauth4webapi@npm:^3.7.0": + version: 3.7.0 + resolution: "oauth4webapi@npm:3.7.0" + checksum: 10c0/eb357ceade4e26f0acacd2dc9821d4128d85fe803d853cfc80979cbc2862f1a854743b77f923a08f1e81f4fd2b7823d0c3f3ba196a2bf6a1eff421c6a4f1c430 + languageName: node + linkType: hard + "object-inspect@npm:^1.13.3": version: 1.13.3 resolution: "object-inspect@npm:1.13.3" @@ -7057,6 
+7628,16 @@ __metadata: languageName: node linkType: hard +"openid-client@npm:^6.1.3": + version: 6.6.4 + resolution: "openid-client@npm:6.6.4" + dependencies: + jose: "npm:^6.0.12" + oauth4webapi: "npm:^3.7.0" + checksum: 10c0/645cd5f79691acfc84aa92ed64da07823af12a6840676422f6cef3a7ed1baa233bbd6db4affcee9dc4dcb2e8bef74780412c0434e78616fc478281f90f7661cc + languageName: node + linkType: hard + "optionator@npm:^0.9.3": version: 0.9.4 resolution: "optionator@npm:0.9.4" @@ -7106,6 +7687,27 @@ __metadata: languageName: node linkType: hard +"ox@npm:0.9.1": + version: 0.9.1 + resolution: "ox@npm:0.9.1" + dependencies: + "@adraffy/ens-normalize": "npm:^1.11.0" + "@noble/ciphers": "npm:^1.3.0" + "@noble/curves": "npm:^1.9.1" + "@noble/hashes": "npm:^1.8.0" + "@scure/bip32": "npm:^1.7.0" + "@scure/bip39": "npm:^1.6.0" + abitype: "npm:^1.0.8" + eventemitter3: "npm:5.0.1" + peerDependencies: + typescript: ">=5.4.0" + peerDependenciesMeta: + typescript: + optional: true + checksum: 10c0/1678b0cb3e0f1b0986e8a944f64789ca00eb9e3d02fcb4a613f43fa9d75d9fb782f15a18133779fe6095f190fec2fd8be7a3c9df7ecf9c038fb0037ef542256d + languageName: node + linkType: hard + "p-cancelable@npm:^3.0.0": version: 3.0.0 resolution: "p-cancelable@npm:3.0.0" @@ -7513,6 +8115,13 @@ __metadata: languageName: node linkType: hard +"ps-list@npm:^7.2.0": + version: 7.2.0 + resolution: "ps-list@npm:7.2.0" + checksum: 10c0/1c9dd310713a6f3c595acc329c71407069abcc15794fc0bd905de06ebc58af9907e409d7dc357c5d52dfe12e5d598f79f51ee7f9a36417c89f34588886fb57e9 + languageName: node + linkType: hard + "pump@npm:^3.0.0": version: 3.0.2 resolution: "pump@npm:3.0.2" @@ -7787,6 +8396,13 @@ __metadata: languageName: node linkType: hard +"rfc4648@npm:^1.3.0": + version: 1.5.4 + resolution: "rfc4648@npm:1.5.4" + checksum: 10c0/8683e82ed9c3cb23844720d04eaeee12025146bfdfdf250b1cce80d56e16c6431530ba3033cbb0e7ca3a25223107847f14c6cac11a255ea7d219dc7ba11cd43d + languageName: node + linkType: hard + "rfdc@npm:^1.1.4, rfdc@npm:^1.2.0, 
rfdc@npm:^1.3.1": version: 1.4.1 resolution: "rfdc@npm:1.4.1" @@ -8121,7 +8737,7 @@ __metadata: languageName: node linkType: hard -"socks-proxy-agent@npm:^8.0.3, socks-proxy-agent@npm:^8.0.5": +"socks-proxy-agent@npm:^8.0.3, socks-proxy-agent@npm:^8.0.4, socks-proxy-agent@npm:^8.0.5": version: 8.0.5 resolution: "socks-proxy-agent@npm:8.0.5" dependencies: @@ -8285,6 +8901,27 @@ __metadata: languageName: node linkType: hard +"stream-buffers@npm:^3.0.2": + version: 3.0.3 + resolution: "stream-buffers@npm:3.0.3" + checksum: 10c0/d052e6344fba340b27dfbe8d6568f600b7f81fdc57b2659e82c8d58a3ef855a4852c56736b1078a511a7f4458db96ee89b11c42c96d116b9073a99deb29a6f05 + languageName: node + linkType: hard + +"streamx@npm:^2.15.0, streamx@npm:^2.21.0": + version: 2.22.1 + resolution: "streamx@npm:2.22.1" + dependencies: + bare-events: "npm:^2.2.0" + fast-fifo: "npm:^1.3.2" + text-decoder: "npm:^1.1.0" + dependenciesMeta: + bare-events: + optional: true + checksum: 10c0/b5e489cca78ff23b910e7d58c3e0059e692f93ec401a5974689f2c50c33c9d94f64246a305566ad52cdb818ee583e02e4257b9066fd654cb9f576a9692fdb976 + languageName: node + linkType: hard + "string-width-cjs@npm:string-width@^4.2.0, string-width@npm:^4.0.0, string-width@npm:^4.1.0, string-width@npm:^4.2.0, string-width@npm:^4.2.3": version: 4.2.3 resolution: "string-width@npm:4.2.3" @@ -8451,6 +9088,23 @@ __metadata: languageName: node linkType: hard +"tar-fs@npm:^3.0.8": + version: 3.1.0 + resolution: "tar-fs@npm:3.1.0" + dependencies: + bare-fs: "npm:^4.0.1" + bare-path: "npm:^3.0.0" + pump: "npm:^3.0.0" + tar-stream: "npm:^3.1.5" + dependenciesMeta: + bare-fs: + optional: true + bare-path: + optional: true + checksum: 10c0/760309677543c03fbc253b5ef1ab4bb2ceafb554471b6cbe4930d1633f35662ec26a1414c66fa6754f5aa7e8c65003f73849242f624c322d3dcba7a8888a6915 + languageName: node + linkType: hard + "tar-fs@npm:~2.0.1": version: 2.0.1 resolution: "tar-fs@npm:2.0.1" @@ -8476,6 +9130,17 @@ __metadata: languageName: node linkType: hard 
+"tar-stream@npm:^3.1.5": + version: 3.1.7 + resolution: "tar-stream@npm:3.1.7" + dependencies: + b4a: "npm:^1.6.4" + fast-fifo: "npm:^1.2.0" + streamx: "npm:^2.15.0" + checksum: 10c0/a09199d21f8714bd729993ac49b6c8efcb808b544b89f23378ad6ffff6d1cb540878614ba9d4cfec11a64ef39e1a6f009a5398371491eb1fda606ffc7f70f718 + languageName: node + linkType: hard + "tar@npm:^7.4.3": version: 7.4.3 resolution: "tar@npm:7.4.3" @@ -8490,6 +9155,15 @@ __metadata: languageName: node linkType: hard +"text-decoder@npm:^1.1.0": + version: 1.2.3 + resolution: "text-decoder@npm:1.2.3" + dependencies: + b4a: "npm:^1.6.4" + checksum: 10c0/569d776b9250158681c83656ef2c3e0a5d5c660c27ca69f87eedef921749a4fbf02095e5f9a0f862a25cf35258379b06e31dee9c125c9f72e273b7ca1a6d1977 + languageName: node + linkType: hard + "text-table@npm:^0.2.0": version: 0.2.0 resolution: "text-table@npm:0.2.0" @@ -8826,6 +9500,13 @@ __metadata: languageName: node linkType: hard +"undici-types@npm:~6.21.0": + version: 6.21.0 + resolution: "undici-types@npm:6.21.0" + checksum: 10c0/c01ed51829b10aa72fc3ce64b747f8e74ae9b60eafa19a7b46ef624403508a54c526ffab06a14a26b3120d055e1104d7abe7c9017e83ced038ea5cf52f8d5e04 + languageName: node + linkType: hard + "unicorn-magic@npm:^0.3.0": version: 0.3.0 resolution: "unicorn-magic@npm:0.3.0" @@ -8941,6 +9622,27 @@ __metadata: languageName: node linkType: hard +"viem@npm:^2.36.0": + version: 2.36.0 + resolution: "viem@npm:2.36.0" + dependencies: + "@noble/curves": "npm:1.9.6" + "@noble/hashes": "npm:1.8.0" + "@scure/bip32": "npm:1.7.0" + "@scure/bip39": "npm:1.6.0" + abitype: "npm:1.0.8" + isows: "npm:1.0.7" + ox: "npm:0.9.1" + ws: "npm:8.18.3" + peerDependencies: + typescript: ">=5.0.4" + peerDependenciesMeta: + typescript: + optional: true + checksum: 10c0/b46e19ffaf85b0e80d168a707d7b295cf58ebefde27cfb8626510dfdf845bcd25c0154a02b488fc545b07397fd880d87543622d2be6973b35ab94012092b512c + languageName: node + linkType: hard + "wcwidth@npm:>=1.0.1, wcwidth@npm:^1.0.1": version: 1.0.1 
resolution: "wcwidth@npm:1.0.1" @@ -9127,6 +9829,21 @@ __metadata: languageName: node linkType: hard +"ws@npm:8.18.3, ws@npm:^8.18.2": + version: 8.18.3 + resolution: "ws@npm:8.18.3" + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ">=5.0.2" + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + checksum: 10c0/eac918213de265ef7cb3d4ca348b891a51a520d839aa51cdb8ca93d4fa7ff9f6ccb339ccee89e4075324097f0a55157c89fa3f7147bde9d8d7e90335dc087b53 + languageName: node + linkType: hard + "y18n@npm:^5.0.5": version: 5.0.8 resolution: "y18n@npm:5.0.8" @@ -9148,6 +9865,15 @@ __metadata: languageName: node linkType: hard +"yaml@npm:2.8.1": + version: 2.8.1 + resolution: "yaml@npm:2.8.1" + bin: + yaml: bin.mjs + checksum: 10c0/7c587be00d9303d2ae1566e03bc5bc7fe978ba0d9bf39cc418c3139d37929dfcb93a230d9749f2cb578b6aa5d9ebebc322415e4b653cb83acd8bc0bc321707f3 + languageName: node + linkType: hard + "yaml@npm:^2.4.1, yaml@npm:^2.4.2, yaml@npm:^2.6.1": version: 2.7.0 resolution: "yaml@npm:2.7.0"