diff --git a/cmd/operator/app/main.go b/cmd/operator/app/main.go
index b67b98e18..cce4c71fb 100644
--- a/cmd/operator/app/main.go
+++ b/cmd/operator/app/main.go
@@ -73,7 +73,7 @@ func Run() {
initClickHouse(ctx)
initClickHouseReconcilerMetricsExporter(ctx)
- initKeeper(ctx)
+ keeperErr := initKeeper(ctx)
var wg sync.WaitGroup
wg.Add(3)
@@ -88,7 +88,17 @@ func Run() {
}()
go func() {
defer wg.Done()
- runKeeper(ctx)
+ if keeperErr == nil {
+ log.Info("Starting keeper")
+ keeperErr = runKeeper(ctx)
+ if keeperErr == nil {
+ log.Info("Starting keeper OK")
+ } else {
+ log.Warning("Starting keeper FAILED with err: %v", keeperErr)
+ }
+ } else {
+ log.Warning("Starting keeper skipped due to failed initialization with err: %v", keeperErr)
+ }
}()
// Wait for completion
diff --git a/cmd/operator/app/thread_keeper.go b/cmd/operator/app/thread_keeper.go
index b72abd20d..eef7f9d41 100644
--- a/cmd/operator/app/thread_keeper.go
+++ b/cmd/operator/app/thread_keeper.go
@@ -2,13 +2,11 @@ package app
import (
"context"
- "os"
"github.com/go-logr/logr"
apps "k8s.io/api/apps/v1"
apiMachineryRuntime "k8s.io/apimachinery/pkg/runtime"
- utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
clientGoScheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
ctrlRuntime "sigs.k8s.io/controller-runtime"
@@ -22,24 +20,28 @@ import (
controller "github.com/altinity/clickhouse-operator/pkg/controller/chk"
)
-var scheme = apiMachineryRuntime.NewScheme()
-
-func init() {
- utilRuntime.Must(clientGoScheme.AddToScheme(scheme))
- utilRuntime.Must(api.AddToScheme(scheme))
-}
-
var (
+ scheme *apiMachineryRuntime.Scheme
manager ctrlRuntime.Manager
logger logr.Logger
)
-func initKeeper(ctx context.Context) {
+func initKeeper(ctx context.Context) error {
+ var err error
+
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
logger = ctrl.Log.WithName("keeper-runner")
- var err error
+ scheme = apiMachineryRuntime.NewScheme()
+ if err = clientGoScheme.AddToScheme(scheme); err != nil {
+ logger.Error(err, "init keeper - unable to clientGoScheme.AddToScheme")
+ return err
+ }
+ if err = api.AddToScheme(scheme); err != nil {
+ logger.Error(err, "init keeper - unable to api.AddToScheme")
+ return err
+ }
manager, err = ctrlRuntime.NewManager(ctrlRuntime.GetConfigOrDie(), ctrlRuntime.Options{
Scheme: scheme,
@@ -48,7 +50,8 @@ func initKeeper(ctx context.Context) {
},
})
if err != nil {
- os.Exit(1)
+ logger.Error(err, "init keeper - unable to ctrlRuntime.NewManager")
+ return err
}
err = ctrlRuntime.
@@ -62,12 +65,19 @@ func initKeeper(ctx context.Context) {
},
)
if err != nil {
- os.Exit(1)
+ logger.Error(err, "init keeper - unable to ctrlRuntime.NewControllerManagedBy")
+ return err
}
+
+ // Initialization successful
+ return nil
}
-func runKeeper(ctx context.Context) {
+func runKeeper(ctx context.Context) error {
if err := manager.Start(ctx); err != nil {
- os.Exit(1)
+ logger.Error(err, "run keeper - unable to manager.Start")
+ return err
}
+ // Run successful
+ return nil
}
diff --git a/config/config.yaml b/config/config.yaml
index 2d55947ab..cb972728b 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -99,22 +99,33 @@ clickhouse:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+
- settings/*: "yes"
+
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
diff --git a/deploy/builder/templates-config/config.yaml b/deploy/builder/templates-config/config.yaml
index 45b232695..8735d9998 100644
--- a/deploy/builder/templates-config/config.yaml
+++ b/deploy/builder/templates-config/config.yaml
@@ -93,22 +93,33 @@ clickhouse:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+
- settings/*: "yes"
+
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
diff --git a/deploy/helm/Chart.yaml b/deploy/helm/Chart.yaml
index 6b89ea834..a0e47af07 100644
--- a/deploy/helm/Chart.yaml
+++ b/deploy/helm/Chart.yaml
@@ -12,8 +12,8 @@ description: |-
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
```
type: application
-version: 0.23.3
-appVersion: 0.23.3
+version: 0.23.4
+appVersion: 0.23.4
home: https://github.com/Altinity/clickhouse-operator
icon: https://logosandtypes.com/wp-content/uploads/2020/12/altinity.svg
maintainers:
diff --git a/deploy/helm/README.md b/deploy/helm/README.md
index 320d6ad52..d81cc7ca5 100644
--- a/deploy/helm/README.md
+++ b/deploy/helm/README.md
@@ -1,6 +1,6 @@
# altinity-clickhouse-operator
-![Version: 0.23.3](https://img.shields.io/badge/Version-0.23.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.23.3](https://img.shields.io/badge/AppVersion-0.23.3-informational?style=flat-square)
+![Version: 0.23.4](https://img.shields.io/badge/Version-0.23.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.23.4](https://img.shields.io/badge/AppVersion-0.23.4-informational?style=flat-square)
Helm chart to deploy [altinity-clickhouse-operator](https://github.com/Altinity/clickhouse-operator).
@@ -50,6 +50,7 @@ For upgrade please install CRDs separately:
| operator.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) |
| operator.resources | object | `{}` | custom resource configuration, look `kubectl explain pod.spec.containers.resources` for details |
| podAnnotations | object | `{"clickhouse-operator-metrics/port":"9999","clickhouse-operator-metrics/scrape":"true","prometheus.io/port":"8888","prometheus.io/scrape":"true"}` | annotations to add to the clickhouse-operator pod, look `kubectl explain pod.spec.annotations` for details |
+| podLabels | object | `{}` | labels to add to the clickhouse-operator pod |
| podSecurityContext | object | `{}` | |
| rbac.create | bool | `true` | specifies whether cluster roles and cluster role bindings should be created |
| secret.create | bool | `true` | create a secret with operator credentials |
diff --git a/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml b/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
index 093e55fc9..f3eff2751 100644
--- a/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
+++ b/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
diff --git a/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml b/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
index 837b2f286..d8ef8ba5e 100644
--- a/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
+++ b/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
diff --git a/deploy/helm/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml b/deploy/helm/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
index 1c9f55dd2..07fdeca31 100644
--- a/deploy/helm/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
+++ b/deploy/helm/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
@@ -1,13 +1,13 @@
# Template Parameters:
#
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.23.3
+ clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
diff --git a/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml b/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
index 4f9697edb..b53ef91d1 100644
--- a/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
+++ b/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
@@ -7,7 +7,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
diff --git a/deploy/helm/templates/_helpers.tpl b/deploy/helm/templates/_helpers.tpl
index 9d6fc81fa..ebac8ebf7 100644
--- a/deploy/helm/templates/_helpers.tpl
+++ b/deploy/helm/templates/_helpers.tpl
@@ -40,6 +40,9 @@ helm.sh/chart: {{ include "altinity-clickhouse-operator.chart" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
+{{- if .Values.podLabels }}
+{{ toYaml .Values.podLabels }}
+{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
diff --git a/deploy/helm/templates/generated/Deployment-clickhouse-operator.yaml b/deploy/helm/templates/generated/Deployment-clickhouse-operator.yaml
index bb60facfc..03b52fc56 100644
--- a/deploy/helm/templates/generated/Deployment-clickhouse-operator.yaml
+++ b/deploy/helm/templates/generated/Deployment-clickhouse-operator.yaml
@@ -2,9 +2,9 @@
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -21,7 +21,7 @@ spec:
matchLabels: {{ include "altinity-clickhouse-operator.selectorLabels" . | nindent 6 }}
template:
metadata:
- labels: {{ include "altinity-clickhouse-operator.selectorLabels" . | nindent 8 }}
+ labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 8 }}
annotations:
{{ toYaml .Values.podAnnotations | nindent 8 }}
checksum/files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-files.yaml") . | sha256sum }}
diff --git a/deploy/helm/templates/generated/Secret-clickhouse-operator.yaml b/deploy/helm/templates/generated/Secret-clickhouse-operator.yaml
index 2765c5155..a5f68b9cc 100644
--- a/deploy/helm/templates/generated/Secret-clickhouse-operator.yaml
+++ b/deploy/helm/templates/generated/Secret-clickhouse-operator.yaml
@@ -3,7 +3,7 @@
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml
index 59e44140e..fbbf89724 100644
--- a/deploy/helm/values.yaml
+++ b/deploy/helm/values.yaml
@@ -45,6 +45,8 @@ metrics:
# possible value format [{"name":"your-secret-name"}]
# look `kubectl explain pod.spec.imagePullSecrets` for details
imagePullSecrets: []
+# podLabels -- labels to add to the clickhouse-operator pod
+podLabels: {}
# podAnnotations -- annotations to add to the clickhouse-operator pod, look `kubectl explain pod.spec.annotations` for details
podAnnotations:
prometheus.io/port: '8888'
@@ -269,19 +271,27 @@ configs:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
- settings/*: "yes"
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml
index dd32f4bad..f84a2c198 100644
--- a/deploy/operator/clickhouse-operator-install-ansible.yaml
+++ b/deploy/operator/clickhouse-operator-install-ansible.yaml
@@ -11,14 +11,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1237,14 +1237,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2466,7 +2466,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2881,14 +2881,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.23.3
+ clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3157,7 +3157,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
---
# Template Parameters:
#
@@ -3183,7 +3183,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
rules:
#
@@ -3402,7 +3402,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -3424,7 +3424,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
config.yaml: |
@@ -3529,22 +3529,33 @@ data:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+
- settings/*: "yes"
+
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
@@ -3785,7 +3796,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
---
@@ -3801,7 +3812,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -3900,7 +3911,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -4000,7 +4011,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -4055,7 +4066,7 @@ data:
# Template parameters available:
# NAMESPACE={{ namespace }}
# COMMENT=
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN={{ password }}
#
@@ -4065,7 +4076,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -4076,9 +4087,9 @@ stringData:
#
# NAMESPACE={{ namespace }}
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -4089,7 +4100,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
replicas: 1
@@ -4125,7 +4136,7 @@ spec:
name: etc-clickhouse-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.23.3
+ image: altinity/clickhouse-operator:0.23.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -4193,7 +4204,7 @@ spec:
name: metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.23.3
+ image: altinity/metrics-exporter:0.23.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -4276,7 +4287,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
index 1a321fc87..dd57a874a 100644
--- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
+++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1220,14 +1220,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2437,7 +2437,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2842,14 +2842,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.23.3
+ clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3117,7 +3117,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
# Template Parameters:
#
@@ -3142,7 +3142,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
rules:
#
# Core API group
@@ -3351,7 +3351,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -3373,7 +3373,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
config.yaml: |
@@ -3478,22 +3478,33 @@ data:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+
- settings/*: "yes"
+
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
@@ -3733,7 +3744,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
---
@@ -3749,7 +3760,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -3843,7 +3854,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -3941,7 +3952,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -3995,7 +4006,7 @@ data:
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -4005,7 +4016,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -4016,9 +4027,9 @@ stringData:
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -4029,7 +4040,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
replicas: 1
@@ -4065,7 +4076,7 @@ spec:
name: etc-clickhouse-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.23.3
+ image: altinity/clickhouse-operator:0.23.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -4131,7 +4142,7 @@ spec:
- containerPort: 9999
name: metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.23.3
+ image: altinity/metrics-exporter:0.23.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -4213,7 +4224,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml
index 3664f18e2..f07a1c75f 100644
--- a/deploy/operator/clickhouse-operator-install-bundle.yaml
+++ b/deploy/operator/clickhouse-operator-install-bundle.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1230,14 +1230,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2459,7 +2459,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2874,14 +2874,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.23.3
+ clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3150,7 +3150,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
---
# Template Parameters:
#
@@ -3176,7 +3176,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
rules:
#
@@ -3395,7 +3395,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -3417,7 +3417,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
config.yaml: |
@@ -3522,22 +3522,33 @@ data:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+
- settings/*: "yes"
+
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
@@ -3778,7 +3789,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
---
@@ -3794,7 +3805,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -3893,7 +3904,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -3993,7 +4004,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -4048,7 +4059,7 @@ data:
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -4058,7 +4069,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -4069,9 +4080,9 @@ stringData:
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -4082,7 +4093,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
replicas: 1
@@ -4118,7 +4129,7 @@ spec:
name: etc-clickhouse-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.23.3
+ image: altinity/clickhouse-operator:0.23.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -4186,7 +4197,7 @@ spec:
name: metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.23.3
+ image: altinity/metrics-exporter:0.23.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -4269,7 +4280,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
index 18553eab8..e07ac0848 100644
--- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
+++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1220,14 +1220,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2437,7 +2437,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2842,14 +2842,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.23.3
+ clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3117,7 +3117,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
# Template Parameters:
#
@@ -3142,7 +3142,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
rules:
#
# Core API group
@@ -3351,7 +3351,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -3373,7 +3373,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
config.yaml: |
@@ -3478,22 +3478,33 @@ data:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+
- settings/*: "yes"
+
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
@@ -3733,7 +3744,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
---
@@ -3749,7 +3760,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -3843,7 +3854,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -3941,7 +3952,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -3995,7 +4006,7 @@ data:
# Template parameters available:
# NAMESPACE=${OPERATOR_NAMESPACE}
# COMMENT=
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -4005,7 +4016,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -4029,7 +4040,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
replicas: 1
@@ -4213,7 +4224,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml
index a93366ed6..e81406fc2 100644
--- a/deploy/operator/clickhouse-operator-install-template.yaml
+++ b/deploy/operator/clickhouse-operator-install-template.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1230,14 +1230,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2459,7 +2459,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2874,14 +2874,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.23.3
+ clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3150,7 +3150,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
---
# Template Parameters:
#
@@ -3176,7 +3176,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
rules:
#
@@ -3395,7 +3395,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -3417,7 +3417,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
config.yaml: |
@@ -3522,22 +3522,33 @@ data:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+
- settings/*: "yes"
+
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
@@ -3778,7 +3789,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
---
@@ -3794,7 +3805,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -3893,7 +3904,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -3993,7 +4004,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -4048,7 +4059,7 @@ data:
# Template parameters available:
# NAMESPACE=${OPERATOR_NAMESPACE}
# COMMENT=
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -4058,7 +4069,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -4082,7 +4093,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
replicas: 1
@@ -4269,7 +4280,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml
index 17ad2b28d..9623fca78 100644
--- a/deploy/operator/clickhouse-operator-install-tf.yaml
+++ b/deploy/operator/clickhouse-operator-install-tf.yaml
@@ -11,14 +11,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1237,14 +1237,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2466,7 +2466,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2881,14 +2881,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.23.3
+ clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3157,7 +3157,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
---
# Template Parameters:
#
@@ -3183,7 +3183,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
rules:
#
@@ -3402,7 +3402,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -3424,7 +3424,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
config.yaml: |
@@ -3529,22 +3529,33 @@ data:
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+
- settings/*: "yes"
+
+ # single values
+ - settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- - settings/logger: "no"
- - settings/macros/*: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
+ # exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
@@ -3785,7 +3796,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
---
@@ -3801,7 +3812,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -3900,7 +3911,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -4000,7 +4011,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -4055,7 +4066,7 @@ data:
# Template parameters available:
# NAMESPACE=${namespace}
# COMMENT=
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=${password}
#
@@ -4065,7 +4076,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -4076,9 +4087,9 @@ stringData:
#
# NAMESPACE=${namespace}
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -4089,7 +4100,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
replicas: 1
@@ -4125,7 +4136,7 @@ spec:
name: etc-clickhouse-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.23.3
+ image: altinity/clickhouse-operator:0.23.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -4193,7 +4204,7 @@ spec:
name: metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.23.3
+ image: altinity/metrics-exporter:0.23.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -4276,7 +4287,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml
index d8e426320..8887b99d1 100644
--- a/deploy/operator/parts/crd.yaml
+++ b/deploy/operator/parts/crd.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1230,14 +1230,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2459,7 +2459,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.23.3
+ clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2874,14 +2874,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.23.3
+# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.23.3
+ clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
diff --git a/deploy/operatorhub/0.23.4/clickhouse-operator.v0.23.4.clusterserviceversion.yaml b/deploy/operatorhub/0.23.4/clickhouse-operator.v0.23.4.clusterserviceversion.yaml
new file mode 100644
index 000000000..981669fc3
--- /dev/null
+++ b/deploy/operatorhub/0.23.4/clickhouse-operator.v0.23.4.clusterserviceversion.yaml
@@ -0,0 +1,1636 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: ClusterServiceVersion
+metadata:
+ name: clickhouse-operator.v0.23.4
+ namespace: placeholder
+ annotations:
+ capabilities: Full Lifecycle
+ categories: Database
+ containerImage: docker.io/altinity/clickhouse-operator:0.23.4
+ createdAt: '2024-03-21T17:20:00Z'
+ support: Altinity Ltd. https://altinity.com
+ description: ClickHouse Operator manages the full lifecycle of ClickHouse clusters.
+ repository: https://github.com/altinity/clickhouse-operator
+ certified: 'false'
+ alm-examples: |
+ [
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallation",
+ "metadata": {
+ "name": "simple-01"
+ },
+ "spec": {
+ "configuration": {
+ "users": {
+ "test_user/password_sha256_hex": "10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01",
+ "test_user/password": "test_password",
+ "test_user/networks/ip": [
+ "0.0.0.0/0"
+ ]
+ },
+ "clusters": [
+ {
+ "name": "simple"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallation",
+ "metadata": {
+ "name": "use-templates-all",
+ "labels": {
+ "target-chi-label-manual": "target-chi-label-manual-value",
+ "target-chi-label-auto": "target-chi-label-auto-value"
+ }
+ },
+ "spec": {
+ "useTemplates": [
+ {
+ "name": "chit-01"
+ },
+ {
+ "name": "chit-02"
+ }
+ ],
+ "configuration": {
+ "clusters": [
+ {
+ "name": "c1"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseOperatorConfiguration",
+ "metadata": {
+ "name": "chop-config-01"
+ },
+ "spec": {
+ "watch": {
+ "namespaces": []
+ },
+ "clickhouse": {
+ "configuration": {
+ "file": {
+ "path": {
+ "common": "config.d",
+ "host": "conf.d",
+ "user": "users.d"
+ }
+ },
+ "user": {
+ "default": {
+ "profile": "default",
+ "quota": "default",
+ "networksIP": [
+ "::1",
+ "127.0.0.1"
+ ],
+ "password": "default"
+ }
+ },
+ "network": {
+ "hostRegexpTemplate": "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
+ }
+ },
+ "access": {
+ "username": "clickhouse_operator",
+ "password": "clickhouse_operator_password",
+ "secret": {
+ "namespace": "",
+ "name": ""
+ },
+ "port": 8123
+ }
+ },
+ "template": {
+ "chi": {
+ "path": "templates.d"
+ }
+ },
+ "reconcile": {
+ "runtime": {
+ "reconcileCHIsThreadsNumber": 10,
+ "reconcileShardsThreadsNumber": 1,
+ "reconcileShardsMaxConcurrencyPercent": 50
+ },
+ "statefulSet": {
+ "create": {
+ "onFailure": "ignore"
+ },
+ "update": {
+ "timeout": 300,
+ "pollInterval": 5,
+ "onFailure": "rollback"
+ }
+ },
+ "host": {
+ "wait": {
+ "exclude": "true",
+ "include": "false"
+ }
+ }
+ },
+ "annotation": {
+ "include": [],
+ "exclude": []
+ },
+ "label": {
+ "include": [],
+ "exclude": [],
+ "appendScope": "no"
+ },
+ "statefulSet": {
+ "revisionHistoryLimit": 0
+ },
+ "pod": {
+ "terminationGracePeriod": 30
+ },
+ "logger": {
+ "logtostderr": "true",
+ "alsologtostderr": "false",
+ "v": "1",
+ "stderrthreshold": "",
+ "vmodule": "",
+ "log_backtrace_at": ""
+ }
+ }
+ }
+ ]
+spec:
+ version: 0.23.4
+ minKubeVersion: 1.12.6
+ maturity: alpha
+ replaces: clickhouse-operator.v0.23.3
+ maintainers:
+ - email: support@altinity.com
+ name: Altinity
+ provider:
+ name: Altinity
+ displayName: Altinity Operator for ClickHouse
+ keywords:
+ - "clickhouse"
+ - "database"
+ - "oltp"
+ - "timeseries"
+ - "time series"
+ - "altinity"
+ customresourcedefinitions:
+ owned:
+ - description: ClickHouse Installation - set of ClickHouse Clusters
+ displayName: ClickHouseInstallation
+ group: clickhouse.altinity.com
+ kind: ClickHouseInstallation
+ name: clickhouseinstallations.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Installation Template - template for ClickHouse Installation
+ displayName: ClickHouseInstallationTemplate
+ group: clickhouse.altinity.com
+ kind: ClickHouseInstallationTemplate
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Operator Configuration - configuration of the ClickHouse operator
+ displayName: ClickHouseOperatorConfiguration
+ group: clickhouse.altinity.com
+ kind: ClickHouseOperatorConfiguration
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Keeper Installation - ClickHouse Keeper cluster instance
+ displayName: ClickHouseKeeperInstallation
+ group: clickhouse-keeper.altinity.com
+ kind: ClickHouseKeeperInstallation
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ description: |-
+ ## ClickHouse
+ [ClickHouse](https://clickhouse.yandex) is an open-source, column-oriented database management system capable of real-time generation of analytical data reports.
+ Check the [ClickHouse documentation](https://clickhouse.yandex/docs/en) for complete details.
+ ## The Altinity Operator for ClickHouse
+ The [Altinity Operator for ClickHouse](https://github.com/altinity/clickhouse-operator) automates the creation, alteration, or deletion of nodes in your ClickHouse cluster environment.
+ Check [operator documentation](https://github.com/Altinity/clickhouse-operator/tree/master/docs) for complete details and examples.
+ links:
+ - name: Altinity
+ url: https://altinity.com/
+ - name: Operator homepage
+ url: https://www.altinity.com/kubernetes-operator
+ - name: Github
+ url: https://github.com/altinity/clickhouse-operator
+ - name: Documentation
+ url: https://github.com/Altinity/clickhouse-operator/tree/master/docs
+ icon:
+ - mediatype: image/png
+ base64data: |-
+ iVBORw0KGgoAAAANSUhEUgAAASwAAAEsCAYAAAB5fY51AAAAAXNSR0IArs4c6QAAQABJREFUeAHs
+ vQmgZ2lVH3j/r6p676abpSNLE2TrRlwSQBoVtVFHiUGFLKOEKCQTTUzGmTExhmhiSJBoTMZEs2KQ
+ BsWNREWdyCTOGOMEQZbI0i0NyCaIxAB203tXvfef33LO+c697/+qqqu3V1Xvvq773fOd3/md5Tvf
+ 9+57/erVajq4DiqwoQLnvf4PH3Pe4ekx29P0oNVqfTEgl6xWE8eLV6uVxgnjCjLHab0TI+ZW00PW
+ 6zWgq0+up/XNkG+edjiubt6Zppu31tOnd1brm7fwvL1e3by1Bf165+b1+vDN02r7Jhh+6MZnXfYh
+ jAfXQQVmFUC/HVxnbQV+aX3Bg875xJOOracrD21NV00705U4XK6aVtMTVqut81WX6BAcNpOOIIye
+ x4iJFMfDhmry8CIwRh5mZBHfppH61XT7znp672pavQeH3g07W1s3rLbX77npQQ+6YXra6rYNXg6m
+ zoIKRPedBZmerSmu16vzfvnGRx9a3XXlar2+CscEDqXVVTgSrtzaWj2SZRmHURRJh0uf9/EycNE2
+ cVpxPg+jLLMOJczPRiiPd0j1Q634eMgtr/X693CIvWe9OnQD+G/goXbXzs4Nt3/FZR8BxwaDJcGB
+ fLpW4ODAOl1Xbq+4cUBd+Mv/4/Om9bFrcF5cs1qvvgSvM5fpMIDN8tCxzDcaHjoCiFkyn+psur/f
+ sOaHneLfdHgxRs7zcNxZfwrjryOPX5u2pl+78VmXvgsyvgo9uM6UChwcWKf7Sq7XWxf+8u997s72
+ 1jWHpvU169X6S/Dl3GVKi4cQrjp8JO11aGnPGxGHlw+ztPeh5jOtTjHhfdj50AgX8zcrHib8Mg9K
+ 2W8a49DJw2c2Jmkf85Aib/LnGPxw9ik8/joyODjAeu1O4+eDA+t0WzwcUBf84keeslof4uF0DRbw
+ mTgJ8I1xHArIRYdHjD4c4piAntdm3JnxhjU75BaHF7PH98Q+hfHgAFMnnJ43d/HpGfvZEXUcUOtt
+ fHm3tb4Gp9Izp62tBzF5HU4+pVQLnkn90AIg5ufLvPnQIp/gfgDRHHf6vWExHR/abWxvcsgIB9hO
+ HGBbB19CxvLv5yFbdD/HeFbGdu7PffSJ07T9l7ZWqxegAI/YdJioMFLsPkzqsMkvxNrh1Q81486O
+ N6xNh5fyzy8rd3Y+tl5NP74znfPKm7/ikveelY23z5M+OLD20wL9Xx++7Nw7tl+wNW29EBvnadxM
+ vOrwydVq8/FKFbiDN6xT+l6Zqje/gectWINXr849/JM3ffGlfzjXHkgPVAVyCzxQ/g/8vnZ9zjmr
+ D/0JLMQ34bh5ztbW1jkqSjuU/EYUpeI8JvIw89dxB29YqkP7co/yyRxeszcs2vcLMip7Fwr+Szs7
+ 61fffM5DXz89a3WsQw6e798KHBxY92+9y9uRf//Bq7d2dl6IDfH1+HmoB0uhw+g4h0+uVjvMDt6w
+ ol44XOrwQTF3ffmHOZaPh9iuw03FX9wC1w89PP8BiH9ie3341bc++7J3LCwOxPuhArkF7gdXBy6m
+ 137wM86Zdl6EHfNN+N7Uk+JVaVaY+ZuT36S0+XKlDt6wZvWSsOkQupfesDa+qcEfjsd3Y0lefezI
+ zqtvfdblH98d1MHMfVGB3Ab3BfcBZ1bgtR958Dnru/43NP//ii9UHqI3AehYfB9GsQwHb1h8Bbr7
+ b0B5OOWYdd00ngp/8GBwfHrwbWe988nVtPXPp/U5//zTz34Qf+7r4LoPK3BwYN2HxZ1+9n2POnJs
+ 62+gyN+MQ+pCHk/jsIrjiodUuw7esHgmtC/v4hCqL+Narepx0yF0koeX1qP5K04+BG//slCrxvn6
+ dBO4abp1Z1r/yLHtwz94+1c/5KM0P7ju/QrMd8u9z39WMp772g9cubOz/bfwd9z+PPr6SB1C0eTj
+ 0OIR5i/7VCgeXrl52nzhc7XikBOvCYZ5s9Mm77JQ9tf9buQHYMxrmy5kEYvRccggPGw+dMwytvpM
+ jsMhD4nZWKztoR8meTjlCJjy2zRu8tNo67HzB490nG+XDzP+0C4OWczvrNdHkeGPT+vVD9z8Jx72
+ ngY9eLwXKsAaH1z3UgWO/NT78aMI67+Nln4uvkeF357SN7G3Zx0C+Rk6Dp8MQZufQjuUtPlypTgv
+ 2pgQrr25Le0Wfsr/DGd78na/iqnczH+SXrhZehmgrOa3xSGx642FbvFH7jkCrzjbaH9EbLgW/HnY
+ nZKfTh+8u3g4XxHjMeQ8tCjiz860Wv/89tbW99/2lQ97a6c9eD71Chyny06d9GyzPPxT7//y1Wr7
+ b+OM+vI8o9zS8Zk3Dods8jo0UCjhUs8RnV762aFSZ0k9EDc/ZFKMZW32fU1Oih+BzXG749IhAmLH
+ IYNys+nQYVSuy4aRuzzy3zUWa3sI/L3ip9HWY+fHJOPWxfl2+TAbb1iUkQj+GBc0/w9+LOL7bvnq
+ z/jVZnrweAoViM4+Bcuz3eQl661DV33guVvrHRxU09O8yWLPoTb4chB3NG0cGtnEdQjs0rug2vx8
+ bIeNtkCulDY11TGhcfdhspefmp/x296niXkH/4jLcTS/s/QyQONn99i19+jNR3kzgg3Xgv8e+en0
+ wetDqR2ynM/1Iz7k/oZlmog34Ov1zlumaev7bn725b+ABTz4LRIu0t26H6fL7hbPWQU+8tPv+Tz8
+ FpeX48u+q3OvqADaVD5r3KMHb1izNyAUqW91Nl/JWchN46buCtyMH/XfdbjA9oR+TsQfcQpGv+2y
+ vxO+YUVcjg8B/cb26tC33fbsh/23RnXweBIVODiwTqJIBcGPJxzZvvNl+Ez5Lfhhz63abQRsOKy0
+ HTmvy9um3nByG5U+UCnHWPiiwQP2zHgDWvAu7RZ+Bp8JLR+8YakOi8Nozzc14Vx3rVrIJ37DQp3x
+ wUMOF355xPrlq3Mu/Lv4e4uf9Oof3E9UAXftiVBnux5f/h258n3fggZ7GX4h3oN5JujNAA/svTgj
+ Nh5aauIBQCXbl2+SFocPCDcfKgs/sCUuAtEKDTGWNfwKJ4RvJ8Ufh2LmOYs78+n8s0IAnXn0Ee7F
+ t2lM+01ji70eA3ev+CnS9tD5Mc24dXG+Xaf4hsUCgQXrtLPzyenQ9F03P/vhr8CCHnyZ2Gq76TE6
+ e5PqYI4VOPJT770azfVyNBN+i6cPiTqEoudUqTgtYtBnUrV5bm42JwjqsAgZEzLPWx0uMV/4hIWD
+ Oa7xLu0WfgafCS3b3qfJmHdejmxpp7hVj4h8kUfmozE2f57u3uQ+BOgty1gj8PLXRvsjYsO14L9H
+ fjp98O6Kl/NZV+JDVl+kyHllFgMSrcMNeC1j8mDE7zb7b9s7h77t9uf8kd+Q6cFtYwXcnRtVZ/nk
+ j/3O5UcOb/9jvLd/I75XxXbjadWH2FSeVrWWejR1HW4G4N4OF0k+BId905MP1zgsJJbDDEtxCafw
+ hBey2YdlTDOu4Xcjv9LtuN1xDb+sS9QnHGlzwv9shE5+N41pv2kMztkQuBl/+tvEjzlWk3jF3ccZ
+ cQidn3aJ4Xy75D/XGfPib4fZcIP6EacJAXHLutFOpFCvf2xaHX7xrX/y4K/7qCKLm3fEYvKsFv/z
+ +vDWx977bfghqpegOJd4U2aTe5PXIcQmywrycBgwTFMREyqo5TocdulddR1CfGyHjdzs8hMTwu0+
+ TPbyU/Mzftv7NDHviGPE5Tia31l6GaDxs/vYtcqLm5Zo8W0aqUd8wsVYh8yMOIQFv3Z/2m/ix5z8
+ b/LT+YN3V7ycrwzowPI9ecMindyRblp/Gs9/79YLH/4vpoPfDtFXRFWfTZzNwuGffN+XTjvb/wab
+ Bf+qTG4a7rHYXhxjk6pFtSm0B122pR7lTZ4AYAhePAVr8HPCXavNKpEI+7c/ieVQcTVFuJ/zhX1Y
+ ajgpfuXJ+O1/Fjcd8YrRccjA87j3w0b+sAMrX+pp3kftVsxsGoHbdQXuXvGzixwTnZ9iYjjfLh9m
+ sc6YpzwyKxrXg/0gXgGNC7mm05Pd/Pb2tP7m25/zyIMvE1EtXtF5Fs7a+2vwd/5W2z+Ipvmz3sw+
+ VGK3oizonjgNduujaqV3cx+8YbVu0m4ch5E3edZpwwh8HXKoqzd52Dfaelzww0DrdUp+ihQPe/Fw
+ vo7bwPEwwgc3lNQYnVkMCp9656N2SR75CXeCDx54orODLxNRBF6s79l9/eT1Tz60fegX0ECPc7ex
+ 16P5tFksq2/UZZTdRd5UllXEpZ7NySbmvAG4W+4tX3rZN33YOZ6FHzDJTkTmD/fDX7O3f98HX9ox
+ zgU/Jua43XEBIELHIYNyo8MC+tkIrfxsGrVpwbdpLNb2ELgZf/rbxI85Rku84u5jo63Hzk+7VHC+
+ XfKf64x58WcjSU53qB9x4g0FcSHXdHoqN3yA3c76evxWiK+/5Wsffj0mztoL36o5e69DP/7ubzm0
+ c+gt6PPHae+hN7xJvTnZO9qMflDXzjax9FE/EoSMQc3JCdsTo+0S/KlP/uBA1ya+j+KjOa/0o2YP
+ WX7kfre9cGTwNYsfU5bpF4JgmYcdaj7ycBwRSMVBO2gMtJPgpaA8FnmJB7rZGPYzfLNb8qe8F955
+ wf/J+Mk4Mdal/LweGSd18idQ1scedBht1GNS5eEnhVjfkEPRB8SbvHJCRhGstlZPXk/bb7nglz72
+ zak5G0dX52zL/LXXX3ToztWPo/me610TZXBvqCmzubwXSzFvLjT1bK+qydnUgqn5ksclNs+uzUQD
+ XjJsmyTmCx8w4QRPR1aU386XOPLHNfjSjpvJ7gUJojlud1zzOLQL04XeJGQfh47fRLIuG8Ys5Kax
+ WNtD4MSLeGcjYMpn03gq/MEj77Rvl/OKwwjzlPOQIWy4Q/3wIT3LnjgBAsdpRa4H3HiZz36sB8/r
+ brnoyDdOz7r8FmPOnrs79uzJd5p+4t1P3dpe/wx+Uv1x7oXctNE0bH58cLMNPZpmtom7PopX+mwx
+ dWU/BQBsmz4+c+amzyWQXwrk08B4SpzFEQAMjXdpt/AzP4RItylfz5tf98D1edcn3DkuQ3ffx64V
+ bmw+iED3LS4ZeMXVRtWDPJuuBX+eEqfkp/MH7y4exZGRwyBk9U2K4ol4I0Hz+NBi3SirAvJjGrMi
+ /108MghW6d+/s9r6+tue84i3afIsuZ09XxLiL/9t/fi7//rWsZ034ueqHpe94marnlMT9c+Eu5sq
+ mwnNNnqoui16D5vQzWh7dtOQsyk1q0Ci23h4hNzHWfOGA/GXgnax+Zf29FunCsNs8TMqybbXLhHe
+ 846P99hkgedMOmRWmw6THj/1XXb+ES/NRScm4xKfI31EXnzU1fNMXI4AVJ54nvnbZBd48eaNuOBL
+ e6oyDzwJaRn54aPnMfSAQeF4og4hh6IP4rEf0eNGP61+8kMZ33Pd3n7j+b/40W9P5NkwssZn/vUT
+ H77s0M5tP41V/srxhoG01QsuAXqTTYAejebTBDAcCQx5tz7KV/pssWYniOXyn9tI/MGBgXrDPRY+
+ pnscA4fNAjuns7Bb+NmMs/28HhlHhjPnH3FYLzkedw2x+aPAGGJzA0gviruP1DOfNtofkRuuBf89
+ 8tPpg3dXvJyvyPEY8ji0kiTiDXg/tLjMlMUjP6ZxPaI+YV4VEp4S9a4PPUF+3W1bF//F6Tln/j9H
+ Fl2ZBT4Dxx+7/plb6+knsQGuyB4bm1arzVPCibtbvFnYFNo0VJdCvaNmg8XQR91CUXo2VfB0B+Uf
+ k2pZ8YsQE+FXouMqfIQJx6JTXCYIeQNf4xEo5O53Iz8AY975z2URidJxyCBdaDMKn/lwhFZ+N43c
+ jCrchrFY20Pg+6FW/jbxY07+N/lptPXY+YNPOs63qw4hrjPm6xBSplrOSIv6OGQMBBoP8lMDrIK/
+ 3PAhDm/y46N4IOeF37f1kdXhQ887079EPHO/JOSXgK+6/nu2ptWvoZGv8KbinuAhwhG3ehiy9WgK
+ bR7jBWyyzdhsYceuKX3QshnZXHIkQMmylz75qceF5k18H1uYww/tS0G7FOl38LK5tSk063mbDZwP
+ VQCoCN7hn3OOq9ulQ7HE5iYyr2Fv/10WD4CzMeokXNYhR5JWHcPDCfDkmfGnv012onc9gt3+wn/y
+ UJd5qE4loz74EK7izPoCBIXjifUNORR9EI/98M6LPGEniX6GTAQvfE/2iunYMX+JiN737Jl3PzMT
+ ++X3nbv1B3e+arW19Q3+DBZpeu2jedhEWFB3mVd2pndTeC+WYt5c3BRqvmgMEoYsWjVxby7z7NpM
+ 2eSyD7+gzM1ReJrzCrz0Lf5wX3YznC3DfL65NvIrj47bHdeMf1YIlCE3ex/h3XXZMKb9prHFXo+B
+ u1f8FGl76PyYZty6ON+u/fKG5Tqw7jzi1j9965ErXjR99erOFuoZ8Zhb4IxIRkn82DsuXK0P/RJ+
+ XdWz+mmiTYnVnG1O7R6XIM6K0MchU3p2AXAauIm7PkpX+tyM5A07QSyX//jMmZs+WOTf8IwrD41A
+ lB/rbd/zWtgt/JR/8uCy3PMZ8wboHjjjPQ/cLD3bGb24x+bP9fAmzzptGHOd2qg8ybPpWvDfIz+d
+ P3h3xas4mG/EE7K/XMNshZl6QFkuKPLLOS1j8nCUXgNYiVvyBEBeBw/Ecc15/vOt5x35mumrPuPW
+ ATj9n86sLwnxGxZWO4d+FV8GPovN0ZvccjZNNEPritE7rRnUI2y6aJaQbTaaTG0wbxY1He3k1wDx
+ uGnhHx+8hp7qgc/5whvuwBkH7R0IDbVJKq7Gaxw1vgbf8O9NBL1g5h3+aee4On86FIt3nx3EfdiT
+ b56XeMTKPOx2I77ZyV/3oAWL+iUuR+Aqz+TPcZNd4DGMS+vZ8g5NxunIwz/rg49ZfZQZ9TCEwvFE
+ HUIORR/EQ1cRJp9EkH7tZ9STWF7Si7fq+azzbz/6q9PPffxyI86MO2t8ZlyvfNcVq0MrHlaP1xKj
+ ebXqHHHpTQJdMN4wMGlg6BMezSd7GRoY8u43EpnTgfjKzO3reXsQT/nfpTcP9Ya3uDGR08NPx/W8
+ FnYLP+U/CHfnM+wdR4bDuriOngcOE7O4DN19j82f6+HNG+UHWrx9zHVqo/MmcsO14L9Hfjp98O6K
+ l/NZV+JDHodWkkS8ATePDxvWTYcMeeSnBtQjD7/OEzi6k373obWLx/H/znq99WW3P/eRH0m203k8
+ M96wrn3356AB3oA3hcdXc3mx3AVc5GqK1gzoAS1yDtFE+druXqpuU4/O+cvcD6P31FQ0cFOyRcxT
+ 9pJpFk1NCJJIuY8tzOGH9qWgXYrmK3vhCPTFeZsN3MEb1mIdWCoV1OuherX6RSU1uM7A4aPXFQSh
+ xwCF6x7rG3Io+iAeGo62IE/YcV5+hkwsL8UhXtGFrInHT9P2G8553Uc+x8jT+46MTvPr1dc/fbXe
+ +Y/4ntWltZhISW8q0XTMUG8S3Kw8FCTjVg/Uu0k2v3EAKEDwsGlCVpfagfjGNEsbdtS7nYf/kMUr
+ vW/iFdxLM4s350UbS0eHEY/TWdgt/Ay+juv5jHlFZHHELX/QRKIpLvOQbd5yHWKsQ5M0+KO4+5jr
+ 1Eb7I3LDteDXbkdgp+Sn0+8VL+crcjyGPA6tJIl4A94PLZUveeTHNK5HHn6dBxrhWS/qdx9azrvx
+ AK/1lt36xqPrra86+rwr3pysp+N4er9hvfpdz8Fh9atYvEv1GSYWUYuuJmiLLJmL2ZrBQK2b1lTw
+ 1gwhqzl32aMLB311ScDUVAQ4Lrpw15Z/yY6nGic2mdAKaBGvFfZL+4qfflKkn8HL5u6HSfmf8dte
+ fMGbcRvmPOzO/OlQknef/OZt2Nt/l8UD4Gxs8TBe4XMkaeiTP+UZruE5P+NPf5v8iD7ySgfEBV/y
+ UJV54ElIy4gXH8JVnKkHDArHk3kljxShz3o0O3sQQfrd67CSftA5zlk9pksPTzu/et7PfvRZoj1N
+ b6fvgXXtdd+wWk8/j8PqQtbebyZoGi5SytF0pYdi6DFrINXsTcnWm4f65BVgZs/uCLskCBkDetSE
+ thdADsq/9MlPPS7Em/g+io90vBgo/dC+FLRL0cCyF45AX+VfCdu/NgHNBDNR2YtuUVdSVRy0Y0D2
+ SxWvYU++eV4KG5jZ2OIpfLNb8qcsP4nLMfzP+NPfJj8tXjz6Un4t75rOPD06T+DwIX9Vh9TDEAri
+ XGfWI+ujh5CzHs1OPim3+snPkCMs13vQhTxwsR4XrraOvf78n/vgN6Td6TaengfWq971HVj+n0ST
+ HM6C12c6Ni0m/RmHzYFVLNnz1mOyHgImOO3BTruQ46Fkm6Ve9CYQPmjZlckjyJBlL73jCQbAwy8m
+ nI9H+wtUxqWuFxA32oXfni/VwpHBF3nNx3iS3/ZSBO/wT5Tj6nbpUCyxuYnMa9gzsHle4gFwNva4
+ E58jSUOf/CnLT+JyFDzyxPMJ/QS+uPlAf8GX9p523VjZISM/fAhXcaYeKChc96hDyKHog3jMK3rc
+ yNPqJz9DLpTijbBp1eInxv75sDoXP1L9k+f93IdPy19Tc9odWFuvetc/xv8J/Mf4jMH+qMsimoZN
+ hlnJsWgEWfZoPSbrgfOWbW8e6mXnB+Gt73xkx0UC4YM2tontBcCtxSd98lOPC/Emvo8tzOFHuwA2
+ EXi4320vHBl8zeLHlGX6hSBY5sGJoO9xCxgK5ktQqzNFXj3+ZV7KB5jZGLyyyzrkaELex3UCfOUJ
+ ixP6ASbjLQfkD/9pT93AZX04on74EC7r48pUXR1PrC+A5tGDApQ78nc7OpTc+gJyvalJ75v4Bp35
+ W/3sn37xBz8Jj+FHzvvZD31XozgtHk+rA2vr2uu+H2vwHaosHvqlzyhcTC4SFPkZhk3Hy7JH6zFZ
+ D5y3bHvzUC87PwhvfecjOy4SCB+0sU1sLwBuLT7pk596XGijxPexhTn80L4UtEuRiQzegzesqAvL
+ 2+uigu2uN2unq+llV9OuLyocMI7gwUfnH3qpta7Hf3PPOJM3HMpPi1N+hlyoZf+1+IkZfQuBxxV5
+ VtPLzvvZD34/9afLxchPj+va6/4P/FPL/9TF3h1yfQZT83BNkFosGtHSQ+6fadwLLsGAR/NpQoa4
+ oYlCLj+lJzuu0guNKNwUmjcA9+Z/l14gxWd4xsV4TF/zCsd68zdexiF82C38jPw7zvbdEXEmigGy
+ 48h5jFEW42Le8Pk916FtIqLFt2nMdWqj60uLDdeCP9e9NilMNvpr/KoLefoVvLt4hEtGGIQ8Dq0k
+ Cb5I1DyoI/Bql+ThCLoYUBfok1ZURRAS9eZJT1YseORn4CqP9KsV4D9Bvf72O//0Z/6zGdc+FViX
+ /X+96rrno6qv4T8P78XO1V2E7y7QptcSh+xNjTTVBdz8ufkwZ6BrMNN7E6uJyEOg+GowT0yLYKln
+ U+GjNn8EUP7LfeQh+/ALXdoVPmCKI/Ut/nBfdjOcAvRt8JlwyAGqPLJOzn/gWrwVRy8EN1/Wr43A
+ KtxNowtNwyhwGyOs2RC4e8XPjDiEzo8pxq2L8+3yIRDrjHnKSIBPQgWN68F+0ISAxoVc0+mp3JjP
+ fsiKeiaPPPgmPdwWjx5iQu5oZ/14oAz+Nf796Wn9wrv+zGNf0yj35SNz2N/Xte+6Bl9z/wr+IvNh
+ r8bmkL2ZYjGREeWOlx6LMzYdQOoF8w14O2RKj4fZJjaP7aN8pQ9ad4XtBKGf5n+X3jyKm4/k05CH
+ hsRZHAHA0HiXdgs/I//O3/MZ8+a336XdqIf1GW9I86FvHsQ3Nh/SAVJl7mOuUxvtj8gN14I/1/2U
+ /HT64N3Fw/mKHI8hb34zot5w80T+SJyyeOTHNK4H1iNpMTaCkKg3jybytuSBrHWTH/sz7zx+x7E+
+ Nq22vuqOP/WYX026/Tju7+9hXXvd52Oxf5GHlYoaza5CxiJkUXPxOdaicPO2xaJi6GFp4KArfTRD
+ yPOm6vxF7wfhs5XdFI6LLoYst5IdjwLgreWXdrN4iWE+9EN7ESVvipQHr3EE+hp8A+fmh14w8w7/
+ tGuHTNa94qCaAZmPaF7Dnnxjc5V/YBR+jmEvu8TnaELex3UC/N3yA9aMtxwov5Z3KAZu1E/1wXoo
+ n6pD6mGo8lB/vL7KejQ7+aTc6ic/Q46wHH8sQ8XR6lf1EJ0QZke8lEB7eL2z87ojr/3A05NzP46K
+ dT8GNr3ynY9Fo78Zv874IdoMKH6NGwL2G0A0BfSUOz4/0xhHPUC1eJbdo2gGNoXsRWRgyLbveqh5
+ lT5o1QZwIEcC4GY7t2AEUHpiCOe8HmJgPJ2GeS1xjTfsK89FHDU/w/V87H/EoTAUl+No8SkO60ee
+ IfchNn+uR20eYMgm3j5yEyE+4WIUP3k2XQv+e+Sn8wfvrngVR0YOg5BP5zcsZaM8pk+uDx2++s7n
+ Pfr9vRT75Xl/vmH96A2PQIF+BcE9hM3CZp2NrJ6LW3WUHu1fzRV2ibM9zbAZ0rwequdCn/6M17YS
+ X7dnXGGX8YQs2tiGjksA3Fp80ic/9bgyTzym3SxeYjIOnVohi1fhzOwEF44R+Rp83vyWGRf0gmVe
+ qaddi1tATFUcVEfihMbV41/m5fpEvMBLDl7ZZR1yJGf6Df6U98I7L5glf46b/Ije+Sa982t5hyLz
+ MjPDsodxWCWPx1ALt5/fsCqP9fSQ6ejR/zj9wu9yD+67Sy26r6L60Rsunrbuwj8UsfVkNV1uhj5u
+ CNhvDjyM0GTQ6w2B3UK7kJPPekzWg2GG0375xhE8GpZ60QdBDYpDDsI/NHLoOClZzviCxXFT6HEP
+ sRwov8LFG4lwLV/KCz/lf8a/zJduzCPz4HW5ch5jlIVhZLx6Xt5yHWLUOgAjvk0jcIqzjeKn/aYr
+ ePOQyfGU/HT+4N3FozhYh4gn5HFoJUnqIbNcwOWhxfJSDkUfwEoc9Z1HBpqw3odpIqyAQcBsP+pI
+ feWRfiP++TyBwl5357mXfOH0dQ+7Wdz75La/3rCu/eB509bRX8QPhvqwiiKzWbW4ObJ4YzVVylz8
+ WfHdFUOvxWzN4FUNPYbSpz8vcu+COX8Lg/HIXoOajk+Oiy7YfZbltjULtbpafmk3/AUm/dC+4idv
+ ivSTcdMrFQT6GnwD500EvWAmGv5p1+LOulccVMMw5wnHNeztv8sKG5jZGPbCZR1yNCHv4zoBnjwz
+ flhK3mQHXcZXDogL/8lD3cCN+pF5HDKeB1JUcgcCx+M6MhDz6EGByR35u50ZcA+70OehJ3XcxDfo
+ zN/qZ//0SzoAy0/uBymsJ+dq+uxz7/z06ybuyX107Z8D6yVr/JjVLf8OAV3jmrIJWFuvwmxkAVX0
+ UUm/EcRnlGaXONubb7ZmwZNrSJyajxMAJq94Qh5xtTAK71bINxvbK2DcWnzsWlxDT7X1fd7xbPCj
+ rhcBDTO84ktexaEuJav9Vfwl2575AqEEyl5htrhVKBkaRxPtNudDkdewJ988L/kHZjYGr+wSn6MJ
+ eR/XCfDkmfHDUvImO+gy3nJAXPhPHuoGzvlaRn746Pyqo/C4QeF4og4hh6IP4rEf3nnRT6uf/AxZ
+ EKIUL0fRhTxw9m+91ku8thtxE29G9+/0ZedcuP0zE/fmPrn2TSDTFe/8l1ur6TkqHoozPiNodSWz
+ mv7MJMCshJ63XkuMZvPqkdF8XKPBi0kDQ2/Z+vATeAHF1+3JT1nmfghZtG7fEW+Th958waB4M78+
+ Cr/0o10ASwXAeDMdA8teODL4cn7Dr2Xbu4dNVPaiW9SVVFkPPmuX2C9FXsOefFHPmHf+ES/mnJ/t
+ ZZf4HE3I+7ii8HvhOX/SfsCa8ZYD5dfyDsXAtXjh6Ux7w9KbHgqI/7723Cd94IerLg/wA9f0gb+u
+ fddfwcn5r9kC1WTRrP7M4ab3Jtkccn0GU/OAB/YdLz2bmLz0Mxwp/wGP5tNEA4ZcfkovcxNmmDTL
+ TIjTZYflf5c+UImPsfBFgwf5iQnhel6eL7uFn5qf8dt+FCbqx5DKTdatJiIOx+2CxvNyiM2f67Hn
+ YRLupEd8fRQ/eTZdC/575KfzB++ueBUH6xDxhDwOrSRJPWTAzZN5WQ5FH8Aah22lywcRiNh686Qn
+ K4ALmKJDXFpvxZf+m9+Iv/IL3EgrA8BPlq5X33r06x/3b2b+HgCBeT2w1yuv/yz8RsS3IJALajMh
+ olgij4vmJS6bchZ8zBfPEqdVxJqSD4ZUjwdOWLY+F9t4AcVnO9uzaYIHpt5UNYCOfuIwoD4clH/M
+ JI/Vzqv0CpB8LV7RMFDP9/gjPOEDMHB68m3wDZ6qByGVZ/p1YZZ2wgnuuFMmRT9sZMfNw3niN41Z
+ yE0j8LuuwN0rfnaRY6LzU0wM59tVhxAQykv6zLBoXA8giA8gWIALuabTU7nhg+3MSj/Bs4xj0IWf
+ mADOcdod3crvbH63H9ErH/71nem21aH10+/6M0+4XvMP0O2B/ZLw5R+7YDXt/Ds09O7Dqjapi86m
+ Z5E1sliU2+V561X6wCfO9jTLTQhjA8UiOsjWh5+QBRRf6j3SXnZkKH3QuivEJwdNllvJwWPALD/n
+ Y73wmW76oX0pGG+KBpa9cAT6GvkPnA9V6AUzUdkLtqgrqSoO2sGQcruGPfmintCXfzwr/BzDXnaJ
+ z5G8C/6U98LfLT+in8cv/vCfcTqMxHmUf2TiT07Ojzg8+c5B5WHfRR1CDkUfxCPrdCOeVj/IxSMP
+ vrkOLpPi1fo0O8ieZzx6kmHVSfUlPvjolzD5x797uJouwM/C/8z0Sx+7wIgH5v7AHlhHPvGvUBW8
+ YbGGKBZH/NGYMovLYvfRBrzXJT0siyfwXhzzk3joYWpH4iB86NNf2FnR9J0vQpA/zgdtZOK4iLGD
+ 8u9uUDzBAMPwS7QC8tjCLAds2hE/7VKkn2YvHBl8lf8Zv+3FF7zDP+0cl+MwfzqUxC4PvnAzi3+Z
+ l3jECrMcWzyFb/VY8qesOBOXIzgrz+TPcZOfwGMYF3HBl/FSmXVx5CmjPviY1UeZUQ8jlYd61zHl
+ UPRBPPbDOy8TpF/7CR4DjFK89ldxbKqH6EZGVacIVIO8Mn9S08C8+CHuJx+55Rbs2QfueuAOrGvf
+ +SLU44X+DIXasLioQ5ayZMxz1YXLkfXifLvMY5zXxHaJs33zMxyJRXRcI8WR/ixr0RRH6mNe+Aii
+ 9F5iNiefHBcxQ3aelJNfjyPPNu94iAtM+iGfE4WCflKc8yqO7MLgtdnA+TO2aIgQUcZtv4u6ApUO
+ xUL+CpDKaTrv0Gr6zsecO7356RdPz3vYYai7P3nZtd60Ey7XOUcreB9X49vUH1U3WLjeG+rT+DO+
+ ckD+0Kc9dQM38iHzmf6GpXIgz62trRce+an3vqjqdD8/uOr3s9OJ37daH3sL/o7gBfoUtKE5skk0
+ Qq/PBG0sux77kifk2kzMFoTVzCFv1scmDXwY9sE80kcQ8kd+waKJzWOEHZZ/hyMe6TfFD0Xhac6L
+ OA3pKOXwG/oZTha+Db60i08WFiuBOW6PQ6viGIX42ocdmf7BY8+drjhvfD78zZuOTX/zvbdP1926
+ wyNxfohQxuGgeDeNDnt+D9yyL+qNAeiT9jNnttT5MYPoxnw+Y7Q/1o+HVuRRnilnWtS7jwMINCwE
+ qEE8oh8OhRt5NR4BfXMdGs9wbDrIjk90BuKxeImP+Fy3CEDzc15obju6Pvzk6fmf+SEY3a8XY7t/
+ r5e/9ch05Jy34/Xys1zk2CyIwiWLEZsu9W3VvfobIvbmisWEnnLZhZx88uNVKb4Bj+bThAxxg0XI
+ 5af0UPMqfeYRDjivy3IdAm6f8h8gx00h7ApfNHhQODEhHPLG6Lw8X/LCT83P+G0fBI429DJXOHP+
+ nm8YTE84f2v6oSvPm65+UP3maqnyhm/cTq/5+F3TSz9wx/Spo9ziSBN/do3cXMynjfZH5IaLm4rx
+ LkbZA76LH3Py2/jT34w9+HbxcL4ipwPL49BKFs7jCrh5Mi+alUJ0cic4+zhoaT8IQqLePJrIWxAU
+ Dx56XpUHAS3++TxUGbZwJPdEjx/Pbz566OgXT//zk+8i4v66xqfA+8vjkSP89cY6rNhkLAKvKhqe
+ q5livnAN7yahpS/zRDNgSnI2ccnNT61Z+gdIPd+aIeTeTRUn45be/hVPyBiwxHbguIgZ8tBHnEFR
+ eRK9rIvDpMJ+ySei5E3RwLIXjkBfs/gxZZl1gyCYectedIu6kqrimKZLjmxNP/D4c6c3fP5Fex5W
+ NME3bqdvevg509uuvnj61ivOnY5ATrfDH/1HX+SY/jjm1eqzCV95As+0juuH+uBLeufX8g7FwKkw
+ YQccPuSneFIPQ+aJ+TpkQg5FH8RDV0UTkadf+4n6REzGs262qzha/ew/eLkvxEu5x01ek6p/CSsc
+ 6Wd+n37k2JEfMPr+uyuk+83dK9/5dXizep2KFMXUZwAEkCWsselVRRaZ1VSxd0dsnmgK8i3w0nNx
+ yCs9bvVgWtNTb9zQh18NS33EIn/BQ/7MqOJ1ZuV/l948ipuPYVd4mvMqPzEhXM/L82W38FPzM/5l
+ vnST/Ok265bzq4mf7f7CI8+d/ja+V/Vgnj5383r/bdvTd+DLxP9y43ZG6THXqY3Omwu24fLCjf4I
+ uTYjTBidlruPjV91oV2/9uIRLhlJaOZxaCVJ8IVjx4M6Aq9lTB6OoIvB/Ze0oiqCkGCPD/LMriUP
+ 5J5X1UN2I/75PBiDdvB7wrjmFzzb0/pPbT//yp+fxXEfCoz6/rmuffdjpvVdv4VCX1rN51WLTRjF
+ RTRZyhqBU7Ha6NVdhB98s02pRQxcEA49nKkXNulzsRleGQovN4xT8Th8FTEUpWdT4cP2RJin/GPG
+ 7tM/xmwyokkUYw8Tiprv8Zff0M9wsvCt/DeeXfyYmOPa+sDu6Zccmv7ZVRdMV114qDGf2uOvfPLo
+ 9F2/c/v0/tvhNNerj5toQ7/sC2+qqCvslFcfO68KRsSGq/NDXSjOt6s2MRBcFcq5zoQNd9THZjfQ
+ OAECR3x6KjfmG3k1HjqIS3rwBp3jaPmVfYVnB2N+tx9RK58FLxRph1+vfOOx1Tl//P76fpY7P7O+
+ r0Z+3+qcc9+A3xz6+doEKMJshF+tYR9RbC9CrkIbN8TpzRWLSZ5cLI4hJ5+WphymPhaFftl8spch
+ brAIufyUnuy4Si+0mrPsDMA98iY8M6Zdu+SXcszbX4nNT9gJ13iXdgs/gy/z3pQv3SS/g0u7R553
+ aPrex583Pffyc1rU9/wR39KafuSjd04/8KE7p5uP4Rvz8F/rpV2IeGLz7PLW9Yw75NxU3opajayG
+ R+B2+enke/FwvpjwGPLZ+IbFcu3srN987H1P/ILpJasdyvfldf98D4vft8JhxUTYRNyMszHmuUWq
+ FYCTvAfeTUJGX+KDRTVp2CXO/uzfvLCrh+q5sM/4Il4CxdftmUfYMYTSZys7E8clgBxWfMrUfNTq
+ yrpASLvCk45X+tEhGrLyznQMLHvhmKivwTdw/swPvWCYV16pp91qOhdzL/7M86e3PuPie/2wogd+
+ RfnX8H2tt+L7W9/4iHMjz1gHAph3v0JWnlm3HAWP/sEz06K1xk12gccwLtW59VNosq5mZFhmHodV
+ xukx1ML1N6y0q/UE3HE2O/mkPOpgP0POgF0Hl6ny3FQP0WVFHH/h5ceM9KOAVLnkHX6zDopna/X0
+ w0947/dkLPflyFjv2+tH3/W0abXzptVq6xAXZ/kZLT9zZwlrjGJT70Vt44aIzcMmRVGhn9mFzCKX
+ v+FIbMMN7Y0LIui5ePRv3rle5k0vmOIoO3sQT/lXNwRvUHBQ3H7QbOEZL68WR8kRb+UtWNRh4Wfw
+ mdDyMt8exzR9Hd6mXvqEC2Y/piDf9+Htulu2p2+/4bbpv92Cfx6Buz4XaJPPrk8cRq038KrLpjH7
+ oY0z+uDdxcP5rCsNQlZfpMh5eY6B7UM/+NAYsnjkxzRclc08MiBp6M2jibwteegv6iA7+XccPf7K
+ j/YCxpBy5NHjN854zuNp+9g0ffb05668wdb3zf1+eMNa/2sskg4rpsDk2HyzMea9WFFK4CTvgQfB
+ rCLiy2ZofhJnf/ZvXoDqgfOWicumoj55BQjZZswj7BiJ4qxBTUWA7QWQA/MD15qAWl1ZFwhpV/hM
+ N/3Q3oEATT8pGlj2whHoa/ANnPOFXjATEcfvT73+qRdPr/qci+7Xw4qRfvZFh6ZfedrF07990vnT
+ I85FmzLBfoWsPLNuOQJXeeJZZcpxk13gMYyLuOBLeyrlT6hRP3oYh0zGmXqAQeB4su+TR4rQaxCP
+ /cgJbuQZfWQ/Qy6U4iVv8LT4ial6iG5kNObTjxnVn4TJf/IOv1kH49hB06HD6+lf2Pq+uyuk+4z+
+ R/HT7KvpWvHjgdXME382AsBAXLIYs1nCjs3j1dgccr0pcHHJt8Dv8lcOzTfgtHecQeTIBDDvXA81
+ r9JnHuGA87osO07mu9QHKvExFr5o8IAElR9NhIu6SjSw7BZ+an7GP8/3ksOr6Xsef8H0Fx513oQf
+ WH/Ar9u319MPfeSu6Yc+fMd0J3+Ya3nF5qz+aJuV4c/6qssb+nFGvRcP57OuNAhZfZEi5+U5Bi4b
+ /eFDY8jikR/TOF7igpY0mYH8UBo8UudtyUN/WOc6XCiTTTz2RFPHlfOc4GyTY8K4wQfDBY5/SXr1
+ F7Zf8MRXSXEf3Bj1fXPxVx1Pd34ABXvoLgfcLEx2MdZmgkFviSx6H8u+kwdf8XR+4mKNhj4dRRlm
+ +lxshlkKBSZa0uEh0yC986kB7qDHh+0FwC14CZdkHmpt3/Tya73qEWEKJzgmmqLHVXyJ04RvI38T
+ pnwILzJ/8VHnT9/9uAumy07hxxSai/vk8aN37Ezf8/47ptf9AX5WMQqvTaR1yPXCCO+z/ulyLlgf
+ N0Xb+cNeMM63qzYx1xXzlMfKVpiaz8MqgMbJT+Bor8j1gBsv89mP9cVjgFHg0foDXnHkBFmo14hb
+ PfT53X6CuIbiFU/WOfJV3vpHWT+xfdH02OnrrrpPfrXyffcl4erOl2IjjMOKxcOlzaviQs4x5iN1
+ LxGb8Dj43LTk5CVeWOTmSz+Js2yceWFUD5y3bHvzUC87PwhvfcxLT++41Bw1oCdMaHsBcGvxSZ/8
+ 1ONa1INTwx8lXOmH9hU/eVOk38GrOKKZct5mA8dN8oxLD09veMal0z+56sJ9eVgx9kfhr/q88skX
+ TK9/ykXTlfHjFKpv1i3HyF954nk2aqGjPgs8fdSlgvb1sma+nlln4PDR6wqNDOQOCq+j+Qg0jx4U
+ YK1ftzMD7mGHJ/sZsiCcD4Li0cPA2T9xpFOkMh3zUlhPPsZBWIunf/J1/IkTUHz4OcuHHr5leqmE
+ ++Dmqt7bxPxG+7Ttb7Rv4o5iqjosXitulnI2Qq/PEG1U0WnXryVPyG0VojmyuWAsR8HDQeFQ78V2
+ eKUIfZoxLoevMOQv0sHE7uYyTzVJuU//GEFY+shvyPJSDtU0Lf5wL/uKh3EET1jP+B+BQ+D7rrxo
+ eu5n4P/MnUYXvzL88d+/c3rp+2+fPoXv9s76A3moLJvGXLA+bso79PVmkhjOt8v66BfMU+bKOwLK
+ Xq7CaUJA40KuaUVe5vHAPkNfSOJoGWJd0rt9jBNhTABV9hWe8xjzVpRccSQusiJv58tKt3mot4+t
+ VvfJN+DvozcsfKOd/1ewX23zcRW92WIErjYlnlU64HPchIdBZzcfLIqH+ly04Cfh0Jcj8Yiu9Bmf
+ 8YpIfN2e/JQjjNJH/NFeytMe5LD8S5/8wZF1gZh2hV/6ob0LFLwpGlj2whHoK/nOx9d/L378hdNv
+ ffGDT7vDipnwr/m8ED/+8LYveND0rY/CX/PhxKJ+Kg+wszEWTPVZ4F2huGs9Wz/VtOuLFdKM6wwc
+ PrwcSz1gULjux+urjDN5w6H8hB2mdn8SNM750E/wtPiJsH/rtS/E2+fpl36Cj3omVDiKLY4AMh7z
+ hZ3nDx3eWd8n34CP8OzsXrnnN9rbYbGLN4rp6mo1lXQVFQZa/ByB92eQMapIUbTiD97i6X4IYrbV
+ PLl4nI8yzPReHKdRirCP+GA3S1P+TOf42cRjkTOAis/hqJnwGIbhV6LjKnyEmfFy3oVKXIoBjLyE
+ kwPfnvvw86eXXXnhdMX5888pDXLaPfKv+fwN/jWfTx3NZd485oL1cVO2oR9vHAHifLusj3XGPOVc
+ Z8KGG6wrPqTn8iROgMBxWguqB9x4mW/E0XgMMAo8agfATc8HPMkPB9qFWA99frefIK6heMUTfBHf
+ bj/IZDXhG/BPepUI7qUbY7j3rpe/56HT4TvejQ0yvneV7FE8bT4V14snGRgVo4/Aq8hZ9D4mZxvN
+ E4tJngV+5ld63LRGLsGAR/NpogFDLj+lB4ZX6YM2M+K8Lmdoe+YbGZc+UCnHWPiiwYN6MSaEY7My
+ 7sg7Rrfg8HMVflTghz/7kukZl927P6XuyPfH/T/hr/m8+L23TR+6AxsmNynrgT+uRxuXfQh5dlFm
+ XZc8wiUjic2sT04piij4rA6e7GualUIBUjRrO1yKBxrhGX/0eciC8BYExYMH9UXgKg/JI/75PHnM
+ 6PhErAnjHH9M1Lwe4pZ8+Gs7n9i+c/XY6X+5974Bf+9+SXjkzu9FzD6sokiVSCtaNkGNAGWSsYRD
+ pl00TY0kXfC7uK25wi5x0mvNWzNozbw6oit9LErIvZsqTvG3MJos2tgejksB49bii64YeqpHM+T8
+ 8EcOXOmH9hU/7VLMfGIEjr9N4Z981sXTG5/50DP6sGJ5vvIhR6Y3Xf2g6e8/7rzpYvxMBqvg9dhQ
+ nw31JocuFbSvV067rliIgNnDOKyWesCij/obltdXitBnnMlrf/bT+gJ+iychGMU36EJudshHdVC4
+ epL16K/II8JnPjKoPCnO+UhgHPl8dT7MPnTrnJ179Rvww1N6PNXxJL/Rnif+bITPLGGN+ZkNo3dj
+ GzfE6DeMWEzydbuQVUzySsatHohPN9SDRxMNGHL5KT0wvEoftJkR53VxNK/c7tIHKvEx2p/pTcNA
+ Iz9OCNd4mx0f/9KjL5z+zhP5f/7u3c9NjnZ/3//grp3pe/G7t17zsTsVqOue64ORmxhF6uMsow2H
+ llaR87l+NAh5HFrJQhyugHszpz/7F4/8mMZdgrhoFuaNIOioN48m8rbkgZz5EWL/Hnv883kCTcj5
+ eNLQ44+JmjfO984Hhu3tra177Rvw92IX7+gb7ZVkJRuphCx9NAk3W+IrScCzKXIsXMO31ZQD80Qz
+ YCb9JM5yX7RyFPaWHUfEhQCSVzwhj7iol7kfpM9WoGLkB0DJso+uMH9wtPxy3vFs8EN7BxK8KTqg
+ Z1x2RG9U/+eTLz4rDytW9PJztqYfxm+U+LXPv2S6Gj+2cR6+Md/rWn0FbM7TThcXNtbD65XTrq/X
+ M+3Aiw8vx1IPOyi8jsfrq1i/6gv7s58Wt/wMuVCKl35aHyz6abTLyGj0F+Mmrxn95sRnT5h3+M16
+ GUc+X3M+zK2nQ4e2t++1b8APT+nxVMZXvPO5q63p512tPShRPOrzxJ+N8EkrlyzGbJawY/Mcj198
+ uZjkW+B3+SuHfCA+6bEo5NFEKBhZyOWn9DJv+swjHBCny7LtAc+MSx+olGMsfNHgQeHEhHBRV1D8
+ 0QsOTy97Ev6CMr6xfnDNK/CxO3emv48fg3gtfuupVmNDP84stEu9SYWHMu3iyXDioBmHVrJwHpfV
+ aF8easEHIm96PYhO7gQnjnoa8yqCkAaPJvIWBMWDB/VPEM0PE3kwH3F4cjx8MGHJMWF7xx+GMVSg
+ Jc/5WJlDz9v+xitfZ+ZTv9/zN6w1/l/A1vrvKjlsnkoyilShtaJx8y/xLkYtjfSV9AY8AEXNB/uN
+ ZkiZm3nm1zjzAlQPARO8NUPIvZsqTvJKT++4mizaaDLHJQBuLb7WBNTqyjwhpN3wF5j0Q3s5Yh1W
+ 0/l4e/h7V148ve2ahx0cVlGq5cC/k/jyz7oQP3h68XTlBWj9DfUuG9W5r5c1uS5YIU1YBg4fXg7P
+ Dz1g6pMT9VUsZ/FmJF7f9Gs/jisRHKWXn+Bp8ad+tIueZE47z6cfTSsfKVo8edgmn0bquc/imvMx
+ LiqgX2+/ODH3ZByeTpXl2nc8G78F5/UKOoq0kSoOjzzxZyMMVLQ+RjMR58OgjRsciA/FU1HJ0+1C
+ VjHJKxm3enDNHX40n+wbMOTyU3pgeJU+aDMjzutyhrZnvpb7YhOmuP1gK9i3MJsf8xL/9fjrNC99
+ 0iXTw/G7qg6uk6sA/nri9GO/d8f0vR/E75fH97q0Lt5dgyD6uTYhNFo14WL9iA55HFpJwZXDpQUk
+ bBxaapfk4Qi6GADPQ8TmjSDoBk8irFjw0B/7R37Sv8fIJMzSX4sXmrSz/7QffAp4hhOd7EadBMBt
+ NeHne581feOTfs2oU7vf8zesnenF3HRKLkfGEkWqsFrRNuG9mC5NJpvjJvyS38V1HN0ucY4vix7h
+ GagQFR5kx5H5WO7dVHHSQPjIsMmijS51XMSwGVp8kpM/OFr90m74C0z6gf0fu/TI9Otf/NDpFX/8
+ soPDKspzsgP/Ujf/cvfbnvGg6ZuvOA+/7jk2axKozn29rMh18Xrm+gGHD6077XR5lKg+aYdMyNVA
+ kiWJh+ZFo7iiHzkvP0OWK+Hhr/O0+ImZ9RH6jEzzecrk1bT8KKHCUTv8Zh0YD/dnXsNP8lMDO3wc
+ Wq9fkrhTHYenU2H4t++6Bv989X9Wlgw6irSRKvR54s9GGGQJawReyXfe4/CLD0U5G96wHoZvJr/s
+ yZdOf+6KC1S3jfU+mLxbFXjvrfz9W7dOv3Ej3gPyapu++hI6b9ac0QRu3pSajU2fhwL3NBfKmzn7
+ uvHID+XcB3n4wU5XEYTUDr9AWGGC4sFD7jPq54fJiH8+T6DYIk9ZaqLHHxM1r4e47eZz/JzfXq3u
+ 0VvWPXvD2tp5MYM4mTcgrQYS2gtfSQLjRc9FOzl+8bJpuEjNz9xvX7RyhAfOW7a9ebLJotti6Pxh
+ lwRqyog/utRxCSAHFV90xdBTHX7xmPOFR3z86yd/4wkXT9d9xcOnFxwcVizqvXY9EX+Z+j889ZLp
+ 1fm7v1pfj6091gVP8u11wrrho/rOmtBjUF9Qf7y+6n3T+kp+Wl/Iz5DlBDfF0fuvxU9M7yO/EY34
+ R9zkNaPfnGSpCdFl/MFnLQz4UhHX8JP8VICXeQB26B5+L2t4So8nO77yHU9BDG/FCe4s+5vQJo7Q
+ 54k/G5WSW0DFoyxaLHLnjUXYTO+inKlvWF/7iPOnf4i3qsdeuPnf/NtUk4O5U6sAf+fWD+N3b/3g
+ B2+f8APzY7ODTv2pXZ2dismQx6GVfr1puVdp6M3M7cK+thyKPmhz20/nkYEm7Mc8ibDCh4K2CSbs
+ Z+Dmh4k8hNnisM2wlRchnujxh2HZ6yFucz+YzPpgxP+iW++st54+fdNVb+02J/t86m9Y+NVESBm/
+ qp1FiqLkSO+VbIQS8l74ShJwL1YrYvLmuIFfvLAsnogr47DfWMQMz44UoMKDbPvMx3Lvpjl/S1P+
+ LIs2ulR+7QH3Fl9rAql5a/ml3RPwmf8/PvNh008//aEHh1UV6r59OBdvsn+Tv7/+Cy+dnodfD+31
+ tM9cF6y0JixjXfHhdordXnrATqqvBBMPiWO78EkE6Xevw0p6+Qme6P+yg+z4SKcnPNBPzqcfTTsO
+ wloe9YYYdtbCTnyUlnyUOev6MAAcGPj1w9t/h7Oncimku23It6tp9VY6V7BRHEXXgp/xcp7Fwagi
+ 9RFABuKSxdj0xZt+ZsQWxAuGM+UN65JzDk0veRK+IfzYi/fFb/3cUPKzZupNNx6dvv3dt07vxve5
+ 1KfahdmxKEPI49DK0mi3VmP7cMj+p1l0vPraNGbNQ6TzQCM86aLPQ06U90njgT73GzHLw4lMu+cJ
+ 1HTEJ4QmevwxUfN6iNvcDyYVp/NmAXiYnWQAAEAASURBVKHnW9ZV0wuf9N5udzLPp/aGtbP+Tnhl
+ bZ1UHC48vBisrhwtRdB74ytJ4LVoLDaexZe8OZJzwW+/0QzNLnHmMZ95AaqHoIPsOGbFNZD+Sm+c
+ Zah5lT4PX9ah1UNd0OJrzSJ73pAf/2/VX37cxdO7v/IR01/BuB9+RXHFd5Y+PAP/N5a/3PCf4pcb
+ 4gfm43KfV99h3dxO0f+1voBX3xyvr3rfRD/K07yP9jqsRn8Hj/px9J/7OnjRZ3gyO3AjbuI1DS0e
+ qCgcxTmftcQJKMPhJ/k5Dbvksz/8nPn2d8ngbt6Gp5M1vPa3nzBt3/Xbq62tw96koIji1LiJi0kx
+ WIxKqo/AMxCmWGPTF2/62cB/JrxhPfOh507/4ikPna68+MiGDA+m9kMFbsQ/oPh9H7htesVH7pi2
+ 2Y+8NHpTqn9j2h1NPf6o/Xk4ZP/TrBShz/7PQ4TkvBpO0uCROm/aH+QNHjzkfiNkfpjkTlvOE2hC
+ xydLTdje8cdEzeshbnM/mIw8xRcFwgF2bGd96Ml39y3r7r9hbR/9bhThcCbjICKJOGQii55DBO3i
+ 8ESe2SmnWCQ8O6dcNFZ/N95FGC4cj3FpT7vE2Z/9R83SkUiipoormyqbTMDoAvIM/qL3Q7iTPprM
+ cdGFm67soysoPxp/nea1X3D59Ctf+vCDw2os6b58uhS/6/4f4XeJvRG/OPCL8Lrl9UXf4aP6QpF7
+ 1598X0U7Vl9k+qNvOGM/sR8Swvnqz2zrtm9C7/ggcF+Unx43eaGSlvZ+0l3i8Ou8HY/5iHIcvQ7m
+ c32iQAGcDm9NO3/dwsnfFdJJw1/+2w+fDt31uwjwME9uZbdp3EQYuDzxZyPwSrKPwLMoG/1s4Bef
+ mobF4Zq0+EJOPusxWQ9eQ605/ZJH9jI0MOTyU3qoeZU+aDMjzutyhrZnvquJ/8PvxU+6bPq2J1wy
+ 8Ru9B9fpV4Ff+oM7p+96z63TR27Hv5/IvkEKuendCZzAHyjUf3hwH1oORR/24BGBCrTXoSXHAXMc
+ 7mP642X/za8CW84TKLjw8aShxx8TNW+c73M/mJN/590LtLPeuX197Mjj8fuyPtbtj/d8996wDh39
+ W9iY+EegMukYsSlVlBzpcaya/beicXMv8ZUk0Mmf4yb8kl982Qxyj6rzsJj5dbzmLUeKL2qquO7r
+ NywW/QWPuWi67tlXTN9x5YMODiutwOl5+5rLz53e8kWXTd/9+POnC/j7t6rv85BAXmpDHmbZ97Fv
+ rNh1WLESRROnXfLudVhJH+3u/nb/lx0IR9/rSQWnvvCKT9M6NKWoQ41hjvyKl3rus7jmfJkH7IQD
+ KBIjbmvaOn9ra/s70/ZkxuHpROiXv/XIdPjIR2FwOZdi9gbDIPJwaMHPKEOvNwwWCbKS40g+/JmN
+ Ta8kw74Xp/P7zSWagnwL/C5/5dAlGHAX1/YicmQCmFdNE7IXNXBZBoqZEXG67PApl+H7VE996MTx
+ 4DqzKvAx/DNkf/e9t07//vfviMTY0biisb2Zs++5d0uhDaBtJHgeIrLuBJrY69DyPvGZoG5r+4yG
+ 9t/8KrDlfLgLPO2A8J18+HDcnB7zAsRt7idxYefAhBTPev3fd+6884rpLz/taOfY6/nk37AOHfqT
+ 8OXDSjG0YLEp5TxHeotkynFPLnE5Bp+3tNZOfFl0HlIn4pc+itntMg7b98WBUwMVosKDTFwtSsi9
+ m6w3jvaVJh9CxoAlZn3G4n4G/nWaa59++fSGr3jkwWGlip95N/4LRD/6uRdP/+/Vl+JfzfbWOvm+
+ inaswyHrM++jvQ6r0d/Bo34c/Tf6lm3pDqWHMZ9+7Ff9S1iLp/ZF2FnLvheQ4oKPMmcRB3nkVhOB
+ 48Tqj0znXPAniTqZ6+QPrGn1DXQlnxwjyHyTmY303JKgmPIMh2w6z5JfMnkSl2PjE7foHRn5ut3c
+ r+O2Hkb1EOFBtn3EFbKAiiP1HrUGdMur9EEblToP/zrN38L3qa7/E4+env9HL9KsDQ7uZ2oFnoYf
+ g3jjFz1k+iH8WuoH4+99uk+8ad3/0T/ZQOqz3jduJ9dn9DVlHRqwy32TNRy8wdP3De1qX0DQKeLG
+ HfPpx4z0w/DoUXeJw2/6N07AwLX9R2uZwy75PBHxkA//AtK0/Q0yPombozkR8F9ef9F0ztHfX22t
+ LmJoNJqNLEYcJjmqKBHcLnrOs2iLsYp3qvzBVzydn0FE4EOfjqIMM70Xx2GWQomLlnR4yDRI73xq
+ mJ73qIum7/+8B0+PufDgxxRUn7PwdhN+DOIfvv/W6Uc+fCv/GXftE/dh7CA1UGwH1Mdv5nqIahlX
+ b0I8rPBBuV/ed40nGzNwZR9u1bh0A726W7jYx5zXDhdAbkQXeE6UnXBg2OXH8ZXdLr6RBzA3r3cO
+ P/Jk/rGKk3vDOm/7z+w6rLhrcXHTMtjZaIX0dTsBvg4RGKimPAzwPONNPyQNPj7yEg4WxRNxJc48
+ xpkXRvUQdJBtn/kMXvGUPuYly70JQr7qkiPTr1zziOmnv/CPHBxWUZ6zdXgQfwziqoum38Qb1xde
+ dlj9xb7Lfh19Fe2o48Pt5JrpOAk85nlYRZ/3mo7+zrZu+xJA93Xw8hQpP22fiTe90p7P9G+77tfx
+ U2s/AgnX+WxHouO9YWGrXjxtbf/p5DjeeHIH1rR+0ThRnQJlXhp5uFDO0Qrp63YC/CZ+lmrGexx+
+ 4VCW4ol4MKEQzGM+82K6Hjhv2faZT/jPRdHasPgZV9jRAwgefO6h6Yfwg59v+6pHT19y+cGvKGZZ
+ Di5X4IkXHZ5ef/WDp9f88UumKy44pD5VA6pP3UfqK58S2bYwdmO6vyl586ec9R39nW0934/ua/vR
+ IVl+Wj9r/5hRb1gMqHDkjX3B2dxX1GNf5jX8MO7wRzvhakL25hMMf8dw56S+LByebLf7/mPvuHw6
+ On0cwJVL5xR8okewKjoQOe5mcVLQ66RfjsAzkCW/kmcxkjfHDfz+DBKLSb5uF3LyyU855EOF5/gQ
+ ie1DoWIzDvPWZ5oMOAg+9DV/dHr4+fV3Nzh7Vl//7HfvnF7z8aPT33z0OdPXf8aZ+28h3t1F/oWP
+ 3zF909tvik2f/R1tDjIfSpbNHY3G/i/9ODyMoYINWoP4c7/JDnp2uw8bPcl0eciwz3kZp6eS+6El
+ fzOcYPbb5xX3eAnodo5Hvo6uj01XTN/8Of/dLJvvJ37DunPnz4FUv5UhU7STKJ6KlMG0IirI5jRk
+ FSHflHKM5Jb85SdxOZJ2wZ+LMCs+8ImzXy+CecFRDwETPA495WV874I5f9Hr4ZIjB7+imEvznz55
+ bHrKb948vfSDd04fwL/I/Fffc8f0rLfePL395m2qz/prHCLRn2PQYcUCjfbmPhv7yofZkLOYo7+z
+ rXl4DdzoW9Kp8WU65tOPGeuNKE4vb4c5H5HGkc/XnC/zgB155JZ+OJ/7zHZQ4cemVs+3tPf9xAfW
+ 1upFSoXJgydT1RsI5TgUZiP9qSh8iCvkGY5Bt/klf/rl6s3sSLngN49x3S5xtne81oOjHoIOMnFq
+ CvKHHA8l2yz18/xCOiuHD+GnvZ/3jlun51932/Rh/iIpXLlu77x1PX3F226ZvvWG26eP41+vOduv
+ 7Ff1p/os2lE7LPpRRfKOMx7z+Kj+bEV0nW03+nPD/hKdEGZXv8c6gTm3Ff2w/+lRd4lzPmupEDBw
+ cU4EkQfYJV/NZx4yU+B4K3pRSHsOxz+wrr3+jyHoz2PIdXLiOWWycp5ZzkYreB9XJDXDpR1Qm/jL
+ T+JyJGsrkkUvQvFEXImz3+bH8OIRnWrv4na8MhZft2feZd4eGM3Zdd2Cf9HhJfgHS5/xllumX7/R
+ b1GqH8rgOro/WPKfwT+x9bQ33zL9U/yCvLP53HJ9ooHGoMOI3TPae/S15nmo4CPryzlerrPtQDer
+ e+o9Dwn7CAhOC1d48WracVBROLb78Jv+7603LATCsD5vuvadVzmCzffjH1jbR7+BJ7dKlmOkUCc+
+ k5ezNtIX5/sVsj8TMLg5vvzAZuav49IPeRf8jse8ac+4Emd/FDOfckQ2wwSnPuMzXhEpjm7P+MOu
+ CER11txY55/GAfRUfPn3zz9y14T/g1+X1yPq19aN87fhgPtefLn49DffPP2H/3FSP+BcvGfKg+sT
+ DTQGHQrMcbQ391H0I+d5aDSZWF7i6zzq12YHmcsjXu4L8PCinefTj6YdBxWFYxRzPmu5DwSU4Zwv
+ /NGOPHbUcOSTmED8TNbquN983/vA8i/n+7M8SZVKjuBPma58siNpBs3kM/gcI55MaoZreM6Lt/GX
+ n8TlaMfJrNF+7b/bzf06XuvLUdhbdhyZT+SnYjO/bp+yzKmIh7NjePvNx6Yvw/el/iq+xPsEz5xF
+ /l6PqF9bt77OH8VfZfnG62+bvua3bpn4j0CcTVf2q+qmvop9pV3dy8m+GvtKh0aTs2bi6zxcj011
+ F512gExrPbR+9GNG+mG/I5LAUWxxBNA4AQM3zgtZyxx2yZd2GM0ns0p4Z71+AYIYhKHOYe8D6xXX
+ fSkMH9tPTPrOVHWiU0ZRmOVsJDvn+xXyDJd2gjOpOb/kk+R3PI6j22Uc9ut4rYezeohwIRPH4na8
+ gIoj9R5pH2m1h570mff8P/Bv+H3bDbdNX/62W6d33Ix/z48psgmrEM7Z65F1inpC5fpipBll/HnD
+ TdvTM996y/Sd77tj+sP+mgbdmXq5PlG3MaAe3jejnK5U1RP66s9WHOk7j/p1Q91Fl5Vv6yGHxJu0
+ 3ohaPN1vj0frH7FwXuxB5AG85LFCSOOGv3S8tVo9fnrVO69uqc0e9z6wVseeS5J+Art00WxsUlw+
+ 2Y1LfCg01O0E+PIDA9eUSZ48v+KARfHkJpr5NZ95y5FCFAwK22c+4V/FRr6lz7g4yrw9hHyGDTxH
+ /tXv3jE95Y2fnn7i9++KdUfazJNdWYVw4l4PTrNuUU+oan1oRjlG/Da36RW/d+f0tN/8tEb+Q6dn
+ 8uX6RN3GgHo48VFOV6jqCb3fTOYFcp29DKrrXnUXXVa+rYcccp1cdcVBWIun++3xaP1jsThf/mkt
+ PvCSxwohjRv+0nHkv+eXhcc5sFbXkKSfmMo1UqgTNppVcuAVEef7FfIM1/DlBzbywybH8174XiS6
+ EQ4WxRNxJc48xpm3HNHcMChsbx7VWHFLocDm/GFXBKI6427/5VNHpy94003Td//O7dOt+T/4VF/k
+ z2zZlarTSF31hui6Rz1DVv1pRjnHsOc/C/id7719+qK33Dy9of8bgcCdSZfr0/sq6+F9M8rpClU9
+ UTFu6pSzJq6zl0F11foMHPWehwXXyysX/R7rJF4z1htR4RjfnI9I48jna/jpecCOPBWA/ZmvDPUg
+ 3A7Onj2uzQfWtb91Kcg/l03YT0yXzqnWCRvNKjnw8qWiNK8hz3ANX35gIj/A55hx1EjaBb/jmcdL
+ fOLsl2LyliOyGSa4i9vxioj+Sm8eyzIvPyGdEcMHb9+env+OW6bn4ntM78fPU9V6MLusB59bnSny
+ 8npknbwuOS8eCLMx1tN1X+l7Wl/z9lunb7ru1unD+HGJM+1yfdRQvb10KLhOmbEPl6onDw18pFyo
+ XA+3qfWb9lcdGnzw+ox1Ja8Z9aZDhU4Z4hjm8Jv+jRNQhpwffLajZfGFA+OGv3QcuM+ZXnH9gx3J
+ /L75wNqersFJKbZ+YirXSIHzvDQyyGjanFcTCxG3E+Bpt+SX3HnTjx13dvvnYiZP2GUcjs/xmhfm
+ 9cB5y7bPfCI/AsXX7WEQ0wpEBLOQTluB/xfvH7z/9unq37hp+r8/cZcKM6srM8t68JlNuMhf9RaM
+ dYp6hqyy04xyjmHvdcr6r6f/8Ilj0zPe/Gn9X0XGdaZcrk/UbQyoByvSy+kKVT1j86ec9XDdalmw
+ HHvUXXRZeeJj36n+rLsZFYfK7QnRYcXSb42Ml+sf15wv84CdcACFA+OGv5o3cms6tP0lydnHzQfW
+ tHWNTlCQ9xNTudIn/kifY+DoNOfxAG27Qu68HV9+YJL8ORbuOPz2O49XRZj5ddzmLUcKUjAoHEfk
+ EbIiIqD0kb/kyDH8hHTaDq/9/Tunp/zGjdMP4h9a4PetnNairswu68FnrMtyvb0eWafRF7vWmeai
+ c7/ILtc5Rv681g9+6PbpqW+6WT/HtegsRnDaXa5P1G0MOBKyDpkS5VY/HhpNLlSuB+BVz6wjQFV3
+ 0Qkh0zGffsxYb0Qtnu7X8YOXeq5/XHO+bAvELxxAsU+MY15lqIeGuyY0s2GPA2vnGp2gYOsnplNi
+ kIzRQXYcvec8HmaOUt4Lz/klf/lJ3hzJvOC333m8qsYsTsdtXnDUQ9BBdhyRR8gCkqf0xlmONBfx
+ xOxpM7wLP6bw5b954/Qt190yfRw/buB6Rl2QaK1P5pn1YIZYl83rkXUafVE8NMMfdonG4JXfXOcc
+ gSH/f8f/ofzWd982fRm+v8UfqzidL9c36jYGHUaRbqTnChnPennzp5w1cN1UplHPVj/qXWdYcL1U
+ eeJzPv2Ysd6ICsd1mq8jkcaRz9ecz/HQsvjaOpuvDPVQuGnz97F2H1j8/tVq63N1gjIZJg2qGvGc
+ Mj10HJtKshW8j0tF2hu/ib/8JG+OZA2+dGC/83i1iWZ+Wx65ZqUHExw6jsgj5FA0febRwljEk3Ht
+ 9/ETOAT+99++ZfriN944ve3TPARQmMo781vUlUkxX+L4jHXZvB6cJm70xa51prnoxDTHN7vO/45b
+ tqcvx6H113B4/QHiPx2v7FflFeVTHVzRli7r0uoXm9/2I3PXuZZlXkfAqu6isydaj/n0Y85602nx
+ 8DBJvzVSz/WPa87neBS/cADVfvMhVtsm5ws3bfw+1u4Da/vQNSDFQYkgQNJPTKeEafiVPsfAJV6x
+ tyS63Hk7vvwAnPw5Fi79kHDB73jm8dIucfbruM1bjshmmOBYFHx0vCJSPbo961P07UF0+/52DLH/
+ 6w/fNj3lv/7h9KqP3o74ETILw4eogyTORz2odp31YJzUMuBTXYlzHaOe0FIWD55nY6znDH+c9UaH
+ Tj+Fn7J/Cr5M/CH8NZ/T7dxyfaJuY0D1VXD3o6rphTGeq9P6s6od69J51K8b6i66rHxbD9WfeJO2
+ Nx1HgfnaF5jp8WifRSycF3sQeQAv87Ii+DKPMvR84PAvreJs2v19rN0H1rR9DaPWCRqjc6TT1mQ8
+ DCjHoTAbrZC+bifA18kMg5m/k+SXf1gWT9hlMR2f4zV/OVKICg8K22f+kZ+KiHxLn3lzjAzrIeR9
+ PPzXT901Xf1fPzm9+IZbp5uO8oc/mS8CZmHYVZGnJOW3qGsqiOMzu3KRv9eD0+SLegLq+tqO7miv
+ Mexn+Ga35E/51mM709/H32O8+jdvml6vH7kH4WlwuT5RtzGgHqpopodMXCHjKXnzp5ypum7Qs9y0
+ 2qvuohNCpsQVHk+xDPIjhfwn73wdSaB4uf5xzflsx4iMo0Hml3mUoR4GDuLW+prQ1rD7wFqNn78i
+ eT8xXToGyR50kBoDl3ixtyS6vBe+/ACc/DkWb/oh4YLf8czjVXFmcTpu85YjspkOCsdhHgaSvAKU
+ PuYlyzwI4nmfDr+LH1P487910/TVb75x+p3b/KXUyBdBIx8kotF5Z1qLugoWOD6zCTeuB6eJi3qS
+ HTLdsHtm42ydsv7DbsmfcvLzt0O84J34EYy3nx5/zcf1jbqNAXVhZXo5XSnjWTdv/pQFFp51tp3q
+ ulfdRSeETMlTeDzFMjgOKlo8Oiz7OklLvwJu4Ms8wEseO2q44S8dDxxg6xMdWIufvyJJPzGVq4Ik
+ l4PUGLjEK6KWRJf3wpefxu/aws9J8DueebwqwixOx23ecjTCY02BV1PQLuR4KHnERbzM20PI+2i4
+ HT8O8LL33To99dc/Of0ifnlcxY8YR74QqFDXe16S8lvUNRWsD5/ZxFUITnR71tH2OS//EGZj2Gsd
+ E5+jDXkf1x54/pDrF+HHIF78vtunG/l17z69sl9Vtyif6+GYRzkpt/rxUGlypue6eRnEQ4JWP69z
+ LBPm8STTMZ9+zFhvOoVjFC2OrD/14gs7zJd/epEb2AlXE/O+o+mMjzInt3Z9H2v+hrX4+SuS9BPY
+ KZkrT3iNgUs8XfUkurwXvvwALD8stmiQ/knwO555vCpCFNN+GVbylqMRHhxabx7VWPZSKLBhz7ha
+ muFHZPvo9rP4N/Ke8l8+Mf2j37kFv85lUR/EOfKFwIKz3aIOkth9mJjlnQripO6F4IR5a8z1i3m6
+ od1snK1T1j9GE/E+ruPg+eNaP/JR/HjGG2+arsVf99mPP77Fekah+4C6qKKod6ZqnPGsmzd/yoWi
+ QSyD6ip51G+sH91m5b1Ohdc6m1FxUNHiub/fsFar9a7vY80PrGm6xic1isKkkHQ/gV06pyC9ch+4
+ xCtlFUVPvoXceTu+/AAtP8DnWLiIZxO/45nHSzsvDgfG6dG85WjQlT7zDzsbLuyTT+blJ6QHfHj3
+ zUen/+mNn5xe9Fs3Tr/XfvFU1bmthz9zImQWRl2feWdai7oKFvnzudWZIi+vR4xt3co/MFqHHFs8
+ J7Pe83XN9Yox/N+IHyT76++5bXrmm2+a3rTP/pqP66OGi77KeqCuil8Dn/Cn5cVDpcmFqv4OHsnN
+ DrLqLbqsvNfH8+nHjPVGJP/EkXfOR6RxZPBV60sDXB5gR54KIP2Sz3b5MHCcl8E1gdCw68DSyQ2W
+ PpKzTmg8p0yGjqNTyVbwPi42Na698Jv4y0/y5mgi8eXNfu2/28GhIPbb8lAtoCo9nmHoODL/iNeK
+ po954SMC8dDzA3+95Q/xTfX/75PTm/7wqJosm4GRVZ0rbzfTCB+FiToYz/uirqkgTupeCE5k3bJO
+ oy/KPzCs1lgGMSm+6qPjrPdYN8ab67XZzw34C5DPx/e39sulTZ0F5xjlcz2yDhkt5ZYXKsZNrX5O
+ CEbJnUe8A1d1F11W3naSIh4N5OPKUKEVIo7inM9aKgSkqDgGn+0Uf/KFA8dDPpklcPjVPJj4PfV2
+ jQOL37+aVrOfvyJbPzHJoWA4RpAaA5d48bckurwXvvwALD9swvRzEvyOZx6vqjGL03GbtxyN8KBw
+ HOZRjWUvhQKrODkf0zMCCQ/s7SZ878b14Hpp0SugWfyYHflCYGGE97wkLjgUSztMOH+peyE40e3t
+ v+KBHd2QdjaqzmF3Eust/+lnA77ibX7wuC8ubVblG3Ubg9cLUUY5+IQ/0Y+SWL8hY0qX6tt5SJB1
+ AaLqIbqsfJ9PP8EXfWD/xDGK4bfWUzjyhR2AYo8EPMAu+Wo+8yhDPQwcRSU0+z7WOLCOHf5jRLCY
+ jK6PzpFOReGROBvswodCQ91OgNcikg9/Zv42xCPO4Et+xQvL4gk7Lhov5+PR/JisB85btn3mH3YE
+ iq/bwyCmyZ9+9LwPbq4HwvKiV0Sz+ihsN1PmTwvnlXWj6aKunMp6SN0LwQmq0558Uc+YV9nxPBs3
+ 4ZvdrvqeAF95hh8M++pyfaJuY/B6IdJIj0/40+oHmZs665tJSe48JGj1q3qILivvdZIkh+Q14355
+ w8K/h7o1rY89MfMcB9bWscdw0ic1ioJkGf04mZEM9fjjnPm0GR8KDXUjH67Om/w5v+QvPxFHx4NI
+ fHkTLxeTi9T8JM5+7d96gOqB85Ztn/lHvAQSoKHzhx2DWMTDqQfycj0QFlcsuxABzepTMvOFwHoI
+ b5wkzkOxtJMB6yF11IfPcZV/1S3qCV3x4Jnu0u1GfK47OZf1DVl2ictR8Hm/kmI/Xc436jYG1EMV
+ bem6QlUf6PubTubkOrhMqutedRddVr6th+oZfQDSetNp8XS/PZ7j9xcjBC95HJhCpr35JNb6DlzY
+ Ma6tVf2e93FgrTzpkxpkbHKSsgnoMkc8pyzKhks853sSXe68Hb+Jv/xEHB2/5BcvIiueiCtx9tvy
+ yDUjLsOFQ9tn/paVsfhSH/PCy5wT8bA/BtcDYbFLuOhxzeqDuZEvBMGIj/ykp+GirpzKekjdC8GJ
+ bm//FQ/s6IbVmo1RP+FOYr2z3nvhndfww5j20+V6RN3G4PVCoKOdXCnjmY83f8qZk+TOo/WJPhZf
+ 7mMI6gfy0k/Opx9NOw4uEJ50x8DDJP3WSP1x+4vWsBMOj22dzUc9rpwvHCfD32pnw4G1vdakT2ok
+ wSCYDJuHpjmSO2QMM1ziOQ+FhrqF3Hk7fhN/+Yk4On7JL15EVjz0xyLM/LY8GJ4dKETBBHdxHWfk
+ R6D4uj35PT0IKtsH/MH1QHz4UB0ioll9MGeZdYPAegjveUmch2JpV/WQuheCE93e/iseOKKbdFej
+ Agi7k1hvB7w3vuKFL6XFoPbR5XpE3cbg9UKcUQ4+4Y/rz/C1yZvMOV7i6zwkyDqGnnUQL/eFeG3n
+ +fQDlbS095PuElscuV7kER9RS77wx3iFE6DhyCcxgQ3H+fC3s+kNa2v1GEHoHCw+sT2Ss05iPKe8
+ F57zPYkud97yIziTYoiDv/ws4ul8eqZdLELFGXlkHPbb8hiOBh0c2j7zH7ziKX3MS44I2qJlTA/k
+ 6HogTla0uiLzy7xSZr6IlgUXvus5N+8DzlQ9+Ez+Rf7lP9ahy3ST7moMe+FOYr3T3154znc/DHM/
+ Xa5H1G0MXi8EOsrpChnPujGv6M+WkOtgO+W9V91Fl5UhPuokh9EH9I8PFZAjZYnDb4/n+P1Fa9gl
+ n/yk3+EvEx64sCN+tX4MJV7+kvC160PTzvQ4TvikRhLRhBwZco14TnkvPOd7El3uvAxSsuDhB8/J
+ n2PhGn7Jbx7zdbvE2W/LI9eMecq/HROnplD+UQ9G1OTB39IMHpHtg1vVlV0XTcKwnF/mlTLrBiUT
+ E77rObeoq2DkFdr8i/zLv+p2nHUWe/fneGV/nPWer2vwN3zlGfwMeT9drk/vK5dfbyIIdJTTC1P1
+ jM2fcuYkedBpnWvfiC/3MQT1A3lddy57xpN+642I/SA945uvo+ZP2F9EwU64INKQ+4x6XOF44DhZ
+ /j5z4hmFywfWrW+/At/YukgQJgNjn9geVTI2gyig5qikY1zgyeOi6Mm3E+DrpAc6+XNcxiPC4EsP
+ jmcer4ow8+t4zVuOBh0UjiPzj/xUbNYl9TEvOSJYxJNxPVBjrQ8WPZuBsVSdW13YJBJZGOEjP+E5
+ t6grp2jA/KXuheBEt7f/igd2dEO72djiOZn1dsDhZ0P/VZ7hB8O+ulyPqNsYUBdVNNNDzK5U1Q+y
+ 18u4TEr6zqP1iT4mS9ZddFn5Pp9+zFhvOi2e7rfHc/z+Ih/iII/cOm7HE31HSK5/4cJO86tLJp5R
+ uHxgbW9dlU59UsfJxiTZDADWiOeUSbAJz/nk03OT98Jv4i8/EQeTkn3jS37Pz+NVERC/4VzNlkeu
+ WemBKn3mH/lZ0fSZd0szeDKeB3rMOvkzFivpq+pcebuZJAo26kQLwxZ1TQXrxWc21SL/8s/5tm7l
+ n2b4M5ZBTF7fxOdIHwv+lOUncTkKHn2LZ6VFjn10uT5RtzGgHlmHDNYVqnpC3990CqU6u0yq6151
+ F50QMq31UH25zmZ03/B5xNP99ni0/jbT+pV/WsscvOSxQkj7Hf7S8cARNvpuOjY9hjM+sNb4LjwW
+ m5dPahSFMrz5JPQ8fctn4nIMXOIxTQMNdTsOv+FMas4vucVxPH7FC4Yer4ow89vyGIkoRMFYUzyw
+ aOILWYumOFLvkQEHfXuojB/QB9cDYXHFsgtZX+XnkQGOfCGw4MJ3PecWdRWMvEKbvwpBZbe3/4on
+ /QNDd2MZ+BR290E/iXwf3VyPaKAxeL1UhwzWFar6AVH9mRDhWWfWL+qqh+jj0KveotOTrL3+uV7E
+ m7TeiLRCyTvnI/LE/UUU7MhTAdif86AeVzgeOE7aH83yRxt8YK2mx2RT6+SEcR+dI52GT46xCTqO
+ TnM++ehL1wnwtFvyp9/iPQ6//dp/t8s4HKfjth5R1QPnLTuOzD/zBJAADRFnyU4v/YT0gA+uB8Jm
+ l6BueTm/zMsjmyTzp0XmSRvNS9/yTgXrwWfyG0hJV/lXnUZflH+gGBXtNYa97HKdcyTjgj/lvfBL
+ P6TYT5fidqGj3lkPVTTTQ8iukPGUuA6jnpmT6+AyVT1b/aoeosvKE9/XNfpAXgEkTP6Td/jt8Ry/
+ v8gBO/I4ME6E3+EvEx44ouzPYfinGOINC//bkE1HCEcm0UbnSKfhM3E5LvCYJpGGuh2H3/Dd/Ol3
+ GY84F/z+DOS4u13G4Xycn/VgqYcIFzJxagryhxwPJdss9ZHhIp6YfcAG1wP5sEvaZnd+zpPBjXwh
+ MDHhu55zi7oKFvlLrULxqa7yrzpGP0Fb/vGsOuYY9ZPdfdBPFdg+eXB9om5j8HohxtFOqHPUn6Hr
+ sGoy53i5brZTXfequ+iy8rYrvHiDL/qAHs3PKObrqHnhyOCr1rfWk/OwS76az31WhnoYuLADXuwr
+ fNsKV7xh4fSKpvZJDRBlgtk8ANaI55RJ0HGJ5zwUGup2HH7Dww+E5M+xeCMecS74FQcsK07qWZyZ
+ 35YHw7ODQSe4i+u8Ij8Cxdftye/pQaCnfXFzPRAfl5t1iGtWH8xZZt0gCJZ5cSLzW9Q1Fcyfz63O
+ FHmV/1iHLtNNuqtxtk72V+tuQt7HdQJ85QmLkf0wf6CfXA8WEBUYg9cLwUV6fMKfqIek1p+Q8xJf
+ 5xFvs4PMOohX/UBeyjmffjTtOGgg/7bTYSkC21nL+AWU4ZzPdoqfPBWA7c0nswQOvwpv9N3Eb1vh
+ 2pr+5fX8v4OPSqebPsMpFQSlEeCUSbAJz/nk03OT98LXyQxs8ufIKs/sGl/ySw/L4slNFMW0veM1
+ bzkShWBQ2D79RX6MSHypj/mYHgQZzQM/uh6Ik10STcaoZvUpmflCYGGEj/yk59yirpzKekjdC8GJ
+ bm//FQ/s6Cbd1agAwu4k1tsB742vPOFLaTGofXS5HlG3MXi9EGeUg0/4E/0oifUbMqZ0ia/zaH0G
+ ruohOlaED66fJDkkXtOOg4rCMYo5n7UwOG5/EQU78tgRJ+An85DICc8XjqL9OYzVo/ijDVvTkbse
+ D/ShdOqTGmQMgqRsHprmiOeURdlwied88um5yZ234zfxl5+Io+OX/OJFZMUTcSXOflseKt6Ik3DV
+ Snlm/saHouljHgHKruXHx/1wuR6Ij10SzcC4ZvUpmflCYMGFN04S55d1TQXzl7oXghPd3v4rHtXX
+ dnSXbru+1jnX3YS8jysKL7vE5QhU5YlnpTUs98WT8426jQH1UEVHX0WFqj6Q+5tOJuM6MG/nazn6
+ GCDKqrcKrieZjnmvRJTVcRDW4ul+xS8tHQq4gc/xqH/IUwFkPNF3ckP/9JY4So7//2fvXYB2za6y
+ wPc/nU5CEjAoXoay8IIZCAm5QBDRGhFrlKmxykJriDrKCASJiKMU4zhT5eCoU17GKRwZLEdrnBKm
+ RDOiI1rlBS1jB+SWEGIIBJAAIUAg5Nb3TtLd55/nsp611/ue7z//6aS7z59071Pn23vt9axnrfXs
+ /b3f13+fPl1l3LG9/y0vwP+b4tqvoitJ/aT2k43dryczgkXhFoRTGOgK1/Pg41KjmjrFT3/nwVp5
+ ePlqv3mTxwF87eF69vUybt/XyCPxEN51YS0486Z/48sx/LW/6JunC7rNiz4f3hLqUKN17r7TLwCC
+ EZ++09ZBV3Ixnjiuh840OTq/cKVn7TMN43bzqOdWznudG+vIeV2cB+mu1Mh9bR0tJ3SRomkPNVup
+ 1hN238/Rkfw6t9L1It1FF+V9TrKkP/UzaX8jGvXMvLOem98v8oGXPE6kBIw3n/Ml8cJVnHB1n+44
+ /7X4Gdb1ZyukLvXuyUwwLwMAPWMdW5R1WXdxdvB1jZvwE3SKv/NUHWxKeRywuCuelTVP1YUN4Vzf
+ yJMzaz9g1BQ2RZv4cgy/ccZXGcVT1m2fVD+q4KXIZWBR7q/qb5v9wqDgwk8/9w66CkZeoc1/6L/z
+ c3+cW+cXq+P7nLGnuOAzJx/njMp3Ef6YJ2FXZVbduUC6d5LT54Uil5w+GOOp17ifoxnr4LjWc+jX
+ eohOCEWv/eQxqe8N19wPb70vZNc+/ciTsedzHDtrvnFufp9VZPbFxwTcP96768/GA+uaH1iV1E9q
+ iEIbJH4SsiYmJYW55Kc9cMFjmw5N/VL2RfhT/Mp3i/yuZ1+vTn2Xd/SxGlGJgkl7i+s6qz+JSD1m
+ fOzqsPJ0v7d5YT1QL09s3f51jkMXvQmih/DpmzMbOejKLTqoh9wShqsenV+4uk/wch9oxe1mJ5K/
+ 71HdP5GWfySobdZR/JnJf8jTcVdkYX1KtzVBFykqeV0q7aEf/D4v49KO+CYP9Tqlh+ii/NBJ+jJP
+ sjKea2+IbtZRwMvvFznASx6lDV/6oB9jx0ebm+7bYdy4hgfW9XP8TaP0qTpMmBE8Z0L7yYl1bIfd
+ iOd++LQe9uTtPIKzKZa4+DvPoR7SHfnFC4aus/oIznlHHyvRokNCx6d/26pIfPHXvvAK50YtrsZk
+ PVAWFa3LwMp2+rTNfmFQcOGNk6W2DrrGwf65Jv+h/84v3UpPQDs/w2hnrnjF3cJ5J99F+GMepLlS
+ Q3VTAenTE/SQomkPNVsh42n5zR87TcledNL55PtLdFGeeet9J/3rHigrgISNevjQSd6e6b/p/SIH
+ 4oQjHQtI3pWv9xtHlPOpWsbhWXUNe35gVVI/qf1kI0l/UvESicIt+BOCtVql3axc6pYrj5vwE9B5
+ sFae5LtFftezr1ci7PKOPFJBiZnemiOx60j/tlWR6oi/9oVXODdqcTUm64Gy8CuXgZW1zkMXvQmi
+ h/DVn/CMOujKregh9xSCGzPe+bsexOl8gdnNo57cu55NyNc1LsF3n4hgnqs2rEfptiafF4qt9rjC
+ 77qPsqjfsrGlIb7JQ4J6/xLQeoiOinAx95NH266DsMaxipXX9dPrPI468tGmB3HCkW7mJV9FZr9x
+ FYd9VUs/nlXrgTWefGTxE9szOftJzJxli5JxBzz3KdZu3ITfcDbF1hZ/57kFfj/x9/VKjV3e0cdK
+ pDIFQ0L3mf5tqyIC2l/7shXOjVpcjcl6oCwqysOu0ec4dPEnJwCCrT4ZYthB1zjYP9fkP/Tf+bmf
+ 8xPf4ZwZXvuYpH/jR9yRP7byBJe5eMSLNeerNqxP6bYmnxeKXXJS4bqP3Mcvn5eU77asg+PU90W6
+ i04IxTKu8cpjSt8brp1HdLOOKvDy+0UO1E8eJ+KGzrnvnTe83ziah3unB9b29DcsSVNn6E8iHCIu
+ v7Tj7EXbPvP4GY0hnJdX4fXWPwF9mdKmbhXvVvXjyXp03+m37qDeXYf+Vzx1Kj0lE/Mpy37ufAM/
+ 4m7Q9xI88888V+FMZg3WhwKue+R6YWNUe1zh99Cv3vzRl1gO2Yuu7BEXPUQXZRwnSwmJLz7mpUP5
+ iaO557OXDgEV2LoXkSfEha/3x70TkRMvHDedb9THH7qfP/1vCSmNVOFscdcnVjt0drtPpHlW49Ao
+ 9e0eqh9FXP4JmH4B1p1Bv+qLfZcu2Nj1HQdxXOs2G0+To/NTF/inzTRE7+bST7jgM5uQr2tcgu96
+ EcE8V21YDwpIfXryeaHYao8r/B76wX7KfsPazvDAOj97+t8S8krUu8efEH6y8674k0wL3Z3dJ0ht
+ I7wItLoSL66bV73eDVXVrn6VPT7p+N4QPn2nLetBd3glGPtniN50WtHSCE4z/NMWD1C7WQdQ/MFn
+ JmP5RT7sHf/Ac3/yd9wVWVgPCsjz6cnnhRpXu9R16AfE/KaTdqzD4BHviIseoosyxJdOSki8Gfub
+ jk+4ytzzEXn5/SIKceRRWidw3pUviReu4mZ95/wZVv6REIctCGeCxuwembRycr4Jvog09csl+OMn
+ Ytujjq6LpMUXftfjulOvVZ59uW77EdmLooPtvOnftoCqI/7aF74qONSTum7XbD1QJ29JbiGK2ena
+ NvuFQT2Er/7k595BV25FD7mnENyY8eM+1T7TJF3PpZ/qPtw/8rlArfxyCb77BFptjdCrsPT5lG5r
+ 8nmhwGqPK/yu+yjLb37HY6OGdXOc9NX5jDjY3keA7gN5ic9+8mjbdTBA+cO757MXcTe9X0Qhjjxd
+ QPKSj36MWiwcN52v+8Gzav3QvZL6SY0maLMZzGolM7lJdRM8U80mpj15w2945YER/syNq3omn9Z4
+ cT37eiXCrk7jzNuJFh0c7jf9L1710/7al10VVJ6ybvvU58Nb0rci/aWv2OwXJVMY4aefewddBSOv
+ 0OY/9N/5uT/Ozfo6jumS9iR+xLlAJq5R+RQXXGZAjnkSdlVm91sXaE3Qg4qw/lRqhVof+J+y37B2
+ P3THYXP4Se0nG1WbT+C+XMFlLlzw5Jlvkmmf4jccbwqG4bdmXj7arOsW+IXjYR7iUod5zGfeToRF
+ lQuH49N/5WclqiP+2q/tRaDVlXixHqhTTxV27LHTB1urXxiCUe/qT37GHXTlVvSQewrBjRk/zq/2
+ mabPGWvZ5OO+eKN/zXbI3y+X4LtPBJD/qg31aaFL79JBylhe12yljKdufF8MXaox6+a41jPvG2Ba
+ D9HlBOZ+8iQrbAnH/fCuvLOe8XQdeVYcidY3p+ynD+fTfWIe/FJewZyv+9E3LPxzoULQHMepTyzG
+ Hj+x+onPOFyeXZyJ+LrGTfgJOsWfvLfC73pcx4wDsWpwfSOPVFDi8mNCoOtIP7bLMfy1L7zCuVGL
+ qzFZD5TF0683Nytzf1V/2+wXBoUTfvq5d9BVMPIKbf5D/52f+7kfZIfNNEnXc8UrLvjMycc54xL8
+ MU/CrspsfeoCrcnnhSKrPa7wu+6jLL/5HY+NGtbNcdKXBEO/1kN0OQHi6zyUsO4BOPtnScof3lFH
+ FXj5/WKBiCOPC1PFzrvypeGFq7h9fU//W8K8mXG2PCWYFnd9YrVj+I0zXvpzoxZXY7r1T8D06/51
+ q0oHduK2bviks6PuoC7bof/Oz31euvJzRtiND63hb/yIu0HfS/DHPOzlKg3rIaHrXmmCLr5H1R5K
+ 9v1r/WDzTR07PcledPYP/eiX7qLLCfAYs588ZuxvOqOemTf5jSNfxe34fE3YWfNVY87LPjpQi4Wj
+ 6T5dN4D4F4T4ofvT/5ZQ0tQZric/NqAR7Vq0raPlvvyMxhDOy6vw6rp55KyTFXu4v/TlmZckbTJi
+ 9U0/4+jnpVtxcrB/uacQ3Fg41cFLXPo0DzDiyzz8rPcYp3wkzrgEf8yTsKsyW4/SbU0+LxRZ7XGF
+ 30M/2D4vKd/tWC/HSVcSREfx5fxIF+V9To1XHlP2Nx2fsOqZeV0/q3OeFNK69/nQg/qFw7L300dF
+ Zr9xFYf9VZ//LaEj1AR7gZugMUsy2JqZE7/7CTtwHadcTDMGcRiTd+K5f+TvPId6ikhTXlyP655x
+ SFhw9uX89mO7F9y37TrSf/pU4CE+fFVB5Snrtk/WA/XzuOsysKjWeejCy5T+GRGdjOfrQdc4KIvc
+ pQ/XNTo/iXN+8HV+rCV/5lFP40ecCyxyTpfgj3lG5JVYWp95r6KHFE17bBS/6z7K8ps/+qYZ2YtO
+ OreOjINe0lt0UX7uJ48ZfW+4XvX4nsSuWfeFfB4rT3DcR/3CYTnOre8dIdlvHDdvvHf8Yw1305VL
+ 7Sc1muMlZ5O8NHLXjHVshy1c8NwPn9bDnrwT33mADX/mxlU94mR9Y4gXkc1T9acO52VZ6QfBTiAW
+ 0cG2P/3bFlB88de+8FWECEZBt3mpflGDP9nYqMfq3/qtfuEXjOeZvjkz7qArt6KH3FMIbsx48pWe
+ tc80pN3NpZ/qDj4zsFUIVx6X4LtPoJnnqg31aaFVYMnp80Kx1R5X+D30gz2/6aQv6+Y46SrCEQfb
+ +6SL8sRnP3nM6HvDNffDu+fTPv3iE+zA5zjVL1wRaUofjkvDnVdpna/7wbMKf6zh/AMKqaR+UoOM
+ NpvBrFYyMxl+y5+5cMFPPq35chN+uysPjPBnbt7kGXxccriefb0SYZfXOPMiqBdVHmz3m/4Xr+pv
+ f+3LVvoiqPUVmPp88qboMtd5cmv1C4N6CF/9yc+9g67coq7sX+4pBDdm/LpH2Wcaxu3m3TlF/5od
+ yNc1LsG7r5VnBV6Nlc+ndFsTdJGikteVWql5nv5mYly6kX/y6HyWfq2H6KK8z0mW9CQ+WbGgY9Qz
+ 88569D6rQlYeE5kPvORxIiGNW/mSeOEIc/1dH55V/BmWH1h4GAjCGVn8xPbM1P0kxjr2RXjuzyam
+ PXk7j+BsiiUu/s5zqGfyac041b+vVyLs+hp9rESLDgndZ/pfvOqn/bUvuyqoPKnnds/WA3VS0dxC
+ FOX+0lds9gsnBRd++rl30FUw8gpt/kP/nZ/7OT+yw2aapOu54hUXfObk45xxCf6YJ2FXZbY+dYHW
+ 5PNCkdUeV/hd91EW9Vs2tjSsm+OkLwmGfq2H6HICxNd5KGHdAzD2Nx3lD+/K6/qDI5/Hns9xqp88
+ LkxA41a+NNx5WWf12f3gWbX+kRDNCcKZTYzZPYJcFCySWlyMLyJN/XIJvp/MCAh/5mM9p/hdj+ue
+ cTw0Dvfj2X5s9oL7tl1H+k+fABKgqXRom+wYlcfG7X+1HiiLt0SX0TW5v/Tl2Z+c8FMP4aefewdd
+ BSs95C59uK7R+aVT6Qlf58ea6Up27TNUcYf7x/0b9C29L8If84jjCr2obl8oCVHXyee1a9cKGU+9
+ eP+WnmnJOlgm6XqR7qKL8ta78eI1Y3/T0QmFd+Wd9dz8fpEPceRxIiVgfN877uQ8G8dN51v18R8J
+ z5/+GZakqTNcT35sUOP65NGhlO0zj5/RGOOh4I3b++q6eeSskxV7uL/05VlvguqfEatv+hmHy8WH
+ CFbhlYN6yM2FVrQ0gtPMy1l+zuIBajcPP+s9xh35Y+9wiQP3MY+rujqvqpsKsO81+bxUf2qlrkM/
+ IHxeJ/SePOIdcdFddFF+6CT9iXde3xuuvSG6WUcBL79f5AAveZQ2fOmDfowdH21uun6HKR4PrO3p
+ n2FJmjrD9eTHBjXSm12Ltqml9mub8fOhIPs2v7hulMVbkluoMnl50pdnXqa0yYjVN/1shP59nBzs
+ X+4pBDfM2zPydz3hgVN1ZHYi44LPbCK+rnEJvutFBPNctWE9Src1+bxQbLXHFX4P/WD7vKR8tyW+
+ yUOCoV/rIToq4vi1L0fn7W9EjWMVo47oTz/yZOz50gfihGPamZd8FZn9xnHf+VSt/XxgPf0zLElT
+ Z+hPZj/ZfRbt0Bn3JzcPaZ7VOLQ6gts6+RPcl2zdCt4tXh7P7tuXSeXr8qQv9k0cX63HjJOD/cvN
+ hfE0OTo/93HZpi0eYHZzxQsXfGYT8nWNS/DdJyKY56oN61G6rUkPBda65KSuQz++iYedvqyb46Tr
+ RbqLTgiFtk5KyDxm7G9EPmHtz7yuH/no98PkBF/6AK9wgFQC5135er9xpHPf3Y9+6H5W/0hYSf2k
+ 9pONJPOJaelYJGskTc2FC74cmvrlEnznQUD4Mzdv8pB0iGTTh9A89FOcXV7Xa95OxHDDBLe46q9s
+ VSS+GU/+ph8L0d32lz4fHLp0qIp2+qjv9AuDwgjvPmVRVjiOcRKM/cs9heDGjHf+rgc6Mg3jdvPu
+ nPb3j3w5R62HLd7ci8yC7/N03BVZWI/SbU3QRYqOdq1U6wf//KaTdqyDZZKuuq+l49RDdFHe59R4
+ nbMZ+xvRqGfmnfXc/H6RD3WQx4mUgPHmc740vHAVl/ui+zF/6M43N4af1CCrN7ufhN63dHXZboIv
+ Ik39cgm+8yDAmrLJ0/WIs/jCr3oR0TxVf8SUn5ph37ydaNG1P/1XfkaIb8ZDr9reEaSg2zi/9BPu
+ 3H7nr6i/5oy3RIftglb/67x5SSQnhRE+fbttNnqMaz0YQn4R0PCQ3lhq5qUrf/PAx3SsQvPwk+8Y
+ d+SPvcMlrvLu+LF3VYberBGcc8nnenMuqdYKtX5QzOdlXKOOPLJP6C66KOPzkVX1aAJpfyPSCRHH
+ Mvd8zG0cGTz6fIvIE+LI40QCGke+DvR+42g636pv/tCdl44QzmCZs3tk0soZXOYDHtsk0tQvN+E3
+ /Eb+5D3WI84Dv+pFhboMAKT+1GHb++YFqBdVLmzHp//ikYjUJf7al61quFGEZd/G6Zc/69r2jz/n
+ E7dvfcUnbr/mOfynfjbqsdMHW6tfGIKtPhlhmQ+6xsH+uSb/yfMwf5+f+A7nzPDax6R6Gp97ZQdf
+ 16h8PtecV81AdZ9Yi39F3vaV3qy5L5xLPtcpRYectEdffBMPO81YBx9D6zn0az1EtxRZ+8ljxv6m
+ 4xNWPTOv8gFqHPk89nyuR/WTx4UJaBz76kDvN46m++5+8C8I1w/d61LrSQ6WObtHBIuCRYLqJnim
+ 4qXbjUvwOkSG4Xf4M7OrWY94D/yux7gZlzoc77rt70SLDg7XkXy2VRHztb/2ZSucG7W4OtN/8Sue
+ tb3xt37S9uf+0+duH3eH63N/6cuzPzlRN4XhCVSfshR20DUO4rjWm878NDl8HjXn/GqfaYjezZfc
+ jxv0vQTffVYeTFdqWB8JXXpHD+u4rpOVaj2h3Pymk6bkX3TW/5TuoovyPh9ZSshzNmN/I/IJa3/m
+ nfWsp86Rjzb5wEseJ1ICxve9404lXjhuHu6dHljn9UN3XjpC6vLN2T0yaeUMLjOTzzjszyZoxt7h
+ Eif3jfzJy2Z2cYOPSw75USHnGbfPa5z9COpFlQfb8cm3eMXT/tqXrfRFUOsrND3z2tn2tZ/6vO1N
+ v/WXbV/8yc/e64M6V78wqAdvVfUpS9fioGscxHHNy1b3hyaHz6PmnF/tMw3jdnPFKy74zMAe+WNf
+ hHdfKw8prtKwPqXbmqCLFE17KNlKGU+L97vu52jIOlgm6Xp4P7YeoovyxI/3i3hNqjoIG/XMvLOe
+ PGyE3vG5Hp50841zNp/zpeGF4/7h3t3BP+l+7ezp/5aQ0tQZric/NnC4/iTRom2fefwluAhqfQWn
+ T372Hdv//dJfsr32837p9mnPu6P6cn96E1T/vCSr79IFG9YlepRglIW91ptjtm3dCs9LXPo0D8Pw
+ O2mnn3yyM5P4qO/gO4U/5iHFVRrud96r6CFFR7tWqPXhm7jOY/ZjvSzTup8ndBddlPf5NF68Zu1v
+ Oj5h1TPzznrysGEk9xdfjg11kMcOJTCO9TlfFgvHfdfffOfXnv6T7hGb7zlpyjcJfq1PrHYMP6Rk
+ AM+Abo5e2Lyqr694/p3b9/6WX7Z9/Quft33infhzw90vKtblSV9uzG0dPunYHB11B32bI4Q7709g
+ 4UpPhVFfhOP3bnYi1UM+67vilM/Ufr0E775Wnhl6FdbWhwKWjp6gi3Ws9lCqlWo9Yff9HI1Yr6a7
+ Qb/WQ3RRnvg6DyWk3iZVHYSNembeWU8eNkLv+FwPT7r5KoHzrnxJvHBk8/mrWsfhZ1jXr/8CXUnq
+ JzWa4KOPyccT09K5BfkVtnDBTz6t+VKP0sk78Z0HUGvKJhl2a/yuZ1+vRNjlNZ95OxEWVR4criP9
+ V35Wojrir/3aXgRaXfkX/FPi9hWf8nHbm/6zT8T8HLTGflE2heFjpHSQxX1s9PmUnq2H3FMIbpQ+
+ meseZZ9pkq7n3TlF/5odyNc1LsF3vYhQWyvySqxyX1tHyw5dJLjPQ5VaIeOpm9/8sdOM7DoG6Ut9
+ TukuupyAz6nxOmczqg46Rj18mCRvz/T7YaJA7i8+8nMbccKRThviMZ/C1n7jKm7y4Vl1bXvO834S
+ hI8mqZ6cBLGImt0jkzI1tjlXkRMXPFOFT+thX4Tn/pG/8xzqEecQyfSurHmq/tThvK7bvIjqRZUL
+ 2/Hp37aA4ou/9oVXNdyoxUfPxG9YX//C527f/XmfuH3WJzzDevCESwd24rash+WqPqMHQTifY//S
+ W/HforHqAABAAElEQVTkKz3LFg/DaGcu/XxO0X/FHfljX4T3OS5+pLlSw/qUbmuCHtZ3XScrZDz7
+ 8Zs/dpqSPXl0Pks/+qW36LRS6NpPHjOqDsJGPXpYznOSF3E8/xp7PkSTFpmbb8SbrwO1WLiK67qv
+ P7o9/My3Xdv+0Avuhetnk9RPajRXl3A+Md1SXYIqcuJYnWzlWk2okkvwnQdg5QE+c/PehN95nX/G
+ 7fuituHtRKs8BNqf/m2rItbf/tqXrXBu1OKjb3rh856xvfZzn7/9nRd//ParnnVH98lO3NZB1zjY
+ P9e8hIf+fR7cpm7rXrT+DMNvxmuu+B1+xB35Y1+EP+ZBmis1VLcvlASQTKhQ30Q493WyQsbbr4fW
+ Aqgv6+C41nPo13qILsoTn/dD8lim/kbkE1Y9M++sR+fvsANf+sD5k8eFdb3m60DvN47mundg+tnt
+ q190P/5Yg/bfnqR+UqOJuoTziemWEMoQ+jOz6YEvh6Z+uQTfeRAQ/sxU6zJ+12PcjENglYMZjs4j
+ 8eBqvxPbn3zpU4GH+PCJvnnK+qic/qv/5FnbD/yW529f++s/bnuGblfaOujK7nTemLjmm6d0pMnh
+ 86g551f7Oh+sd3OfA3WN/jWbkK9rXILvc0YE81y1YX1KtzVBTyk65KS9dNCbfNjpS3yTR+cz4mBT
+ B8nG8+o82U8eM6oOwhrHKvZ89vq8HOXzdp7ZB+LI0wUER76KrMXCcd/5XMb527njB9bZ2Y/q0hFS
+ l2/O5OwnMdaxsTyJ5374tB725GW1sgVnUwjD7/BnbtzAH/nNs57IyROc7dHHSsTqDENC4nQppINt
+ VTTsVVfFNYGoPqpfnoM/r/VnP/U52/f95udvX/hJzyz5DrqmX+rFNS8b9RnD51H6jXOzvo6TjgzH
+ 75P4EXfkj6244DIX3+QfpV2Jpfst3dYEPa3jktMXtfWBv+/n6MQ6+Bhaz1N6iC7KWPfGgzl5+xvR
+ qGfmnfWsp86Rz/XwhJuvEjDefNVE9pmvy1v3DoX9KJH1Dev8R5PUT2qQoVlWz9k91oyg2CSYuOC5
+ Hz6th30RvvMAG/7MzVv1iJP1jSFeRDZP1Z86nNf1mhfBvahyYTs+/dsWUHzx177wVcShnlHaR+Xy
+ 1z/nju01L3ve9m0v//jt1+CPROx0ZUfRg2tetkP/0luwdY8IbR6G0c5c8YrLOWcG5sgf+yL8MQ8p
+ rtJQ3VQgOlImFMg3MceS0woZb//8piOw8CYoOsTTrntcfvGLLsozz3p/s4LkVR2EjXpm3lmPzp9Q
+ jD0fbe6ClzxdQHArXxIvXMVVffCPB9bZtae/YdUZric/NnTm7WjbZx4/hcXgm/ZjcHz+L71ze/3n
+ /ZLtz/+G5+hPy+eT1Q8p30FdtkP/wWnmpSs/Z+kHrXbz8JPvGHeDvpfgj3mu2tFYD12wulfRw/eo
+ 2kPZvn/G0/KbP3b6kr3obtCPfuktuijPa5v95DFjfyNSfuJY3/4ciTSOfB57PscxsvmqMePI14Fa
+ LBxN5xM7n1EY/oZ1x/Wnv2HVGfoTAofIjwYeEmcv2tbRtp8yYgjn5cfa6zNxS/7Er3n29sbf9Anb
+ F//KO91e+qel26xr1a1bt9IP/mlLP4bhd6m785PP+q+4G/QtvXe4xJEX/snfhV2RhfVAhdHR1w16
+ WMdqrxUynnr5zR877chedDfoR7/0kOBRZuikhNTbjP1NZ9TDh0ny9kw/z7/GyjP7QJxwAFUC41a+
+ 3m8cCZ1P7HxGYfiB9dyX/Qx8jwrC5GxuzO6RScclqyInruNMxNc1LsH3kxkRu3yjjpvxqw5ENk/F
+ RUzXSW2rj5xZ1+XE9qd/41WR+GY8daJdLfZitfyxtvpV+I+q//ZnPHf715/9vO1F+NPy6p9N8hIe
+ +pfecFn30rPs3T1ieHCZD/cP2zfwJ9+OP3HFM/OI4wq9WJ/SbU14G/pCLTl9UVtPvonxK3Zasg6W
+ qfU8pYfooozPp/HiNWN/0xn1zLzJbxwZKg6FL74cG+oljx0CMt58Hej9xtF0n2jj/o3PKAw/sF55
+ hj+Htb1NEHh5GfQErdk9Mmnl5Exc5gO+HJr65RJ8P5kRsMt3op5T/K7HdSdel3qX13Xb34kWHRyu
+ I/2nTzlUWNepuuhX+FiU/TE8vQJ/Zut1r/j47a992sdtz78T2uD8lxBu3OdR+uV+wNX6YY0o3+Ha
+ xyT/8f5x/8gfW3nCn7l4Jr84rtCL9Snd1qQ3Mcvse1UKtZ6w+eaPnZasg+PUt+7nwtHvfUTwvMRL
+ fPb1NOm8/Y2ocTyvPR9zG0c+jz2f62Fk81VjxpGvA7VYOJqV7+z8JzY+ozD8wOJq80/h/aRGE2yK
+ zfAS0JsZ69iKGrjguW9RtPILcRiTd+JP8XeeqmPij/ziRWXNU3UF57yjj5xZ14XikNDx6b/qtWP4
+ 0wdntTUWZX+MT/zT8l/6yc/c3vi5H7+9CvM1bozh84hOpSf8fT5Y63wz9zmA5xbOO8L7XHNeF+cZ
+ pV2JpfXRhat7FT2sY98rvGnpaT3rzR87zVgHX8Mb3jcA0e990kX5uZ88ZuxvRMof3lFHzot+8VVc
+ 55l9IE44FSKg6yGf47JYOO5XvvOztxdqPrD8U3g++Rg8Z7XCSyQKuDlXkRPXcWQfTdCMfRGe+0f+
+ znOoZ/JpLXofQvMwP9XY1em6zYugXhRMcNaR/o0XUHwznvwVxyIqD5dPpfH8Z5xtf/UFz96+87Of
+ u/3GT8A/JtbQOWO9O++yJTvWu3l3TtG/ZnIe9b0E3/eg8pDiKg3rUxdoTXiL8h7Pdte91j7fxLmf
+ QvrFOjtOulKfvG/EV+8v0UV5n0/jxVt8rIOOUc/M6/rpdR5HHflo04NzFI502tC9MF9FZr9xFcf9
+ a342cWd9w6qfwvtJjebqzb6ezEw6Lhn9tAeOxci2Q/5+uQTfeRCgPMBnbt6b8Duv8884HhqH6/Rs
+ fycqv23Xkf4rTiKCB4Fdp/qmrfCxKPspNn36c+/Y/uXLn7v9nRd+3PYrn4mruNN93YvWz3LrPdHn
+ hT3F5ZwzU8sWmgbGKf6BP+Zx0NV5tT66UHWv6t77KZH22Ch+D/3qze/41Y91syyt5yk9RCeEglsn
+ 6ck85uTDRG/AUQ8fOsnbs3DkqzgQdH5smQ9x4asEzrvyJfHCka/ynfvfEHJnPbCuP/p2bvhJjaRo
+ liSc3WPNwMS+CM99PmR2o+zJG37Db+TvPFXHxB/5xYvKZr0SYZfX/ZkXWXtR5cJ2fPq3LSB52l/7
+ sqvLY7+75p86xu/5FXfiHxOft33tpzxrw8/opec8tz6fyJ95d07Rv2bKd9T3EvwxDymu0mB9+3tV
+ 19FPidGuccZDBr6J657PfuSv+4jpYt1FJ4TCW6eqJzL3N6JRz8w769H7rIrZ8+XYcI7kcWEjL8+3
+ A73fOJp1/tdPfcN6xjP1rw315ATLnN0jk0ZUzONJyawTr8zl15ovl+AZf+RP3lvhdz2uY8bt87pu
+ +1FTL6o82K4j/dgWkPW3v/ZlV4fw11/qWRtP3enj8POsP/PrnrV962fyb4KgbqUnJLnhnLHnY+Bt
+ jq7Rf8XlHAUyUMsd/yV5OvY2L/Smpi6+UHPSw4jlya06jVOf3OebGL9iCyI8dXZc63lKD9EJYXYk
+ arx4zdjfdJCPQ8c48ia/cWTw4P7iSx+olzx2CGgc++hA7zeOZvX5DP+RBu6sb1hf9iL8NTPnb9aT
+ k0nRLNnmE1O9Ikgz/RgTF3w5NPXLJfjOg4DwZ27e1EXS4gu/6kBk81T9wblO12veTiQK0cHh+PRf
+ /UlE6hF/7cuuCkDwqCVJSU/Z+QEI8b/81Ae3L37Lg9Kzzw+K9PlgrXPIXOfpc4r+NVPJ8nOpcQn+
+ VJ6E3u5Zb1bVXxdoTXiL5n2VKmkvHfSwGnajyDd5ZI842NJbdFoptHWqejTB09+IRj16WBZA59Q4
+ 8nns+XJsqIM8XUDuAevrQC0Wjib919+8fdnL/ZeMYmc9sATf7tKTEyxzdo9MWjk516Nx4pg9+1iY
+ Ma+X4Bl35E/e5r0Jv/M6/4xLHa7TdduPwnrBfduuI/2nTwAJ0FR1tl0NiiDNPjVnyvkP3/Xw9jmv
+ v3/76+/44PbB66XfOLcbzhkxPgbfF59T9H/87tNVOxH16QtV96p00Lu67qOK9sU0Hvt8E+NX7PRl
+ 3fqa2n9Kd9FJcbPj3i79yWvG/kY06pl5k984MlTcji99gJc8TiQg483Xgd5vHE0EnJ3dJUe93PDA
+ 0pOTZGiW1c8npqXDNqnoz3zAl0NTv1yC7zwICH/m1NEzSYsv/K5nXy/xwckvk+LVdi+W7TrSP/el
+ sgG7eOpTcSfqSV1PlfmH7n90+50/8MD2VT/60PauD+Eqnjpv6V76Yy35M5/C515RxMN5x/a55rxq
+ FnyfhxRXadx4r6JH3lep1vev9cSbWA+tgx7Woa+p9R/60d/Xne8LPT2Iz37yOC8fJgpoHM29vkQa
+ Rz6PPZ/rYWTzVd3Gka8DtVg4muC9frMH1p3P/k6ArpPFT2zPaoXNm8Kzmgaa8wGvzOXXmi+X4P3E
+ VYnNn7y3wq86ENk8Vdc+r+s1L2rqRZUH2/Hp37aA4ou/9oWvDo/91vbH+vQePJz+5I89tP22Nz6w
+ /cB9+rN9atnnEZ1KT3j6fLCW/JkvuR85R5Hz5RL8MU/HXZGF9akLtCY9FEZ7bBS/h3715o++aUf2
+ 5NF9HXGwpbfotFJo6yQ9iTdjfyNSfsvNh0ny9kx/P3XG+RaRJ8QJB+7e90Ms+Xq/cewaxHc8epcr
+ 8uv+G9YffuF78Uh7C4P5BMzsHpl0XLIqcuKCF/VoYtoX4f3E3fMnb/OmLhIe+MWLCpun6g/OeRlW
+ feTMikeTtLe4Ew8hnK/95sn27E/rp8DLw9Dvb/7MB7fP+r77tr/3Cw9L19m29MOGdaz7VLbOFevd
+ 3Oew7l2fO4nLz6XGJfg+Z4CZ56oN66MLNa8X36QqdbXri9p68qGBX7HTl+xFd7HuoovyPh9ZSkhe
+ M/Y3nVHPzJv8xpGh4kCw+HJs4CWPHQIy3nwd6P3GwTw/+8H58ysC9g8shVy7i1X7ie3ZPTLpuGTj
+ SXnEi6YfnbIQ6KYmb8eRlw+jA3/yNi51kfLA7yf+vl7GBee8I4/EWzyiE9ziTjyUM0/7zZNtlpM8
+ Wn+Mv7zufQ9vn/f6+7b/6W0f2PgD9qlzWvd5RKe6T5LpcM7YA4POn7HWfX//uH+Dvo/xPonjCr1Y
+ n3mvSge9q2e7vqitJ/x888dOS9bNca3neL/Q731E8H3RebKfPGbsb0SNY30rb/IbR76K6zx5v3Mf
+ ceTpAlhn+uhALRYO5tn5XeXt6cQD6/pdvBx+YntWK2weYWm1n7D1UJh4sUuUzoNAN7XDJQ95T/An
+ 77GeU/yuZ18v4/Z5R57VyKIT3OK6TuN1uOpzxoNg0Xee0fHH3PInH3p0+wM/eP/2e978wPaTD+If
+ /9g/uxw6p2mfR+l3s3NmOH6fxI+4nGP4Y/uc9veVGO6LF2vOV224Xwq47pHrlaJpD2XTrv5kjfs5
+ mrIOTaf++33DuOghuigz95PHpP2NyCdcZY46WDd56ef511h5yq8JccIxIPvpowO1WDiYh59fEXDj
+ A4s/x7p+ft1PbJDy0gDYM9axSTBxLEa2HXxdo5q6CH+Kv/OENzNZh0g2fQjNQz/F2eUdfeTM2g8W
+ wS2u66z+7Bj+2l/0nYe1fKwNfov68z/x0Pabvvfe7dvf+4jbk75om9bQOb1LPxjWcd2LPh+G0Z+5
+ z8HndozLOQLucQn+mCdhV2VWf7t7FT2k6GjXChlPvcb9HM1YL+pdPFqc0F10Ud7nI0t6El/y8mTo
+ 0AmFd89nL3ECKrB1LyJPiAtf76cPhTmBsjnvOd2Hn18ReeMDiz/Hura9xU9sRPEhAWDPIrVNgolj
+ t7Lt4Osa1dRF+FP8ydu8N+F33n29Pj2L6byjD50Syuu6sJb2FnfiyzH86bvDx2K1/NG+okSv+fkP
+ bi//7nu3v/72h7ZHsGGd4aBuvluYJdyu3eCs47oXtHWuQO/mPgfyFj4zmcvfSS7BH/N03BVZWJ/S
+ bU14l1L12S7toR/8emgd9LDOfSw+p6Ff6yG6KE/8en87j9K7DsJGPTOv66fX5+WoIx9telC/cKTT
+ RuVlXxWZ/eCubzf8/IrIGx9Y3D0/v8tPbDTDprHVM91lY9I+s04897GhqV/K3uESJ3jlwTr8mW+F
+ X7yI7DqZjyLs8o4+cmbtd2LHp5/qjxWJb8aT39vqsXi0/hh4+Q/3PrJ9wffds33VWx/Y3v2hR/e6
+ sr/owfXQmSaHz6Pmm50zsH3OiQs+swn5ukafm89Z+Qa+7wEiyH/VhvWZ96p00GN83CvZdR/RxFPl
+ Gxb+g+e7Tp3Z6QfWdsddfmLzCQixENmzRLNNwonjJZZtB1/XAA/HRfhT/MnbvDfhd959vX5Tzbyj
+ D247QdVl23VUH3ovFJD1l73qIp/Cx6Lsj9LpFz90fftqPKT4sHoz/piC+zvoyt6iB9d6eEUIbljn
+ nse53XDOALWeiQs+s4n4ukYJr3MPLjNQxzwr8GqsVLcvVN2r0qEer32v/DhXP6yc31TmN510Yx36
+ WIw/pYeusxRXaOukhDxnM/Y3olHPzOv6XY/OvwrZ87kedtZ849zM14Guh/lY3qP7P39VqAu+Yd15
+ 5+7PY7lHJo2omCEGx+6TDcVkfzZRwJviGXfkT16quMvjxOLLi/MaN+NSh+Ndr/2I7AX3bbuO5Euf
+ ABKgqepsuyoQQar56Jv5xxS+8e0Pbi/793dv3/JzHyhBMh10ZXvpn2tewkP/Pg9uU7fSU2GHc2Z4
+ 7WPa40fckT/2jn/gfY7gK35yX6VhfUq3NelhxDqXnO7AePZD/Zae6ck6OK71PKWH6IRQaOukhOQ1
+ I/PoYDhj6BhH3llPP+WEG++PiiNR81UC5135krgeYid/fsU6Tn/DOvx5LPfIpHW5OEMMDs0oYjfb
+ IX+/XILvJzMCdvkYdwv8rsd1JN4qzzpdr/2dSCWqPDhcR/pJn3KosK5TddFfHfai7I+i6XXv/dD2
+ G7/rfdvX/fiD24OP8r+nSd/p76Are0v/XOs2RwhuzPhxfrUv/bHezaWfzvEWzjvCX4Tvc6o8mK7U
+ UN0WuvSOHrmvKZd23UesnhLfsLbTP7+iIqcfWPRs689jSTJeIuz2JeMlpV2XdTfbIX+/XIL3E3fP
+ n7y8nJfxy1+fADNOb6au0/Xaj81ecN+260i+6o9A9TnjEVDb6rH60/qj5OUn8EcTft+b7t1+9xvv
+ 2X7qITyoqIfeROnbbdPR55M+owdD9PDyfaDJ4fOoOedX+5Id691cvIoLPrMJ+brGJfiuFxFqa0Ve
+ iZX1QWXR0dcJ6lvHag+10q77KIvvw2WnGevWdNZ/6Nd6iC7K+3xkKSF5zdjfiEY9M6/rZ3UIQJ6M
+ lcdE5gOvcEBVAuNWvt4nbjv98yvmuPiBdX7920jSn1RsnlT4rbmKvOgTbjbBRLEvwneewd95qo7U
+ M/m0Fr0rax7WR3F2ddKsPlYji05wi+s6jVfH4pvx5G/6sUhFV3e+H/+67+t+7IHtc//9+7Z/9Ysf
+ RBs8Z9RLwXnCpYMs7ssf3bThfomTewrBDbqNs46+R9lnGnp38yl8zt2BfF3jEnyfMyKY56oN61O6
+ rQm6RLdUbKVaT/h9XsY1inpMHtkndBcdFXF86yQ96x7Ia76FI/2ej7lVL99nNfZ88CsN4phPaWfe
+ la+Ahbv2beE7zhc/sL7iZa/bzq+/bT4xmSqt9hO2HgqyUV329bCY2aqpHW7guX/kl32L/M7r/DMu
+ dTgv6k+e1YiqVHkItL/6KFuHpjri96wzyFmJYDZ89dbU5e//3EPbS7/jvdv/gZ9X8edWq184CeAJ
+ d9/0c++gq2CFk1sBXPWQ3rA03+ycgWHak/gRV4U0f+wd/8BzX7zFvwKvxsr9lm5rgvq+UOs60V7v
+ Kz00hp1urIPPq/U8pYfooox1b7x4zag66Bj1PBnfsJDwp7YvffHrXMWNrxc/sM7OzrdrZ/94PjEt
+ nVvoJz5E4eWRnZl5uD9H2TvcwHcexFhTHBLWF+GP/MIhonmqruDMYz7zdiJVqfLgcHz6qfysRHzx
+ 135tLwKtruTLm+55ePv873rv9kd/8N7tPfg3gdYr/bBflE1h+DYvHWTpGA+6xkEc13x3iYCGx+Tv
+ +wGX9XUc0yXtSfy4H0f+2IoLLvOJPK7q6ry639JtTdBDiqa9Vqj1gX9+00lH1sHHIF11X+sekwW2
+ 92HwvDpP9n0SOcb+RtQ4Xos9H3MbRz6PlWf2gTjydAGph3wdqAVw/w/qy24513TxA4uYR85f059U
+ vAzYSqv+hGDv2KEYc2asROGiRtk7XOIA4f6RX/bkHfgjv3jB0DwVF5zzjjyrERWo8pDQ8enHtg5X
+ fPHXvvD7/sq6MtO78JdS/dEfvGf7bd/9vo1/tmqnD6pc/cKg4Dzh0kEWdTrqGgdxck8huFH6ZB7n
+ 1vnhY7p1DGJSPU/EfUKqKzWoQwk9J+gRHVKuccZTL7/5Yzeq72fpKrvuMUCtu+ii/NxPHjOqDsJG
+ PXzoJG/P9ON8M1Ye8pGfr4gTrjeqHvLRj7EWr/HG6debP7C+8uX/gX/jHzn7yUnusknpJzuflCga
+ STXbwdc1qqkdbuC5L15EhD9z8w48Ei1urJzX+WdccM47+mC4geIRHWzXkX4Wr3jaX/uyq4xDPbV7
+ 26YPXT/f/vpP3L+95K534x8DP6CHA4tpnave1S+c1IPI6lOWZD7oGgdxXPOyHfqX3nBpHufW+RlG
+ f+ZRz62cd/Lt+G+SB2mu1LA+pduaoIcUTXuo2QoZT8tv/thpyjpQ79JVi/V+pF96iy7KE5/95DGj
+ 6iBs1KOHJXm5m5l+nn+NPZ/rYUXNlzjlZX0dyDX+dtGX6K9qD99xvvkDi+iza9+kVngZaOJ3bLlZ
+ LJPP2Q6+rlFN7XCJA6qfzFiHP/Ot8IsXkc1T9URM5x15ViOqUeUhoePTj21VJL74a1/4ajEEZd6u
+ 6UH8d39/9x0Pbq/4jvfgB+v3b/jvlXU+qWenDzZXv8QRxfOs/uTn3kFXbkUPuacQ3Jjx437UPtO0
+ /FjLln4Vl3uRGZicI5cal+C7T4DJfy/+RcOrfviB7R0fwL8Nvc1Db+rcl+ho2aELlZntWin2o334
+ 9dAqW5vCm6DodK79vim/dYYBXX0C1nvpz3OGS17zLRx1rPcF/aMe8ylM+4uPOO4jjn11Acm78hF4
+ fnb2TWa5+PXyB9b1a/+AGfvJCYM10ObQzGS0M9shf79cgj/F33nCm/kEv+txHTMOhakE1+d67cd2
+ L7hv23Wkn/QJoPqb8ey34roeEt6ewfK/5Wce3F702ndtf+It+GMKD/g/UvYn26qrdR66+JMTBIKl
+ LzKmv4OucbB/rnEu0Zkmh8+j5nFunR8YpmO85lFP36MRd+SPrTzBZa78O37s/ZN34c+bfc8921/4
+ yfprcbB3O4YepuvCSYC6XtBj6s7qrFDrCXt+00n91oF6Dz1P6SG6KOPzWfrznM3oe5P84a33Basq
+ 4OX3ixyIYx9OxA3F972jff06/lK1Z32LnDd5ufyB9Uc+812I/47jJ1Y/YSEKu5SdmQm5P0fZO9zA
+ n+K3trfG73pcx4xLHc7LsiheldeLZduffoxHgwZoSjzrqjj2eex39v4Er99094e23/qd795e/ea7
+ 8QN1NpW6MfOW1OXKPhHWyzMvk8pXaPrCLBxfD7rGwf7l5sJ4mhyTv+9H7Ss/1ru54hWXe5HZhHxd
+ 4xK8z9H1zTyU53/Hf8j9Wd9z7/YP8B9203c7hvVBdvaxJp8XClpyUte6j9zHL5/XCb0nj3hHHGz2
+ Kl7dB8e3TnZ0Xt8bBPiEq8w9n72sn8wee770gTjydAHcTx8JvPba7ctf8O7wXDRf/sBS5Nk38YnK
+ FpWTcxXpJzuS02YRKT5zMl+CZ9yRXzbjwpuZnAd+53X+GRec63Pd9oOjF0UH23WkH9sCqo74a1/4
+ avBQT+0+odMvfODR7Svf9H48rN6zvenuh6sdnlDqxswTg24ZrXPVu/oFQjDiR7zoDrqSLHpwTf5D
+ /9JbMOefNtOQdjePem7lvJNPvLkXmSvvjj/5Ks8vfvDR7Y/9yIPbF7z+nu3776m/MgeYJ2tYj9Jt
+ TT4vFLHktFKtHxB6aC2ASrYOfSyIv0B30UUZ4ut9Jz6esxXob0Q6qfDW+0L1GXj5/SIf4sijtBWn
+ vDPf9Zv+sN1V3ewPjgah+Zn/GJfoPqZKq/6EgF2XdTczhvtzlL3Dseixf+SXfYv85jHfjEsdzut6
+ 7UdxvahyYRNHcSdeQNURv2edQdqsPmbLT9SaP1D/az9+3/bSf/uu7e//7ENVb9pxQdYDdfLEcgtR
+ kPur+ttmvzCoh/DTz72DroKRV2jzH/rv/NKt9CQ7bMkuVsfLrnjF5V5kTj7OGZfgbzXPD95/ffsd
+ 33/v9pX4D77fyf/Nz5M0rA8FLB09+bxQQ7XHFX4P/WD3/Ry1Wremk848d+fhfukuupzA3E8ek/Y3
+ IuUP756PyMvvF1GII48PmhtVD/m4ff7Adsf5t8pxycutfcN61affh7/U71+4JRaJJPUm2D3JKUrt
+ zzeJargE3096gMOfmV3t8pAweUSeeoybccE53jj7O5EYRAeH60i+xSue9te+7C6gFk/s9C9+Af84
+ g59Tfd2P3LM9gAeX+2O9SzdWoH4585bwVtRwf8OPQF6m9M8IHkDHw+TGMU4BxMk9hXCiFe/802Y1
+ jNvNKqDy3sJ5u+CL8V3vreQB5lvxj4evwM+3/upPPbR9gLo+wcN6lG5rgi7OXXKgCivV+sH2ee1r
+ lH/ykCA6kgW29BadVupw7SePG/e94dp5RFf3QLtV4OX3i2jcH/J0Aamn7u352T/a/puXPkDkZePW
+ HlhkObv2GrdUEtabwE92PimtVp7o802iIi7B9ycAwMpDsZl28lL04jnye9/+GRececxnfyda5cFB
+ HMWdeFWkOuJPXZwVPhZlP87T2+5/ePtd3/WL2xe//r36gfr+E4v1Lt2Y2npg5i2py5X91qdw7hcG
+ HcKPePV30FUwJ7RbwnG3R+dXYaUnvNZXWXy+2DvWw3qt/4pTg80uIlk7XOLkrvuDtfgz14Ht4gr/
+ EP4N61/+yYe2z/7ue7b/Dz+gfyKH8rOyOrg+P72rve38VHjpoIfVsFOj+2m6G/Sj3zqTLooQn/3k
+ SVafLxDacH2jDm5gXH6/iEIcebqA5CUf/ibk7eyW/nGQTLf+wHr0Q/8cCd+VVvuJz+bZ9JxVI5Fj
+ SCRqdRrfT3qEUIq2L8Bb9MUvXkQe44KTn5qBz/ydSCQqr/3pp+qV2Kx7xseuGkSw6nm8Vvc+fH37
+ H95y9/ZZ//YXtrve80EW4Dp4+qOR2lZ/zG09MAtHoMfqH/EYttkvDMGKv/rxdNDVgVUH4yQMd3t0
+ fhVWesLb+bFW+Zk7H/NH/xXnApueRDKU5wT+MeUBU+olKf/R8FVvuW/7z99wz/aW+56Yn285X+m2
+ Jp+X6mElHD6Y1KcPFygX25iqf/JcpLvoorzjZElP8prR9yb5ieN5rbzJf/n9IgfidA+xrASMJ9/1
+ 6+fv3j700L8l6lbGrT+wXv2Kh0H4mrTqTwjeLau0m5mZ+3OUvcOx6LHvo0FPCmeTt85vHvPNuNTh
+ vOazH+S9qHJhE6dLwbrKrkXbDou/mqw+yvqIJ/5Tyd99+/3bi//Nz2/f+BP3bf1//EtdOOxVP/uO
+ iQVG6yocK/Zwf8OPQPcLv2DpKzyMO+jKra6DbgTSHqPzC3eTc2Y4fp/Ej/tx5I+tuOAyF594w5+5
+ 6tzFjfxYegD3Rvww/vNff+/2x/HzLf7Fho/ncL+l25r0JmaeJSd1HfoB0fdzFOR++lis5yk9RBdl
+ rLssJfQ9Uv7cL84YdM+8rh/7wpHBg/uLz3GqP3zVmHHId+3a39/8bAnFTedbf2CR5vr538ST8RH3
+ 7CL1pGWREIddaSaW9hxl73ADz33xIib8mZt34I/8zuv8My4452VZlUeqIlnX5cT2px/jVRFxIF7x
+ savJ4inrI5re8L4Pbr/pte/cvvpN79veg3+b5X5GHtbBa9EO1hsTCwz1y1k4Aj129WNr9QtDsPQV
+ HsYddOVW9JCbBRlPk6PzC1d61r7Kxno3V7zics6ZTcjXNS7Bd5+IuDQPMKm3E1Td1zF/yzs/sL3s
+ u+7evgF/HOLxem45X+m2Jp+X6kkl1HXoB8T8ptMo1dvH4n6Gfq2H6KKI+5YlPX2PyNn3i/eHNqaZ
+ N3pdfr8YDV7yOBE3ovcj16+d/a/auMWXx/bA+sqX/Udk/Rb3zOywIAq72c12yN8vxGHscImrffFi
+ Hf7Mt8IvXkRynnHYYNrK69n+TlR+245PPxUnsdnnjI+tcDpq8eFPP/fQI9uXvuE92+ff9QvbD+G/
+ ++NY/RQv87AOnv5opLbdZ8UpXjgCPRaf6139wi9Y8bdujDvoyq2ug24WZD66OMjb883OGSC3cQI/
+ 4o78sZUnuMyVX7zhz3yTugBZQ/2tvvnzrf/5bQ9un/s9d2//4hc/8p9vWZ/SbU0+V1Sx5KQudR+5
+ j19880ffFGwdHNd6ntJDdFHG59R48Zqx7xfvD4bkGHmT3zgyeHB/8TlO9ZPHDgEr/lu2L/mMn0/s
+ rcyP7YFFxuvnfwmveMC6SM0sknZm4srPpcYleMb7aECDgLYn7034lR+Rx7jUIT81Sx6Jp0SrvPan
+ H+NVkeqY8eyXtsLHouzHMH2AP+z9kbu3l/zrd27/7zvur3pMvKuXntTB07dQ2GS9MVec4MIR6LH4
+ Fk5vApqCmUh6Ycv9HXQlVdfBuCkEnXQv/nkvOj8wKj/zKfxNzrsKc57gMlf+Hf/N8hQe0xrqb/Rd
+ np968JHtv37zvdvvesPd23+s/5pgBd36yvqUbmvCaUW3cPlgWk/4fV7GNSrngW31PeonpnUXXZSZ
+ +8ljxv5GNOqZeWc9Ov8qZOVxfT5W6EgeFyYkvrniD7df+4sVdsvTY39g8VvW+fbP+glbl1U2qsv+
+ bELVEIexww089y0ZRCxcZl7OXZyJ+NrDeY2bcanD8c5vP0J7wX3briP5ql4C1eeMR0Btq4jqrwu6
+ xcW3/dwD20v/9c9tf+Gtd28PPoJ/X1I8cx5lrjp4+u1gvTHZSOrGLByBHu5v+BHoT2z4BUtf4WHc
+ QVduVUKheCurbro4Zv19frWvsrHezRWvuFs47+S7CN993kqeUS+WHupv9N3b1uW77n50+zx82/pT
+ P/rA9j78i5HHOqxP6bYmnxfIlpzMV/eR+/jl83IdyWsdHCddR/3EtB6ii/JzP3nM6HujSG2IbtZR
+ BV5+vxiO+vELkwvEdO3s7J9tX/YZP07vYxmP/YFF9vPtr/QTti6rbDSRfV7S3Sh7hxt47lsy9MQU
+ sRkXXGYSF19yOK/zJ16nXjjnnbyINFAUgsF23uqjbAFVR/yedQZp81BP6rpo/rF7P7T9jrt+fvv9
+ 3/OL2zvwqa0x+nM/M18xpQ6eftfPemO6oI4XjkAP91f1Y2v1C0MwE3W86A66kqrrYJyE4m6PFU++
+ 0hPezo+1ys9c+iku+MxkLT+XGpfgH1MeEKbeYq/+Rt/lWLjzDV+Mt/8L/6H5y77z/dvf+mn/fxs7
+ /pKFeUq3NeG0JPhol/bQD349tA56iG/y0D/0o196i04rVbj2k8eF9zeiUc/M6/ohE/3Ik7Hny7Gh
+ fuGAqrofffTsryTmscwf3gPrj7z0e1Hk65jIT3aIwaIpSorPnGrK3uEGnvuWjCKYN3PzDjwShVmz
+ 8zr/jAvOeScvwgyseNuuI/1UfxKb/c342AqnoxY3n96Pn9p+zX947/bZ/+ad279/zweWXgwb/bmf
+ ma94mYd18Pp1/aw3puvoeOEI9HB/5uXO6heGYMVf/Xg66OrAqoNxLMh56eLo/Cqs9Kx9lY31bu58
+ zB/9V9yRP7bynMB3n7eSZ9SLpceoO3XSkb6wEo72vfhm/D/ir5z+HPyPPPg/9LiVYZ7SbU0+V+UJ
+ C/MsHfTQGHajVK+PQfWO+olpPUS3Olr7yWPGvl/dJ6sYdZCfvPTz/Gvs+VyP6heOAYp7Hb5dfW9i
+ Hsv84T2wmOH6uZ6QetKiiN1M/2iCZuwdLnFyQwzO+K2Zl7D22eQuDvvh45JDfkRwnnHBOd44+xHU
+ i6KD7fjkW7ziaX/ty1b6Iqj1iYmfxn/rbfduL/qX79j+NuZHSh/XXQHpE2b2Xc+gZxzzUinM7o/1
+ xsRixgtHoMfiWzi9CWgKZqKVn3EHXbnVddDNgsxHF8eKJ1/pWfsqG+vdXPGKCz6zCfm6xiX47hMR
+ l+YBJvV2AvU3+i7Hwrlf28Dh10/hf+rxu7//7u2VP3DP9rYH+g+iNOVcJK51tOziIW7JyTxDPyB8
+ Xs4fTvHVMajfUb/58r4gXRRx341XHjP2/UI+x7OKUUf0p198FYf9xZc+rE858L8cvPbnjH7srx/+
+ A+tVL/l2lPr9+qRgkSw6M+sYTaissne4ge8nM8CUqO3JO/BHfvHyMIFPvC8D5TOftY0fmwaW37bj
+ 00/FOVD4Pf9o89ivWP3yHe9+aHv5t//s9jVves/2fvxF6kqr0wt/gUd/7sf+UaYTYsOfbIhTXtab
+ dka/dDMPeDN29Svcl6loFBGdGOO2DrrGoTpgkN9AejRm/fNedH6gWBWr1VzxiosOmcl44I99Ef4x
+ 5RG9dWMqDear/KmT+8pnQMHcgd7M5f/2d39o+1x82/oz+PvI+PdwnRrmgU95evJ5iSdRxR99gOAN
+ WnUYJ3vR2T/0o199iE4rBa795Ck+3RuuuZ/6Vt7kv/x+MRpxi+/7ti954V3c/XDGh//A4t+7fL59
+ vZ/sEAPiUHx/cqAU2nOUvcMNPPctmSVqe/IO/JHfeZ3fZ+J6gnNellV5cmZdF4pFoP3px3Y5hr/2
+ ha8mi2e2/NP4t0i//7vftf1O/KzqP97Hf1QY9en6hL+iRn/uJ/VwHnmYl/FutHhjGtjxwhHosfpf
+ OL3ZaApm3o4XbNSdQjirDsZxIWBlcd00xHPoi2mI3s0Vv8OPuCN/7Ivw3N/xJ9+pPKkTcw/iKn94
+ 6FM+gdyvbeiDX8IVP79B/w38jz74P/z4pp95CP9A0syOFg4RytOTeAgoGq7wu+6jLOZZNrY0rMPg
+ Ee/CtR6iWx2t/eQpPuYlTPnDu+ezlzgBFbjnc5zqL75Hz7YP62dXIsfLh//AIsOnvuQf4RX/1hAi
+ sujM9I0maMbe4Qa+n/SAWlOIo7DBO/DhIzWHn/iuY8YF57zG2Y+gXlR5sF1H+lm84ml/7ctW+iLw
+ mn/r51/4ofdvL8E//vHfAnrwUEd9vg1Vd0FGf+4n9Qx66Qyb8V0/eWMyj+M0C0egh/sbfgT6Ext+
+ wUy08jNu1J1z7TronkIQv+ef96LzA6PyMxev8kaHzCbk6xqX4B9THrCm306g/kbf5Vi4qTNw+KV+
+ qi4wKoL/BvFP/vB92+fhfwDyve9fP98yT+m2JvEwsGnE4zq0rzzLVhLhc26l66hfcbBdHyyeV9XX
+ OikheYmm13wLR96VNzoYRz6PPV/6QBx+nV/ffnz7Qy/8p8F+OPNH9sD6gjP8662zv8wu/YSvmZVI
+ lFFS2Ttc4gRnUyX2tBkXXGbSHvjFS1GwL56KC855GRY/OAwkm2E8I/nTj20BxRd/7QuvcG6QZfuH
+ +HNUn/HP37H9xbe+b+PfVqK8gviSdH5dn+kHaPSXuMbnTqQOxnf9rDemgR0vHIEei2/hdJloCmai
+ jhfsoCupug7GTSHoXH2J59CXygZmN0u/igs+swn5usYl+O4TEZfmASb9dgL1N/oux8It/Zjh+A0L
+ jIpQmSjgR+5/ZPvC77t7+5I33bP9NP6tsHngUJ6exMPAao8r/HYd2oft8zI/9zjEt+jKHnEglA6i
+ 06rjvJ882nYddCi/65l5owP71vk7THkXn+Nan7OzvwTsvvCKu9VJJd0q+CTu350/Y/vJN//02dm1
+ T5bKvLynBvcpWl3C3Qy8mpzzwDWvDvc0v5/sdZjkqXwR84Z8ndB8C16XTxsiwksdiqajH24O4D/n
+ lz57+/73+X/4wMPtOANku072e/QL5Lq5ZH5NzNemF1VHATCVroXn/kV5en/H7/iZiDiNTLBdR28c
+ 2qt9R+1fc24187ITLb5TM/3MN2bpwfhTg/us9zB/WHkmf/HdwKM60gECyl4PrZBUvdWoedIXw9qB
+ +qt80uGX2CucOw1ov3mSSbPqHTywoyP93Ufyive4T6DYqj5FamPWXxu9r0W97PNgE/lQyc8/+qs/
+ /VM2fcmZ6Me2/si+YTGXv2X9bxK/LplKkCijmLJ3uIHvJhHiw8qhQb3gMpP2wC9eRDYP/cAH57w0
+ w9uJyGaY4PTXZSi7L0v7zcNCuwws3sCHFdOST6c+LxVvwaivboXrZgUYo7/s7+olphKIX4nCW3mr
+ oI5nHupQY/Exzn24XxiCEe99+/k66i7+VQfdCiCwR+dXvUuHzg8k07n6mc/1Kn7osYSuFLPP4DKT
+ F/4df/Kdiit8MXsadYeHjvTlymOjP/wSrviXH0FwuJ7SoexyzEk8zsNXDiuUvM6z9DSm6hBv6Trq
+ Fwts1weL5yXe1FXxQKT8vl+NI+/KO+u5+f1idsX91Y/0YUWmj/yBRZYHnvN/oqgfY7frk5mijCGR
+ qJVV3c2A6ZOAM37riHj5ar95b8LvvM4/4yKm8408KxGycB8vCHQd1UfZ5Rh+44xXuAmEr/qrE9el
+ DEpgfsDlL56i6D5hJ67xrI9D+lW8G8Um6628amTEM09uocKHrm07XoILP+KV96Ar4pLQ7mqc+zVm
+ /ce+VDZwu3nWnXPOTM7yF33byhNcZsGrT6wvzVP45uZCgo6+y5m+ANCObeDwS3m6zvgBg4M4PWzE
+ a7sccxIPiZtGeVyH9pVn2dzjsA6O6zpO6cGysA8kw6quikeFyav7SVjjWObKGx2ME5DgAx9t7p7/
+ 2PVPuhPPiI98PD4PrD/xAvxt/udfw+ry5LUoo0CJRK3QXHCZAeO+tMTampZ9Af7I77zOn3iptcs7
+ 8uTM2u/ErqP6AFF4la/sxU9/9ag6bcuPw2Unjidm2csf/uI46KEo8DpfYZKHfO1gnpjMs3j9SUmg
+ h/sbfvE7XnzFm7rJmz6czvxJaDc8BlaWPX+fN7ydX6ywM1e88kaHzGQ98Me+CP+Y8oi++mIuDuar
+ /Orbu6rfS+OVn+eMXzt91FmVLXnor/tQtk5MeSod0844JbJCzmN/81RNnKzD4Bn1x+/6YKEvMu33
+ k0fbroOwxrHaqp+7dR6X3y9yXPua7b/EM+JxGI/PA4uFfMVL/9X59fN/mievRRkVSiRqpdPaz4Bx
+ 35JZorYvwB/5nReXIjwVF5z8SLD8SKqEOhXD2m8e+sMrQPvTB+fqUflsixaHywSOJ2bZy188RcE3
+ SfBzFv6Yh3ztYFxMAzteOAI9uO+whdObjaZgJup4wVzXjEtCu+FpIVYersRz6Es8lS5pVz7mLx0y
+ m4iva1S+Hf/Ac/+W84A1+TsB+YsvPPQtnDovG/Xil3CtQ/wIgsP1pK/wyFF+TeJxHr5ykKfiZDHP
+ songUF2LruyFc37iAEZf5nWcLDvslxdAOpTfcTNvdGDf5iP2yMeN83/6yJd8+r+S83F4efweWCzm
+ 2rX/Fk/e+1SXRBkVlu1PAohOGyL1kxq2jwbbCON+5sYN/BSJWcxjvhkXnPNNXgQZyHDDYDtv6lu8
+ ArS/9mUr3ARlY9KlYgLXpQx4GfX5Ngw/3QufONdjerJ0HYxXIl23bDdfxwtHoMfi020U3p+c8Atm
+ 3o4XbNRd55iEdiMw+yMPl+I59KWyKx3jZVf8Dj/ijvyxL8Jz/5bzpE7MPVhP5Q8PfconkDovG/rg
+ 1+wDSKOqQddT5wugebSQAEpH/hnXeca9UJ5lC8K4ImieUT8xzs8ZBvpiJo6170Lllxc2YY2jufK6
+ fnqJE5DgHR989z+63fnH5XicXh7fB9aXf+bPoK4/q9pGE9P2JwEOl36o009q2JbMEnFf9sQN/BSJ
+ /OYx34wLzvmMsx9BvSjNYTtv6lu84ml/7ctWdyYoG5MOlwlcFzG+JOaPn2HrsI96KKp1oIUhPSpe
+ icKbdswX3ss+AV0P6wQ3+Vin+ggP9w66ClY4uRXAVY/Or3qXDs6nLEqXtCfxNzlvF0w5WEfxZ0YV
+ jylP4bt4LgavZC5n6nQHlZ/64JdwElIEipBZeuqhJt4V5zyVjmmpP2dP2on+2leepSf3OKzD4FGe
+ hWs9JPjqaO3L0Xl9b8Rc/Kxiz2cv4qB7xuTD7tdtf+gFPxvf4zE/vg8sVvTxL/lG1K8fwO8KrKb8
+ SVBPaojaT2peNgREyt2TP7jMJB4i2XTkMS4452VY5VmJGG6YtKc/9RnPayRA+2u/tptgwXS4jFNe
+ A/BqG7Dyh18A3bbg5yw86+Vg38xDpdpB3pgGdrxwBHpw32EL537hF8xEHS/YqJuJOLoOrOvNof16
+ WfHkWzp0fobhN9mO9TR+xClfcWuqOpQnuMzkTZ/hz3wqrvCY1lB/o+/ypC9X7jzsQG9mYG70YxMN
+ up7SoexyzEk8TFVlciWC8DrP0pNYDvnFK7qyF875ixc6mddxspSQeNG5DjqU33H9vuBuAX0PBVTg
+ ynP+xuvPfOE3avNxfHn8H1ivPMN/9Xntq/pRnWIlEsTkjGZ3MzD9ZMZaR8TLV/un8Ed+8SGieSpP
+ cM438uiUlAAv3McLEjo+9dkux/DXvvAKN0HZqrvejq5LGZSg65M//MURXWAmrvG5E+oLfr0LBCze
+ pRvZOl44VuSx+Exom/3CLxgW6iN+xh105VbXQbcCuNuj8wtXesLb+bFmuqQ9iR96uMCmd/7iO3U/
+ HlOe8Ax69zf6Ll/qdOXuR/qgE/UjIQke+pWeethEN+FKtzUhasQppxVKXj006jyqJKOat3SVfUJ3
+ 0anSjpNV9WiCp+/XqKfrp7+AxpHBo3U/21696VkQz+MzP/4PLNb1qhf/O/wA/pt3JfJSY/iToJ78
+ aLqf1Lyc9OO3NS273gy7OBPxtYd5zJf4XDqCHO/Zfmz2gvu2idOlUN6KI3DYi7/inEB8BUMfJnRd
+ AiiB+REnf/jpxzjowa3GWz4nZDmM7/rZd0wDk9c4Aj0W38K5X/gFM1HHC3bQlVTRg2teXtpjrHjy
+ 3eScGY7fJ/Ej7sgfW3HBZS4+8YY/c9W5ixv5sfRQf6Pv3k6fnl03cPg1++AJcSid5LnsXpUOM84M
+ eB36Kc+yBVEe6ux8XccpPVgW9ld9s27ymrHv16in3xfJJxbmJZ8H9Tg7P//mh//gC9+YvcdzfmIe
+ WKzw0Wf/KRT/ni62mtKTmU3RzgxQP5mxtqa8BN5v3MBPkZhDfDxM8M644Jxv5MmZdV1O7PjUt3jF
+ o7OZ/PQzO4b66Ql3xwlclwB4GfXJH376MUZ/iXM9J/LodiJGBZB36UaqjheOingsPhdu2/ESrng7
+ XrBRdxpOv6RF3a6jkmBa8SzM8fR2fqx1TpmLV3HBZ3YgX9e4BP+Y8oA19XYC9Tf6LsfCLf3Yid7M
+ O574sdn3pnQouxxzEg9TVXtciSB5nWfpSSyH/HUM0nXUH7/3YfG8xOu4xrMPl+066Ggcy1x5Zz3m
+ I1bjvQ8/uv13MR7v+Yl7YL360/CwuvZ1XbBEolZQAarsZoD0ZOaM3zoiXtbaP4U/iGQ+RDRP5QnO
+ +UaelQhZuI8XJHR86rNdjuGvfeEVboKyVXd1oryCOEHXJ3/4iyO6wExc4xnOob4wMV6JwhvTwI4X
+ jkCPxbdw/uSEXzDzdrxgB11J1XUwDoESkA6PFU++0hOuzo+1ys9c8YoLPjMpD/yxL8I/pjyitx5M
+ paH+Rt+9HZxn5UcnT3/DKtm28z+1/eEXvrfketyndZMfd2oQ/sPzO7b73vK9uHevyKXmk1mXac6A
+ 7i4v7eHX5YTd84lahce12X3S1aUjfPLxqpGO72UvPBlel0/54gew7M7TfmA42l+06Yj7Gk7oeMBv
+ 8Bcq+Job3zRYqJzaEM6ffG7H+x13yNP7O/765NzpEf5V1+Sf/bq9whu+f805nHgIMEq8cwZOdY7Z
+ +Yg8MQ78uScXPrRAobyDP/l27MV7Aw/3oysDyl4PrbBUvQU3T+4/w9ohOqUjHX65vsmDHeHjN08Q
+ mougebCYfXUf4onyriN6mMesro9r9zHrN27tl/3Gh//gp79C6yfo5Yn7hsWC9UO3s6/CATzaYkvN
+ EhtvmojSYiIs4mXmZRUuM7klOhce5jFuxgXn+Hk4iDNQBDlD15F8xs/bZH/tI77LUF+2RVtvQ9fF
+ FL6cHT8ugQrgy+gvcY333egEvNSrftYb08COF44VeSy+hXv6GxZltR7RyYKO+zT0KyU1OQ44/PJ1
+ Ck/0BUz3hP6b3as6P54XxiqHdsVxX3mWTSyH6lCecQ8O98n1AYx9MnHs7wN5ta08rHvhyLvyRi/f
+ Q+zjh0Db9oxXO/qJe31iH1is+1Wf+f14/RsWiVpZ1d0MgD4JOOO3johi1z5VPOLDB4iG/IhonsoT
+ nONHnpWo4jEhoeOTz3Y5hr/2hXd+5SlbdVcnrosYJ+z6ZIe/ONIn0bpUnsXHcA71hYnx7WC9MQ3s
+ eOEI9Oj8O37Hi694O150B11J1XVgzVtefM5Cc9Rx6EtlM4w0mU/hR9yRP7byBJeZvODb8d8sT+Ex
+ rcF6ii88dKYvVx4b+uCXcNXH8iMIDtdjHWOXY07icR6+clih5HWe4jHAKNXLPKJznaf0EN3qqHVS
+ 3eQ1ad8v5Q/vyjvr8VPu/G88/Ad/wxPyg/bR5uP0Hz9PxlPr5z3jT+N/QvZ6uvxJgMOFmFSnn9QU
+ l378tqZlT9zAWyQyepjHfIn36ZExeT3bj81ecN82eXQplLfiCBz24q84JxBfwdCHCdMfkEpgflqj
+ LsZzjP4S13jDVx2M7/rZd8w9bz4BnYDhQ1ds2na8+Ip35VdhN8QlobLxlrOAMVY8C7vJOSOm9cRa
+ ccFnJu+BP/ZFeO6LN/yZi2cXJ/p9/eKv/OEBzPVxsTs/9Idfs4/lBxQO11M6lF2OOYlH7F0OF0M/
+ 5Vk2sRzuh7Poyl641kN0q6O1nzzFhzxqiLP4ae75tM+Kz7Y3PHLfp/33Aj7BL0/8Nyw28MoXfWi7
+ 9szfh9bu1pOZlwmXger2k5qXA9BIyX3ZEzfwfBPMYR7zzbjgnA/8zYtoA0UjOtj2pz7bAqqO+Gtf
+ +Kqi/UVbnbguYtxZ55cd/uIY/SWu8Wk3eRjf9bPemAZ2vHAEeiy+hdObjaZgJup4wQ66kqrrYNwU
+ gs7Vl3gOfalsYHYz+RIXfGY75O+XS/DdJwIuzQNM+t3xV/7E07dwo15k8DefU34EgcD13Oxepc7w
+ phLaFcf8+OXzMq5ROQ9sq17ZIw6290m3OmqdpCfxZmQeBXDGEB020n9mPK3ufmS7/srt1WcPO/KJ
+ fX1yHljs4cte+Pbt/NqX+5MA4lE0iijxqCHEACxStj1xA2/RlzjmMZ94Ki44+XkGybMSiYRwnZH8
+ qc/4cgx/7YuvalA+7guGPkzouohZtvukHX4tEbjXI373U5jkIV87GBdzz6s6eNs6vHRmAIb1cLzv
+ polSt2Guy+kcl4SyyF98lUa84T/2JR44d/Oop/FDjyN/bNUZXObuK6qP+VSewrPeHsQVX+qkL7qY
+ MTb0wS/hWgfrJBOOpTN5V5z6kB09RpyKoW39lV95li0IXqwD5+IZ9SsOtuuDhb6A5HbVVfHKo231
+ o4DGkXfljQ7Xr13/0u0P4L39JI0n74HFhl71mf8ET+ZvoKp6QmeGnLO7BAAAKHpJREFUi7aPxlK2
+ TXGDy0wuic6Fh/jAcIwLTn4eZvLkzIpHU/tTn/GqiID2177sLmDCdLjccF3EOGHnlx3+4hj9Ja7x
+ DOdIHYxHfvfHPDEN7HjhCPRYfAvnT2z4BTNRxwt20JVUXQfjphB0rr7Ec+hLZQOzm8mXuOAz2yF/
+ v1yC7z4RcGkeYNLvjr/yJ56+hRv1IsNT8RsW5PmGR3//p39Ef0d7632Liyf3gYWirj/32p8+Pzt7
+ vT8R9k9sXoFcDn8i8b2AHVzO3czmuD+G/Lg2x7jgHG8+5VmJxCI6OByffJU/b/r2177sKkJ1ch/l
+ Yuvpb1g+H+t+k3OGVtKrznOHz7lT4vJzqXEJ3ufIc8h53CQPMMprZr/qIMd9Kt/Cjf547/Br9uHM
+ VbbuST3UxJt8cqiw2q57U3HK6Q6S13mWnilZ/kXnfoZ+9Ls+ROhhv+r3fvKYUfeXDinoeuY3rOvn
+ 19/w8H0veFJ+buWK/KqS5saTsv67P/Jrz7aH3wQtnn+zT0KJTHHr8vR8okjzHD7pEge8/Dw0HqJs
+ vPTCZ2h4XT7lHcCyO0/7geFof9H6etTlEAAvI/8NfmJcZy3aHmWOPHV0zDt5ZZvHV9B+X9Kxv8O5
+ rr0e4VcZQ7fsY0aCohkL43evOYea+80DUFW3n3NOY7a+7OjEOPDnnnxYeSb9RfVyvyvGsuz10ApJ
+ 1Vtw1+OHDXWjLR7lMY314D0uWlENHPd53vjleAH8cuSBnXtPQOuRvDrw4z6BoavFDneG/2XZ+d0P
+ nz/j5dsf+HVvN/LJe33Sv2GpNfw86/xR/wfSEb3FBMCHlUODaDhd4TKTRKKLTS/mqcsgt+OCc/w8
+ HIDGrcgZuo7kM35/qWZdowwSgK8mHDEPe16qZSvtuARqgC+jP/dzqJeY5GF81888MZnHcZqFI9DD
+ /Q0/An354RfMRCs/4w66cqvroBuBtMdY8eRbOnR+huE3ozRXvOKCz0zeA3/si/CPKY/o9/W7v9E3
+ a8BQPq+GDRx+zT7cWZUteaIz9QiPFmVHD9ex2rVCyXvRw8o6OF/XMfRrPUQnRNffeBSWvLq/dOiE
+ wms98D+Dws+tnvyHFau5PQ8sZv6KF78GIn4zPwE4/M0lhzZs+qGicJkdoLi8mMc4n4nj9GYqfmrf
+ eZi2D4/7tu1PPuMFVB0znvwVxyLaX7S+vspHNwD4PeqTbT65+TL6cz/2jzJXHsa3g7wxmWfx8mHU
+ t7D2HbZwehPQpKN4V37ujbolFGHkFdr82SccY8U7/7SVH5jdXPHCRYfMJuTrGpfgybPjR6TsU3Hw
+ pb5OQFzlDw99C0fBYkMf/Jr88JQfExyuxzrGLsecxGNeheOFPBUni3mW3SjVyzyic51Dv9ZDdKuj
+ tZ88ZvS94Zr74VVm/NzqBU/qz61UQL3cvgcWC3j4k/4Y/ln4h7nsTwCsrSkvgfd5Cv4EqdkBfO0h
+ PyKaB4fl0+PhhGfkyZkRJz9ekNDxyVdxdgz/5FM4N8qvSZeKK9elDErQ9QGh3covI33CSFzjDV95
+ GG+BgGaemHtef1IS6LH4Fu7pb1hL7+hkQcd9GvqVkpp8TsDhV99Xe8qP6ZbuVZ1f3wuF44XntO6R
+ 8yy7Ucf7J3vh1rmTTpUqdO0njxl9b7jOPdH+Wx++51Of9J9buSK/3t4H1qs/+cHt2jN+H0R7sJ/0
+ JVHb+sioTxQcAvc1MtuqffslfcX5cHhGvDWe7e9Ei679yVdxDjzEh68LKL+PWJ9QiOt6fZ1lK7/s
+ 8BfH6C9xnLtewtKH3gVlK0/lpV/bNQtHBo/Ft3B6E9AUDAvMKz/jDrpyq+ugWwHc7bHiybd06PxA
+ Ml3SnsSPOOVrdgYysuoMLnPt7/ixJ/tUXHgw91B/o+9ypE5XXvmpD35N/uVHIBzuu3QouxxzEg9T
+ VZlciSB5nWfpSSyH/OIdfZ7SQ3SqtONkKSF5te066FB+TQ9i+cVP1p+3chU3vt7eBxbr+dIX/TCE
+ +Op+0mPLmvISQHyIThV3M+O4P4b8iGieigvO8ebbnVnxaILD8clX+VmJ+OKv/dpWGe2v+n19xecy
+ dS2KH/HyF0/6SJ+w3Y/nrpe45NG7oGz1vXQzzPr4k5IMHjt9sLX6hSEY9Z75GXfQlVtdB90K4G6P
+ WX+fH7ydH2umsyozH/NH/5rJynxzlK08J/CPKY/oT/AXb+pk+vTlymOjTvwSrus0n0zJQ3/6WnGt
+ I9smvxShn9k4uFg6OM+yBSGKAcpTPLIXrvUQnTMxdu0njxl9b4TQBrxf/aFXvuCt9t6+19v/wGLv
+ X/aSb8InxDdYsjoiXha4/MnBJ79PQzNjaI/hfeNmXHCON5/9CO5F0cEmTpdC+WwLOOzFX3Gso/1F
+ W9ev6x224mWHvxrBJQt+zqPMlUe3s/Ki4kp/Y7xwZPBwfyuvbcdLj+Jd+Rl30JVb6Vdu8NMeY8Xv
+ z63zA2sdaq54xUWHzOQ98Me+CP+Y8oh+X7/7G32zBgzl82rYwOGXzyk8nlW25LnsXkWPEdd5XAfN
+ 2/ENCz9k/4aHX/kbvknl3OaXq/HAggjXf/rFX3t2vv29vsS8rNj3JwcOGzYvkWaKRnsM79s/44Jz
+ vPnsR3Avig42cTf/JJx1jTJUn23R1tux6x328jtftzH6S5zrOZGHfF0/+45pXTpeOAI9Ft/CuV/4
+ BTNRxwt20JVU6Zdrvitpj7HiybfOrfMzDL8ZpbniFRd8ZvIe+GNfhH9MeUS/r9/9jb5ZA4byeTVs
+ 4PBr9uHOqmzJc9m9ih6uY7VrhZL3yf6GdX5+/e89/NZP/Vo1ewVeqPHVGf/u/Blnb3/Lt+Pofrs+
+ IXFq+QTtNwXfHCeGP1HrUsC/iytbl5hvAtl46QXxvlzNo40BLPtGPzAc7S/avB25r8HZ/fgK2lZc
+ ITipbi+063ymrw0l2OMGb+XruEMdvb/DRWcV0Hm90KvqslzVz+i3Agw89cp3n/CeL3yYIFaq1Ln3
+ ec34W+A/9bAptaPGxXkmf+W9oV7uNxOWZa+HVkiIwyi4eeohiIJoi0d5TOM6eY+LlvGLoCz6zaON
+ vBx5YOu8lcf5zLuvv/srnNIx6/n5az/4y3/9Fz4e/4v5lPiRzlfmG5Ya+YKzR86vP/OLsNafhG9x
+ cdl1uJkJjrgK9GHw8Hfi500iOA4JhMuPzXErRNf+5Fu8ytf+2pfdBcy7hzP3pVDdgixbaetWLD/r
+ WZcw+7t6ycNCmZfxIgpvTNqpOzgCPRbfwj39DWvpFZ2s87hPQ79SUpPPCTj88nFYVyrPcev3qs5v
+ xpkBr+NeKM+yBVEe5OO98ISZi4Vb5046VarQtc96idc2/3Do6z/4rE/4oqv0sGJl6ya7zqvx+s0/
+ 8svOzh/9Pqj3qfomUeL7NE6X7G8O9cnDxvKw0uHY1uHwEOXHSy/or8OW359Myw+gAMXDS1N2K9j+
+ opWj4qSqL4nrBM8NfoHMyyX5NI16vaEEyh+76nE7h7hDns6/45/9rHjSK5wT8JNf9e3ac5xiji+H
+ 8+s3SdGLF+uegVe+MTsfESfGgT/35MPKM+mL9wYe7kdX4steD62QVL0FNw8fCuyPYe0QHU2qeJpH
+ ASK23zzJZIcJmkd5Fq77SF4p7jqUV/uwr5//xAef8azP3X7vr37C/qrjXd2Pwbha37BSOP5OaBzK
+ F+JE36lD5ZtFp7DEx0bQmnP4u0PxrVj+uiQ+HGz3godm2/HJ58Oct2nPX3HMoPp60qVjnOsSQAk6
+ flwWejXSJ4zENT7tJg/ju37miWlgxwtHoMfiW7inv2EtvaOTBfX5SeahXympyToDh18+DusKxvJj
+ gsO6130ouxxzEg8DdR/NIIJ5nqf+cVB+8Y57cLhPro90WpkdibzPelHf9e2d2513fuFVfFixYNZ6
+ dcffeeuLz+64/t2Q8eP7E1Zi31iyvwFQfF4engla07vYLe4+seXHi4EiW3DG8xOQ8QNYdudpv8J9
+ CYBf28xbG84g2/EU/ug3j/JySSJN6UdmJ9jjXK/LPcQd8nT+Hf+xX6Yxj8J3dWQf86692q8yd1PO
+ oWa/eUteAFX3nPkmQn7harawRJ4YB/6c+4eVZ9JfVC/3oyvxZa+HVkiq3oK7nvTFsHaITulIh19U
+ U25RDVz7zSN3XoqgebCIjoS0HiJWBkVmH3+I+77za3f+5g/93k/5oVBetflqfsOKSl/xGT90fn72
+ RfgbDR/R4eby0i/RA/Rh8NQjfvDB2Z6HhthxK3KGjq/LAL/iCCSgbIfFrhraX7T1NnQ8Mb505qdF
+ O/xaInBdwsQ13vBVB+NdCIIZF3PPqzzgzVh8C/f0N6zDOVAsCTruUwmYcwFAO7aBw6++F/aUHxMc
+ 1r3Ot+xyzEk8DGR6Dy4qDivnWXaBxO88oit74da5k06VKjT716+ff+Ds2h1fdJUfViz4aj+wWOGX
+ v+i1+Jr6ZVD5Ok9xffJT9DW8X58o/397VxtraVWdzzn3zgxQgQEKSP1ATSWhlaaJ/dNUrYxNTEYL
+ CRoxVRtpy4/GtJo2TfnRxFT7o/5ppLRNjKlaCtoMatKG4h/LxJg2/eE/EezwVQlWKylcYGBm7syc
+ t896nrX2Xvs97713mLngPTPvvtyz9seznrX22muv+56Zwx1Mc+xJZyiNJZkCcWZ+meMMDceksAkA
+ gxedMpZ+jN2Hsk4YckcGpE8P8JL843rwO0faX+jJH8P17DA7MccF43W7Diz6xJnHapVPhBpL3/YL
+ wrJPjghLfocjblDLDJQM+GuxT5z0xWfxpZVWZr8jDiGl6MwutsCXfQLe2BvSAyb8LUaS36FvaxVX
+ 48dzRdyIc37t0PBQwoL88Tj42BeyAEvwhiciCLv84QLCGBcU/ZW94keKn+yHP0RQ1ebn3WSOSnDz
+ 0ZvecH/w7VS58wuWRe53rrPPZ/2xnb5+omEOh5Gb5rXOI7Z1HqJwXMdC/EShuoCkibHW3Y7jQSR7
+ jb7xJzdor8CQUmY3+ZvGUPN1w8s/OpH2F/ONvwYKO8ZHorATQ/EVfeIMqFb5Km58wuqdg4WKcU75
+ lOLnkaRQnIHDl45DcQWBr0NgQXHfLK/8/LKeGEiQz1PnFXYIUh7RTsqDXj7JP6NjT+zMv+kfHnvf
+ m/5FTDv7dTkKlsXwlrd8Fn8i+Jnyk4VBr8HVvH7y2FFy7ElnKI0ltY7J0qlnaLjxCcvjxbhZ9Hpx
+ tSmLP+JnqR+X27rRdB7Og3PIY4YdwEb6eeqcHJ/0aC/ITW6B1znKvy3tkI47qRa4v7RvX4l9+M59
+ X8Dhi3bcr7oORSzIn9iXxr6QBXnMVKFhhF3P5mmnjg1rTXGTXvEjxa/Eg7UqIkLVPz/2/jf8LXtL
+ 8NI7pSXw+AsP/NlsNvn00CXhKfsh5UMrp8/D0uFqHfstHfSbdSUF6JQMyro2uZBVWve4MclT0iwk
+ lwyU5Cnm/Rio73bNHc/agndY7IfryX83X/QanLtoovKJsI4dVPaRL+GiXw1/EwiEKc4hS9DT3SEZ
+ +kPS3WqE47bFTkPsg8yPKfObzeZTK09OQPB0uc5egetYbd3yBfoCYh0dH5fpsFTMWEd6Yk08fT8q
+ ndvxCeDkp8yFfTB98uhNV38q0ez4rsVg+doXH7gVSfA5OF/816Xzw8SOdJktOQThOrKiuZzMhVj3
+ 3AE+P2FJ33ko+usePmalzJFWWVHsowNgsr+wLh76bd3sdx0WAy0u8fb1enbq/mPf/f3UeXqkYYpb
+ mbDthJupQ632hbcRei7L5bF94VvxSjLOKUkaMv2h5rzBH/K07GR+513goR/hORR8XN8WBon76xsU
+ jxcfhqMsMBA0Z3SWJ0FLqoQr6+Lhcrw4QeFBh+dtE2ixD3woFBOz3zt60+u/EKrLIi0uy9m++MAH
+ 4fyXcD/3lA2waPQuL0/Pt8kssLtll9TvWOnYBL4x1noctsa+kIUng/OYE7RfBOhSMbB1N1DsY0bm
+ wz/ISDJDG5/L7CbtxHpacPNFr8GRSS/F/mb8JQ7hRy+uRpX0o0iEGV4OrDcSi3R3SMY5DckgzdJx
+ DX/YG+LHHI/3dPidj+ZNPzXaj3PGvI3dElHVHOJnOE4QKJyPyzQjZOtU947HkaPEExCbB4EdR+Fh
+ xydiHRLF6hj8+MiRm66+J6kvTVc3Ymnc7Tn6Dw9cP+26r+MA/HfD+2ECxsseh+ZjHaoVEb9rpaOx
+ 4LZuh2+HnYA+1mXP68BYK+uupuvhhgjAi/RIu7BuGIP7kbiUvUwjv1pc4u3r9exUPtnROO+nzssh
+ vtIvhSP5h4lwt3aEb17jHFzyHABQHAYkcPQrSfKb/lBzXt1Wi48cOy07mX8jHvphcXB/fDz8ZOQw
+ uoV94asWF48A7bjbhBtOYwjr4dv3xVHlsdXS+jwYRxwNg3/M+NlusnLj0Zte962is2Qdi8tyty8+
+ dN10euKbSIQrIqmbS8lD9G2aYC4PFy1liYpGFC2pF0XX9xTCbc30sq9LrBRLxY9RFk/xT+4wqbSM
+ 9UgyTBjOWsFrKAOxTkOBq365ootQ5DDxhV6Kh0HMbhMnw3nyc9n5kn9tIGwb9bLQfxuTBfRDMgI5
+ JIFfaI7bFjsL5JjI/DYMjM2npqLo54x5G9cdFhrFAzvnOgPgONpxnOmHpWJGONnRehS95Ib4wet0
+ bscnjHc+f+rk6vT69Ruu5m/4zbrL1F+evyXcKKq3XPvdbmX2a7hkj9hx1ssdp2fZYXfQxpJMAZsu
+ HZvXWPriCbwr9vSDz9jRCr/T+rWkXQHwmvzjuvvFdVt2u+iGnvxx/wwXdky/+G96MbSNJH3iDKhW
+ +SpOxRnrhImo2je95DcDhanihy1DMeYNjlb1ja/dF90GppGuT73AhxShvda2Bb7sExpb2gEm/C0G
+ uL+0b1+ouBo/s8Ai0vDEOiYZnvTDy8e+kAV5zFQNp/Gk+AERP0wNF01xkx73m/3vJo/Op7NfXfZi
+ ZXtVVGPXyyzvfOSK6fwYnrQ6PHHZoXoS5suks2dyah2g0jEFjaXvyYp1JSk7bXKZHZ+GpgHb9YXk
+ koHin8w5f+inS2J8nI79cCg7Pp/9d/MtX+Bc1USxvxm/7xvC+Rb9wgJZGZ8mEAiDF5tGAk2+IRn6
+ Q5JWei+Oa/jhT30Sgd+naqdHzWHmd54yn/CyZ+eD+BjO9IplG1v8PB6WD5wgUDgfl2lGyNbxzSa+
+ ui+z4zwBgeQ67bh1N4z57xzZvWv/ZP9VTyX40naX/wkrQv/bP/+TrrvgHUiOgzxiZYmyBRhdKkmt
+ Y7J0HIaxLrMup61TTx3ite7zXHcHaE88pPWklb5hdH2KPsfB7xxIssBnST5TtxZ2TL8smF4MBSz6
+ xBlQrdg3BTSNpU8+5y36hMkvmZNeGNQyVpxPVuq+yNPbF3kAbGTyx255X6/PH+MGF3plX/DjVOw4
+ HqI2BjTt21ciLmKOfQKHryY+tGzrUGR4vKiRt+oRwPWIh+Lr4YCyCMKu7Hh8qregAS7zYIzfuvCt
+ Iyurv362FCvbrqKTNr703fse3jP7ybEv4QQ/GMmsrPHdluSxpMEcxsPrnqyOz9nguQE1+0lX1cnj
+ Y9IyiXNy1eTTepj3YyCx26Vbmqed5Gb4G0laxwN8iQddtsq3CT/3kS/hol/VLniaQNgw4pckrDf7
+ zuPQH5Jyu3113LbYaZk1yvyYMb/ZbD412o9zxryNkRHWI8ppFA/DcYJA4XxcpsNSMSM+2TFWxDN4
+ aEEvXI9jwBT+R+Z7jux+/Ucm+6fHEmzpu8rYpd9GbwP4P6Zn//jQ5/A3iLfmS6VDjUsIHeaCQuC1
+ AnBb1yWr6wASYKK/7rbLutNG0to8m0m/vOhZ0hFZ1gkiP3s+X4uL1rMfFZd4+3o9O5Uv9t3fT50X
+ v+z29aofyS/vLoh6axnHevksDhGvJIGnvSRlr9zi1kSPH1Xh9O1k5o14bL54bhvQuL4tDBL31+Ha
+ N+INvB2TSfKY5DhYsR60pEo4m7c8wpf0CdCL8+CjC589csPr/ghGTPGsahaXs7at3Png7+JQ70Dy
+ n89NMguQFMgWpoCPmT0GaNbj0gjP68QsszFH4rFcMz1r/XVLKkuuCgDIeQ3Okak5AfXTus83/kIn
+ DFKvbKSYb/kIDwdNWfakpvlBfu4r4iRPKy7563y8tLEPzOlS6lJRzy6pzRt+SPKyYWVIAr/QHLct
+ dhbIMZH5bRgYm0+N9uOcMW/jusNCo3gYjrwECufjMh2WihnxyY75YXF0HjBEk93JEfyDER8/euPr
+ Ph/zZ5v0zDvbtpX2c9dD181Odl+bzaZv5qHjUjEFyq1RCLxW8LKXIpOBBMRlV1ERkdsq60ruxSco
+ GSyXHklHpOmlZutsLgs+YMVOxm1S5Hp2Kp/0Nc77qfPyI9yJuCW7iE+4WzvCN6+8jdBzWS9fvdrG
+ qqtpMN9PkuQ3/aHmvMEf8rTsZH7nXeChH+ExFHysYuJD8ri/vjHxqNjwGIOHdqQnVuw/aAsPZoiH
+ wFe/aIH70RPT1RvXb7hqqT+2wO1u8nL2/KH7Rpv88LXfnZ+YvRWfQ/mqZUFJvno7lBIDSRV4XquS
+ VJFMUKg5VLLNYUwqA5g9NRko9nk95U9xHVkc+Cyb5A0/TL8smF4MZa/oE2dAtWLf/dJY+uRz3qJP
+ Ovklc+IPg1rGStlntWM98vT2RR6sNTL5Y8Wpr9fnj3GDCz232/CHvSE74SdkaYZzvuCxtYgLeoRq
+ DH/xRVyJQ6wDhgXDlSLjY1/IgjyyQ3q8GI/Hg6PEE5DJ5K4Xj+355bO9WNl2LcbnTLO3iEjCO7Dh
+ 85GLngsKgY2Vo558nAAmgD5efCLx8JX1SDE3QH3DaFyecHwsfucwVOBdFrzclD+8Sz5BHJIYkqnd
+ 1+vZGcZJ3wnoTPVDvvX16Df9cN/dro9a4ZffA+yXN+I0IO1y236SlD3b4UDr8Z+RnUzvvCo2yU+b
+ j7ga3se1aAWJ++vwXLQsXDYmD+2IRlliRclpSZVwNo8vK374g/Wj+F1Wt+It4F2EnQMvnvXnwE5j
+ i3iLuNp1X8ORv5l3jLngYVC2+OWPSxNFBEBmWRF+qTQmfX8dScXkoiFDyEC5/JiR+bAPieQt665X
+ x7RSDNq8E3ChDhOfWS32pV/5hKvjxM/t5iK46BfjEfy8dG4Xc7nYkN/2ZfOGH5KhPySBX2iO2xY7
+ C+SYyPw2DIzNp1aKEBDcF9djh4VG8QCCRUpAsKBDO0WAxfmLGet48UavFKv5/Jx4C5hCze7Z/5aw
+ v2O8RTyxPnsrcuBLzI1SFJgNXix0OW1dl50dyxZf93mfpgmvFqVoWDJCQfqGqGOoYWRj52EPL0je
+ wGdJvOCmID9MvyyYXgwFLPrEGVDN5qVWcSqqWCdMREWfMPmV9cKglrFiDqRW9Y2v3Rd53JxpZV7q
+ BT6k8fb4Y7wR3uZP2Q7pW/+1v7Rv8wEt9oVeGgOHr7yPug4YFuSPx8HHvpAFeWSH9HhRhMKuv628
+ 68WLzo23gBGFkL1TiulzQ67c/dBH8cth/w7JMPC3iJ6smyWXXQque7zsUvkYwpPYk5SQmnxaJ7xe
+ Auq7XdO3sUvi47TSvN8Sxw3wuT4B/qLLswU/95Ev4aJfcNDtQjaBsGHEL0mgm33ncegPSfe7EY7b
+ FjsNsQ8yP6bMbzabT+2VfMKCF0dPTrtbj7733HkLmELNblyB/vy5M7a3iJPJ13CJ8beIusTlbRxv
+ l4WICy7sEusSMosjgiw2pq/k1hOU65VQp0vvPznj0kfAo0jFfC0ujih23DCLRuLlOPYB2bNT+aSv
+ cd5PnS9uo9PXi426OW08NtGXfvmjqOmSR5wGJPC0l6Ts2YEMtB7/GdnJ9M674K/NR1wN72O9XfMh
+ edxfh5fiBjyPMXhMWt5IKL+C1nnw0cLvTbvpzYfP8r8F5HY3efGs3wRxLiz9/fcvXNkz/Wt8zu6j
+ VoTyJWYSeVFQluluMgcxr3UPErMwrSMLS3EjxMKdigtHsqdlZW2/ONQxUTKArs2bv/TLRfhFJP1O
+ /JzUOOMG+QGo8z2/nbfald8xNjO8nMA10ubxLbaejEAOSSPsN8c1/GEP2Jdkp89t48zvfITZfGql
+ CNm5Yt7GdWeFRnGwfCAvgcL5uEzTc1vHNxsz6K4XfuaqW/GvMB+N2XNVWozH5hFYvfP7+6az6R3d
+ tPuFthgge+ySUqQiFLlp+mVdudZ/sokkLkUgrq3ppUa7Nvb5gg9YseMTxKUi2Nfr2al80tdY+rwk
+ SZ9uFTO2b3OrTHg8iCr++qgVvI3Qc6lLHnEakMDRryQVX/NgoPX4z8hOpt/IX5uPuBrexy/DE9aD
+ k9nsDw7vf/WO/9dscthezr5n38tpYsm4D3aru/7nvz6Gvy7+FD5sepFy1sKEJLXLWoVfqnRX++v2
+ ExVf5ZJ7kqtI1JQv69TfpPjEaXnRoB7vjhbcfLWXcekYiv20nmjSPnORWvSL8QCv/PDAuB0WJfA3
+ UlEsV92jqrEXhyg2jUy+l67jG/6w91LtFNLUyfzOx1WbT03F188Z8zauJ6taZmEuOPISKJyPy7Ql
+ 2GTyHMafxFPV3+Cp6oRNjE0RiCswxqMfgQOPv3rX+vrtk9n0A7lK6bJ7EYrcNF1mZRG4hH4dbZ5N
+ 41IsFtYdFXiXBV9o0IFdFglTIW6xmBS9np0y3/Dn/chQ5a9+cbvZP/qhdfnh/b7gbTS/paDLy22E
+ d60Ejn4mSX7TH2rOG/whT8tO5nfeBR76YXFyf3y8HU9YID2A3wr68Rfec8WPsytjXxFQdo7R2DAC
+ q1/G28RudscEbxN1aVJRwuWNu0ICu8x+iS2VrWiNT1ip+CAmisuAjEAOyaHTcRyLCc/hDOxsxe9+
+ E2Z2U1Mx25YnrAeRX+PbvxTboe5YsIai0p+zt4n/+/DHupP+NhHXjk8gcfsMP1CsSvUin34ilyec
+ eKYwvdTqk43mCz5gxY5PUN8vK3hCv+j17JR5t6tx3k+1S7eKGbuUlT/vV7hwkKP2pVeEFp5YjBff
+ Ec6hIiR7bbEoRnr88VPktOwUUnMI9hCnBR4WrfDYcfHDKYbkcX99Y+JxPqjbGAaem8/n49s/xmvr
+ l02ybGvlcw5hbxNPrN+OS/4BphqTmTmtUFgRwIILpLD/5LUJNpOpuHBkeF+nYlr3+VpkxEID6FJP
+ jnCB6jFvM0mfAH+pfLJbxwXg+8hFatGvhj+KhlPwcsJ+I7FGd4dk6A9J52yE4xr+sDfEjzlG/3T4
+ nY/2TT812o9zxryN3RJR1RziZzhOENfBnzvn3cpt49u/FNAtun5TtkCNy00EVr/8yL7ZrLsDtw9v
+ E33Jq4ULJqff+gBApktPRSS3KaTGImRjn9+smFCNuMTb1+vZqXyyq7H0VU3qvPj5Cndy8XL/svtu
+ V+jea7213JcuOaMT3rUSeNpLkvEwnqHW40dVOH07mX8jHvphcXJ/fHyqf4aF/wfwwenK7Nbn333l
+ f2RzY3/rCLS3ZWv8iIgIHOhWdp98+Gbk6p/ioxC/xGm7tH6JLZXHJ6zxCctrnp6suul38fuqPvPC
+ 4Sv+afKB6clIpVGeegTGgnXqsdoQufKVR9+DJ67b8Psd3+Y1i8WqVC9q6idyeVKJZ4rek4mtCy5Z
+ 8HFSpShm3PiEVZ7AGJ+d9YTVzSf/PlmZ/eXz77783g2TaFw4pQjENTgl8AjaPAKrBx59G35Z4G3T
+ 2eQ94xPW4ts6SzbFZUDWRxG9Hc7jobD7Ot9eokg18qXa2Yrf+Qgzu6np7a29XcZ+MW9je7a2HaJ/
+ 34lu8ukj+1/9n0ll7J5BBMaCdQbB20h194FHr8P/VH0bPgpxM56QVoRTEpcnpvEJa9PixTjh8ocs
+ T1BDQffihQrRFDsVk4HiCI4oLsEfsqF3vgUem4/zMwUfW9HCf3ir1x2Yz1f/4vD+yx5s+MbBGUdg
+ LFhnHMJNCA789xt3zY//yXQ6uwUPAedFkvNyQE0lDNLexljztzNl3efrWDDiCIce7470qR7zBk36
+ NoxW+UIv/kDdEU7U4hbfdjb8USycgpccPI3EWr7qZr2MQ39IOmcjHNfwh73Mi/6WdhpiH2R+5+OK
+ zaemYmbxm78AS59fX9312aPvuuQHCTJ2tzECythtJBypBiLw5ceu3L3afQIrvz+bTi/WJfVr5EUl
+ tKxIsLmsRcMRNg+CFrdYTIpeXNcFPtkRTvqqHnVefoQ7UdSSf/Qj+eXdBdErQrrkqVhBwVgVF0jg
+ 6VeSLI69YlHs9PjjSeu07BRSc0gbXOChH/IY3afxj2rdMb1gz+3Pvn3vM1l97G9/BDz7tp94ZByI
+ wIFHL95tRWsy/QQ+hHNluaS5SOEGlGKzUGScM823RcYvfeaDivHldkr8XhRZRKi/6BeLSPD75Q47
+ Q0Wn7BegUpxM38ahPySDNEvHbYudzBv9zI8585fN5q11kyfR+6vn1n/2c5PfnL6oyfH15Y5Am8kv
+ t7WRv0Rg99efeMvsxPEP4dL/Fr5fHwuluKSixMsdJ2XzmGhxi8WkFKV4dlngE6Fw0lcVqfP0ye0W
+ PudhsaIf7nnMx0ay7BWhhScWYM0M92kSeNpLUvYMMdB6/Bs+GUF1SzuZ3nmTv0+A4CsnT87uOvzu
+ Sx/I0LH/ykTA0/GVMTZaGYgAfjPbeV99Yt98Mv8Q7vz7cCAX9YtDHbt+Kj5tkfFLH8Uj45Lpyqfj
+ r+PE70WRRYQ8i0WRRQQqph9FIswMFR2zVoqS6eXxBkWHvEGaZS4msN/Yy7zob2kn80Y/+OfdM/jL
+ k3tOzlbvPrxv77ex2Q2qZiiO8uWMgJ3l2HZKBA4+ft7up2fvXem6D+Et434Ugt2bFRO6vUExKXpx
+ XYlTcalFKMbjExbjZUUKDWIdJfAb+Fly97OvufSfJ784XefC+PJTj8BYsH7qR7CBA/f+4JLz1ifv
+ xyPJh/FJ+rcDZXdKLRWf8Qlr8e1j80RlgcN3ebLzJ6fyRBhFChDgvg22uyd7ZveMf4DuubbDRFyB
+ HebW6E4Tgft+dPn568fxa266fd1sdj3elOCfKPOjo1x8uzY+YfnbRASyFCv0c/HCb0l4ZDad3Y8P
+ Th2c7971b4ffceFTTdzHwY6LwFiwdtyRbO3Q+fc9+dru+ORd+Dfa9uE22q91fq1pWe3Kb/c4wfn2
+ mEsxS09q0nPbTtTiFotiwx9PLk7R/JkS+NIfXDdFoxST0B+SztkIx700O92T3by7fzJbOXhsNv/m
+ kesve7LhHAc7PgJtJu94d0cHhyKw51+fvGblJJ68Jp0Vr+uBubwUmygPC8VJRy+cipFXO5qweXVc
+ WNFBt85jHRMBqx3hm9deETqV4kW/oBeS/MYz1Hr88Xavm89/BP2D80l3cNKt3P/sb+x9bEh9nFue
+ CHhWLo/Do6dbRAB/Uvyqb/z4WvwPItdPZngCm3TvxCftL81aLAKYMGmtjjlUFWIxykVq5z9hwfGn
+ 8Ynzb+Fzbvcfn3YHD++77Hu+o1GcJREYC9ZZcpAbbgMF7Px7/+/nVqbHr5lMZ9fgf8y+Bk8g7OPh
+ 60345P0q/8ddK156hCJVFDM+oGGmFjVPGce/0k9YcPEE/Hwcny44hBJ6CNs7BPcOrc92HzryzvN/
+ CEc3eAzbMELjwhJFYCxYS3RY2+4qfvXzRcefeuNkPkUBQyHDNxLiGpSna3DrX4Nixtuv4vXKPWF1
+ MDvt5j/sJrNDkIcwftgK1ImTq4eeX7nwsfFfktn2TFgawrFgLc1RvcKOfqfbdeHzz188Pd5dMpkd
+ 3zudrO6dTE/unUxWICeXwJu9+EN/jPGNMYqa+jEnd9cg1vBEt4aiszaxD2HOpmvzebcG3TX8Mru1
+ ldnkGfwt3RrK4Rp41ubT2drhC161NvmV6XFRjK9jBGoE/h//xb5CiJqhJQAAAABJRU5ErkJggg==
+ installModes:
+ - supported: true
+ type: OwnNamespace
+ - supported: false
+ type: SingleNamespace
+ - supported: false
+ type: MultiNamespace
+ - supported: false
+ type: AllNamespaces
+ install:
+ strategy: deployment
+ spec:
+ deployments:
+ - name: clickhouse-operator
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: clickhouse-operator
+ template:
+ metadata:
+ labels:
+ app: clickhouse-operator
+ spec:
+ containers:
+ - env:
+ - name: OPERATOR_POD_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: OPERATOR_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: OPERATOR_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: OPERATOR_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: OPERATOR_POD_SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ - name: OPERATOR_CONTAINER_CPU_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: requests.cpu
+ - name: OPERATOR_CONTAINER_CPU_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: limits.cpu
+ - name: OPERATOR_CONTAINER_MEM_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: requests.memory
+ - name: OPERATOR_CONTAINER_MEM_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: limits.memory
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: docker.io/altinity/clickhouse-operator:0.23.4
+ imagePullPolicy: Always
+ name: clickhouse-operator
+ - image: docker.io/altinity/metrics-exporter:0.23.4
+ imagePullPolicy: Always
+ name: metrics-exporter
+ serviceAccountName: clickhouse-operator
+ permissions:
+ - serviceAccountName: clickhouse-operator
+ rules:
+ #
+ # Core API group
+ #
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - services
+ - persistentvolumeclaims
+ - secrets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ #
+ # apps.* resources
+ #
+ - apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - apps
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ # The operator's own Deployment, identified by name
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ resourceNames:
+ - clickhouse-operator
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ #
+ # policy.* resources
+ #
+ - apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ #
+ # apiextensions
+ #
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ # clickhouse - related resources
+ - apiGroups:
+ - clickhouse.altinity.com
+ #
+ # The operator's specific Custom Resources
+ #
+ resources:
+ - clickhouseinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallationtemplates
+ - clickhouseoperatorconfigurations
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/finalizers
+ - clickhouseinstallationtemplates/finalizers
+ - clickhouseoperatorconfigurations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/status
+ - clickhouseinstallationtemplates/status
+ - clickhouseoperatorconfigurations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
+ # clickhouse-keeper - related resources
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
diff --git a/deploy/operatorhub/0.23.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..f3eff2751
--- /dev/null
+++ b/deploy/operatorhub/0.23.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,1219 @@
+# Template Parameters:
+#
+# KIND=ClickHouseInstallation
+# SINGULAR=clickhouseinstallation
+# PLURAL=clickhouseinstallations
+# SHORT=chi
+# OPERATOR_VERSION=0.23.4
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseinstallations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.23.4
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallation
+ singular: clickhouseinstallation
+ plural: clickhouseinstallations
+ shortNames:
+ - chi
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: CHI status
+ jsonPath: .status.status
+ - name: hosts-unchanged
+ type: integer
+ description: Unchanged hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUnchanged
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: hosts-delete
+ type: integer
+ description: Hosts to be deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDelete
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more ClickHouse clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: "Current ClickHouseInstallation manifest status, contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other"
+ properties:
+ chop-version:
+ type: string
+ description: "ClickHouse operator version"
+ chop-commit:
+ type: string
+ description: "ClickHouse operator git commit SHA"
+ chop-date:
+ type: string
+ description: "ClickHouse operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this CHI"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized CHI requested"
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized CHI completed"
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as follows:
+ - When `stop` is `1`, the operator sets `Replicas: 0` in each StatefulSet. This leads to all `Pod`s and `Service`s being deleted. All PVCs are kept intact.
+ - When `stop` is `0`, the operator sets `Replicas: 1`, `Pod`s and `Service`s are created again, and all retained PVCs are attached back to the `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' is specified, the operator will always restart ClickHouse pods during reconcile.
+ This option is used in rare cases when a forced restart is required and is typically removed after use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows to troubleshoot Pods during CrashLoopBackOff state.
+ This may happen when a wrong configuration is applied; in this case `clickhouse-server` wouldn't start.
+ The command within the ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and to give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+ Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling:
+ type: object
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
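+ # Illustrative only (hypothetical values): tuning the reconcile cycle in a CHI manifest.
+ #   spec:
+ #     reconciling:
+ #       policy: wait
+ #       configMapPropagationTimeout: 90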
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources it finds that should be managed by clickhouse-operator,
+ but do not have an `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ defaults:
+ type: object
+ description: |
+ defines default behavior for the whole ClickHouseInstallation; some behavior can be re-defined on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ defines whether replicas should be specified by FQDN in `<host></host>`.
+ In case of "no", the short hostname is used and clickhouse-server relies on the Kubernetes default DNS suffixes for lookup.
+ "yes" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows changing the `<distributed_ddl>` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default; if `Retain` is specified then the `PVC` is kept when the StatefulSet is deleted
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for one `Service` resource which will created by `clickhouse-operator` which cover all clusters in whole `chi` resource"
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configuring the `<zookeeper>` section in each `Pod` when generating the `ConfigMap` mounted in `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper; please install Zookeeper separately, see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or its clickhouse-keeper replacement) is used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ users:
+ type: object
+ description: |
+ allows configuring the `<users>` section in each `Pod` when generating the `ConfigMap` mounted in `/etc/clickhouse-server/users.d/`
+ you can configure password hashes, authorization restrictions, database-level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
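+ # Illustrative only (hypothetical user name and values), using the flattened key style shown in the linked examples:
+ #   users:
+ #     my_user/networks/ip: "::/0"
+ #     my_user/profile: default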
+ profiles:
+ type: object
+ description: |
+ allows configuring the `<profiles>` section in each `Pod` when generating the `ConfigMap` mounted in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profiles
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configuring the `<quotas>` section in each `Pod` when generating the `ConfigMap` mounted in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configuring `clickhouse-server` settings in each `Pod` when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows defining the content of any settings file in each `Pod` when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key may contain a prefix like USERS, COMMON, HOST or config.d, users.d, conf.d; wrong prefixes will be ignored, subfolders will be ignored as well
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
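+ # Illustrative example (not part of the schema): a hypothetical `files` entry with an assumed file
+ # name; the optional COMMON/USERS/HOST prefix routes the file to config.d/, users.d/ or conf.d/:
+ #   files:
+ #     COMMON/custom-settings.xml: |
+ #       <clickhouse>
+ #         <max_connections>1024</max_connections>
+ #       </clickhouse>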
+ clusters:
+ type: array
+ description: |
+ describes the ClickHouse clusters layout and allows changing settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSets, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will be rendered into the ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters are used for the Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from the top `chi` level), then you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of ClickHouse servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configuring the `zookeeper` settings section in each `Pod` only in the current ClickHouse cluster, when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`
+ overrides the top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in each `Pod` only in one cluster when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`
+ overrides the top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of any settings file in each `Pod` of the current cluster when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides the top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the template names which will be used to generate Kubernetes resources for the selected cluster
+ overrides the top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
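+ # Illustrative example (not part of the schema): hypothetical cluster `secret` variants,
+ # either auto-generated or referencing an assumed Secret named `clickhouse-secret`:
+ #   secret:
+ #     auto: "true"
+ #   # or:
+ #   secret:
+ #     valueFrom:
+ #       secretKeyRef:
+ #         name: clickhouse-secret
+ #         key: secret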
+ layout:
+ type: object
+ description: |
+ describes the current cluster layout: how many shards are in the cluster and how many replicas are in each shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ type:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ shardsCount:
+ type: integer
+ description: "how much shards for current ClickHouse cluster will run in Kubernetes, each shard contains shared-nothing part of data and contains set of replicas, cluster contains 1 shard by default"
+ replicasCount:
+ type: integer
+ description: "how much replicas in each shards for current ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, every shard contains 1 replica by default"
+ shards:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration`, cluster-level `chi.spec.configuration.clusters` settings for each shard separately, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default, allows setting up the shard weight used when inserting into tables with the `Distributed` engine,
+ will be applied within the ConfigMap mounted at /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setting up the option used when inserting into tables with the `Distributed` engine so that data is inserted into only one live replica, while other replicas download the inserted data during replication,
+ will be applied within the ConfigMap mounted at /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in each `Pod` only in one shard when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`
+ overrides the top-level `chi.spec.configuration.settings` and the cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of any settings file in each `Pod` only in one shard when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides the top-level `chi.spec.configuration.files` and the cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the template names which will be used to generate Kubernetes resources for the selected shard
+ overrides the top-level `chi.spec.configuration.templates` and the cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how many replicas in the selected shard of the selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ a shard contains 1 replica by default
+ overrides the cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in the `Pod` of only one replica when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/conf.d/`
+ overrides the top-level `chi.spec.configuration.settings`, the cluster-level `chi.spec.configuration.clusters.settings` and the shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of any settings file in the `Pod` of only one replica when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides the top-level `chi.spec.configuration.files`, the cluster-level `chi.spec.configuration.clusters.files` and the shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the template names which will be used to generate Kubernetes resources for the selected replica
+ overrides the top-level `chi.spec.configuration.templates`, the cluster-level `chi.spec.configuration.clusters.templates` and the shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in the `Pod` of only one replica when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/conf.d/`
+ overrides the top-level `chi.spec.configuration.settings` and the cluster-level `chi.spec.configuration.clusters.settings`, ignored if the shard-level `chi.spec.configuration.clusters.layout.shards` is present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of any settings file in each `Pod` of only one replica when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides the top-level `chi.spec.configuration.files` and the cluster-level `chi.spec.configuration.clusters.files`, ignored if `chi.spec.configuration.clusters.layout.shards` is present
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the template names which will be used to generate Kubernetes resources for the selected replica
+ overrides the top-level `chi.spec.configuration.templates` and the cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in the `Pod` of only one shard related to the current replica when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/conf.d/`
+ overrides the top-level `chi.spec.configuration.settings`, the cluster-level `chi.spec.configuration.clusters.settings` and the replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of any settings file in each `Pod` of only one shard related to the current replica when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides the top-level `chi.spec.configuration.files` and the cluster-level `chi.spec.configuration.clusters.files`, ignored if `chi.spec.configuration.clusters.layout.shards` is present
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the template names which will be used to generate Kubernetes resources for the selected replica
+ overrides the top-level `chi.spec.configuration.templates`, the cluster-level `chi.spec.configuration.clusters.templates` and the replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will use during apply to generate `clickhose-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in each `Pod` where this template is applied, when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of any settings file in each `Pod` where this template is applied, when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will be used to render the `Pod` inside `StatefulSet.spec` and allows defining the rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about aviailable template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: "use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity"
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: "allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from the
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
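+ # Illustrative example (not part of the schema): a hypothetical volumeClaimTemplate named
+ # `data-volume-template` requesting 100Gi and retained when the StatefulSet is deleted:
+ #   volumeClaimTemplates:
+ #     - name: data-volume-template
+ #       reclaimPolicy: Retain
+ #       spec:
+ #         accessModes:
+ #           - ReadWriteOnce
+ #         resources:
+ #           requests:
+ #             storage: 100Gi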
+ serviceTemplates:
+ type: array
+ description: |
+ allows defining templates for rendering `Service` resources, which get their endpoints from Pods scoped chi-wide, cluster-wide, shard-wide or replica-wide
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from the
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: "allows define format for generated `Service` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about aviailable template variables"
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows passing standard object metadata from the template to the Service
+ Can be used to define Cloud Provider specific metadata which impacts the behavior of the Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
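+ # Illustrative example (not part of the schema): a hypothetical serviceTemplate named `svc-template`
+ # exposing the standard HTTP and TCP ports through a LoadBalancer:
+ #   serviceTemplates:
+ #     - name: svc-template
+ #       generateName: "service-{chi}"
+ #       spec:
+ #         type: LoadBalancer
+ #         ports:
+ #           - name: http
+ #             port: 8123
+ #           - name: tcp
+ #             port: 9000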
+ useTemplates:
+ type: array
+ description: "list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `Chi` manifest during render Kubernetes resources to create related ClickHouse clusters"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
diff --git a/deploy/operatorhub/0.23.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..d8ef8ba5e
--- /dev/null
+++ b/deploy/operatorhub/0.23.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,1219 @@
+# Template Parameters:
+#
+# KIND=ClickHouseInstallationTemplate
+# SINGULAR=clickhouseinstallationtemplate
+# PLURAL=clickhouseinstallationtemplates
+# SHORT=chit
+# OPERATOR_VERSION=0.23.4
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.23.4
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallationTemplate
+ singular: clickhouseinstallationtemplate
+ plural: clickhouseinstallationtemplates
+ shortNames:
+ - chit
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: CHI status
+ jsonPath: .status.status
+ - name: hosts-unchanged
+ type: integer
+ description: Unchanged hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUnchanged
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: hosts-delete
+ type: integer
+ description: Hosts to be deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDelete
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more ClickHouse clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: "Current ClickHouseInstallation manifest status, contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other"
+ properties:
+ chop-version:
+ type: string
+ description: "ClickHouse operator version"
+ chop-commit:
+ type: string
+ description: "ClickHouse operator git commit SHA"
+ chop-date:
+ type: string
+ description: "ClickHouse operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this CHI"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized CHI requested"
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized CHI completed"
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as follows:
+ - When `stop` is `1`, the operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service`s deleted. All PVCs are kept intact.
+ - When `stop` is `0`, the operator sets `Replicas: 1`, `Pod`s and `Service`s are created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile.
+ This option is used in rare cases when a forced restart is required and is typically removed after use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows troubleshooting Pods in the CrashLoopBack state.
+ This may happen when a wrong configuration is applied, in which case `clickhouse-server` wouldn't start.
+ Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+ Defines the current ClickHouseInstallationTemplate application options for target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
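+ # Illustrative example (not part of the schema): a hypothetical chit `templating` section that
+ # auto-applies the template to every CHI matching an assumed `env: dev` selector:
+ #   templating:
+ #     policy: auto
+ #     chiSelector:
+ #       env: dev
+ #   # with policy "manual" (the default) a CHI has to reference the chit explicitly via `useTemplates`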
+ reconciling:
+ type: object
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`"
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ defaults:
+ type: object
+ description: |
+ defines the default behavior for the whole ClickHouseInstallation, some behavior can be re-defined on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ defines whether replicas should be specified by FQDN in the generated cluster configuration.
+ In case of "no" short hostnames are used and clickhouse-server will rely on the Kubernetes default DNS suffixes for lookup
+ "yes" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows changing the `distributed_ddl` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for one `Service` resource which will created by `clickhouse-operator` which cover all clusters in whole `chi` resource"
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configuring the `zookeeper` settings section in each `Pod` when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately, see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, Zookeeper (or its clickhouse-keeper replacement) is used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ users:
+ type: object
+ description: |
+ allows configuring the `users` settings section in each `Pod` when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/users.d/`
+ you can configure password hashes, authorization restrictions, database-level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: |
+ allows configuring the `profiles` settings section in each `Pod` when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profiles
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configuring the `quotas` settings section in each `Pod` when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configuring `clickhouse-server` settings in each `Pod` when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows defining the content of any settings file in each `Pod` when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key may contain a prefix like USERS, COMMON, HOST or config.d, users.d, conf.d; wrong prefixes will be ignored, subfolders will be ignored as well
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes the ClickHouse clusters layout and allows changing settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSets, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will be rendered into the ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters are used for the Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from the top `chi` level), then you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of ClickHouse servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configuring the `zookeeper` settings section in each `Pod` only in the current ClickHouse cluster, when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`
+ overrides the top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in each `Pod` only in one cluster when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`
+ overrides the top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of any settings file in each `Pod` of the current cluster when generating the `ConfigMap` that is mounted into `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides the top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the template names which will be used to generate Kubernetes resources for the selected cluster
+ overrides the top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
+ layout:
+ type: object
+ description: |
+ describes the current cluster layout: how many shards are in the cluster and how many replicas are in each shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ type:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ shardsCount:
+ type: integer
+ description: "how much shards for current ClickHouse cluster will run in Kubernetes, each shard contains shared-nothing part of data and contains set of replicas, cluster contains 1 shard by default"
+ replicasCount:
+ type: integer
+ description: "how much replicas in each shards for current ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, every shard contains 1 replica by default"
+ shards:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration`, cluster-level `chi.spec.configuration.clusters` settings for each shard separately, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default; sets the shard weight used when inserting into tables with the `Distributed` engine,
+ applied via the ConfigMap mounted at /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.replicasCount` > 1 and `false` otherwise
+ when enabled, inserts into tables with the `Distributed` engine go to only one live replica and the other replicas fetch the inserted data via replication,
+ applied via the ConfigMap mounted at /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
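+ # Sketch of a per-shard override (hypothetical name and values):
+ #   shards:
+ #     - name: shard0
+ #       weight: 1
+ #       internalReplication: "true"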
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings inside the ... tag for each `Pod` of this shard only, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/config.d/`
+ overrides top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of arbitrary settings files for each `Pod` of this shard only, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, names of the templates used to generate Kubernetes resources for this shard
+ overrides top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how many replicas of the selected shard will run in Kubernetes; each replica is a separate `StatefulSet` containing a single `Pod` with a `clickhouse-server` instance,
+ a shard contains 1 replica by default
+ overrides cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows overriding the behavior of selected replicas inherited from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, sets up `Pod.spec.containers.ports` with name `tcp` for the selected replica, overrides `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connecting to `clickhouse-server` via the TCP Native protocol through a kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, sets up `Pod.spec.containers.ports` with name `http` for the selected replica, overrides `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connecting to `clickhouse-server` via the HTTP protocol through a kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, sets up `Pod.spec.containers.ports` with name `interserver` for the selected replica, overrides `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows replicas inside the same shard to connect to each other over HTTP when fetching replicated data parts
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings inside the ... tag for the `Pod` of this replica only, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/conf.d/`
+ overrides top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of arbitrary settings files for the `Pod` of this replica only, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, names of the templates used to generate Kubernetes resources for this replica
+ overrides top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings inside the ... tag for the `Pod` of this replica only, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/conf.d/`
+ overrides top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`; ignored if shard-level `chi.spec.configuration.clusters.layout.shards` is present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of arbitrary settings files for each `Pod` of this replica only, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`; ignored if `chi.spec.configuration.clusters.layout.shards` is present
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, names of the templates used to generate Kubernetes resources for this replica
+ overrides top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, sets up `Pod.spec.containers.ports` with name `tcp` for the selected shard, overrides `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connecting to `clickhouse-server` via the TCP Native protocol through a kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, sets up `Pod.spec.containers.ports` with name `http` for the selected shard, overrides `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connecting to `clickhouse-server` via the HTTP protocol through a kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, sets up `Pod.spec.containers.ports` with name `interserver` for the selected shard, overrides `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows replicas inside the same shard to connect to each other over HTTP when fetching replicated data parts
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings inside the ... tag for the `Pod` of this shard (related to the current replica) only, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/conf.d/`
+ overrides top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of arbitrary settings files for each `Pod` of this shard (related to the current replica) only, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ overrides top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, ignored if `chi.spec.configuration.clusters.layout.shards` is present
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, names of the templates used to generate Kubernetes resources for this shard
+ overrides top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will use during apply to generate `clickhose-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, sets `tcp_port` inside `clickhouse-server` settings for each Pod where the current template applies
+ if specified, it should be equal to `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, sets `http_port` inside `clickhouse-server` settings for each Pod where the current template applies
+ if specified, it should be equal to `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, sets `interserver_http_port` inside `clickhouse-server` settings for each Pod where the current template applies
+ if specified, it should be equal to `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings inside the ... tag for each `Pod` where this template applies, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows defining the content of arbitrary settings files for each `Pod` where this template applies, rendered into a `ConfigMap` that is mounted at `/etc/clickhouse-server/config.d/`, `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate is used to render the `Pod` inside `StatefulSet.spec` and allows defining the rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about aviailable template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: "use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity"
+ metadata:
+ type: object
+ description: |
+ allows passing standard object metadata from the template to the Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
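+ # Sketch of a podTemplate with zone and podDistribution (hypothetical name, image and zone value):
+ #   podTemplates:
+ #     - name: pod-template-example
+ #       zone:
+ #         key: "topology.kubernetes.io/zone"
+ #         values:
+ #           - "us-east-1a"
+ #       podDistribution:
+ #         - type: ShardAntiAffinity
+ #       spec:
+ #         containers:
+ #           - name: clickhouse
+ #             image: clickhouse/clickhouse-server:23.8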
+ volumeClaimTemplates:
+ type: array
+ description: "allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`,
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows passing standard object metadata from the template to the PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows defining all aspects of the `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
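+ # Sketch of a volumeClaimTemplate (hypothetical name and size):
+ #   volumeClaimTemplates:
+ #     - name: data-volume-template
+ #       spec:
+ #         accessModes:
+ #           - ReadWriteOnce
+ #         resources:
+ #           requests:
+ #             storage: 10Gi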
+ serviceTemplates:
+ type: array
+ description: |
+ allows defining a template for rendering a `Service` that gets endpoints from Pods scoped at the chi-wide, cluster-wide, shard-wide or replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from
+ chi-level `chi.spec.defaults.templates.serviceTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`,
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: "allows define format for generated `Service` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about aviailable template variables"
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows passing standard object metadata from the template to the Service
+ Can be used to define Cloud Provider specific metadata which impacts the behavior of the Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describes the behavior of the generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
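+ # Sketch of a serviceTemplate (hypothetical name; the `{chi}` macro is assumed per the docs linked above):
+ #   serviceTemplates:
+ #     - name: service-template-example
+ #       generateName: "service-{chi}"
+ #       spec:
+ #         type: LoadBalancer
+ #         ports:
+ #           - name: http
+ #             port: 8123
+ #           - name: tcp
+ #             port: 9000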
+ useTemplates:
+ type: array
+ description: "list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `Chi` manifest during render Kubernetes resources to create related ClickHouse clusters"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
diff --git a/deploy/operatorhub/0.23.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.23.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
new file mode 100644
index 000000000..07fdeca31
--- /dev/null
+++ b/deploy/operatorhub/0.23.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -0,0 +1,263 @@
+# Template Parameters:
+#
+# OPERATOR_VERSION=0.23.4
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ labels:
+ clickhouse-keeper.altinity.com/chop: 0.23.4
+spec:
+ group: clickhouse-keeper.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseKeeperInstallation
+ singular: clickhousekeeperinstallation
+ plural: clickhousekeeperinstallations
+ shortNames:
+ - chk
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: status
+ type: string
+ description: CHK status
+ jsonPath: .status.status
+ - name: replicas
+ type: integer
+ description: Replica count
+ priority: 1 # show in wide view
+ jsonPath: .status.replicas
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ type: object
+ required:
+ - spec
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one ClickHouse Keeper cluster"
+ properties:
+ apiVersion:
+ type: string
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ kind:
+ type: string
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Current ClickHouseKeeperInstallation status, contains many fields like overall status, desired replicas and ready replica list with their endpoints
+ properties:
+ chop-version:
+ type: string
+ description: "ClickHouse operator version"
+ chop-commit:
+ type: string
+ description: "ClickHouse operator git commit SHA"
+ chop-date:
+ type: string
+ description: "ClickHouse operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this CHI"
+ status:
+ type: string
+ description: "Status"
+ replicas:
+ type: integer
+ format: int32
+ description: Replicas is the number of desired replicas in the cluster
+ readyReplicas:
+ type: array
+ description: ReadyReplicas is the array of endpoints of the ready replicas in the cluster
+ items:
+ type: object
+ properties:
+ host:
+ type: string
+ description: DNS name or IP address of the Keeper node
+ port:
+ type: integer
+ minimum: 0
+ maximum: 65535
+ description: TCP port used to connect to the Keeper node
+ secure:
+ type: string
+ description: if a secure connection to Keeper is required
+ normalized:
+ type: object
+ description: "Normalized CHK requested"
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized CHK completed"
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: KeeperSpec defines the desired state of a Keeper cluster
+ properties:
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ replicas:
+ type: integer
+ format: int32
+ description: |
+ Replicas is the expected size of the keeper cluster.
+ The valid range of size is from 1 to 7.
+ minimum: 1
+ maximum: 7
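+ # Minimal ClickHouseKeeperInstallation sketch (hypothetical name):
+ #   apiVersion: clickhouse-keeper.altinity.com/v1
+ #   kind: ClickHouseKeeperInstallation
+ #   metadata:
+ #     name: keeper-example
+ #   spec:
+ #     replicas: 3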
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ settings:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-keeper` instance"
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes the ClickHouseKeeper clusters layout and allows changing settings at cluster level and replica level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of ClickHouseKeeper servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ layout:
+ type: object
+ description: |
+ describes the current cluster layout: how many replicas
+ # nullable: true
+ properties:
+ replicasCount:
+ type: integer
+ description: "how many replicas in ClickHouseKeeper cluster"
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ podTemplates:
+ type: array
+ description: |
+ podTemplate is used to render the `Pod` inside `StatefulSet.spec` and allows defining the rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ metadata:
+ type: object
+ description: |
+ allows passing standard object metadata from the template to the Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: "allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`,
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ metadata:
+ type: object
+ description: |
+ allows passing standard object metadata from the template to the PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows defining all aspects of the `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows defining a template for rendering a `Service` that gets endpoints from Pods scoped at the chi-wide, cluster-wide, shard-wide or replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from
+ chi-level `chi.spec.defaults.templates.serviceTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`,
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows passing standard object metadata from the template to the Service
+ Can be used to define Cloud Provider specific metadata which impacts the behavior of the Service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describes the behavior of the generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
diff --git a/deploy/operatorhub/0.23.4/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.4/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..b53ef91d1
--- /dev/null
+++ b/deploy/operatorhub/0.23.4/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,415 @@
+# Template Parameters:
+#
+# NONE
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.23.4
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseOperatorConfiguration
+ singular: clickhouseoperatorconfiguration
+ plural: clickhouseoperatorconfigurations
+ shortNames:
+ - chopconf
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: namespaces
+ type: string
+ description: Watch namespaces
+ jsonPath: .status
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ schema:
+ openAPIV3Schema:
+ type: object
+ description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md"
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ status:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ Allows defining settings of the clickhouse-operator.
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml
+ Check into etc-clickhouse-operator* ConfigMaps if you need more control
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ watch:
+ type: object
+ description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
+ properties:
+ namespaces:
+ type: array
+ description: "List of namespaces where clickhouse-operator watches for events."
+ items:
+ type: string
+ clickhouse:
+ type: object
+ description: "Clickhouse related parameters used by clickhouse-operator"
+ properties:
+ configuration:
+ type: object
+ properties:
+ file:
+ type: object
+ properties:
+ path:
+ type: object
+ description: |
+ Each 'path' can be either absolute or relative.
+ In case path is absolute - it is used as is.
+ In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
+ properties:
+ common:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
+ Default value - config.d
+ host:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
+ Default value - conf.d
+ user:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files with users settings are located.
+ Files are common for all instances within a CHI.
+ Default value - users.d
+ user:
+ type: object
+ description: "Default parameters for any user which will create"
+ properties:
+ default:
+ type: object
+ properties:
+ profile:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ quota:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ networksIP:
+ type: array
+ description: "ClickHouse server configuration `...` for any "
+ items:
+ type: string
+ password:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ network:
+ type: object
+ description: "Default network parameters for any user which will create"
+ properties:
+ hostRegexpTemplate:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ configurationRestartPolicy:
+ type: object
+ description: "Configuration restart policy describes what configuration changes require ClickHouse restart"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ rules:
+ type: array
+ description: "Set of configuration rules for specified ClickHouse version"
+ items:
+ type: object
+ description: "setting: value pairs for configuration restart policy"
+ access:
+ type: object
+ description: "parameters which use for connect to clickhouse from clickhouse-operator deployment"
+ properties:
+ scheme:
+ type: string
+ description: "The scheme to user for connecting to ClickHouse. Possible values: http, https, auto"
+ username:
+ type: string
+ description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ password:
+ type: string
+ description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ rootCA:
+ type: string
+ description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse"
+ secret:
+ type: object
+ properties:
+ namespace:
+ type: string
+ description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ name:
+ type: string
+ description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ port:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "Port to be used by operator to connect to ClickHouse instances"
+ timeouts:
+ type: object
+ description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances, In seconds"
+ properties:
+ connect:
+ type: integer
+ minimum: 1
+ maximum: 10
+ description: "Timout to setup connection from the operator to ClickHouse instances. In seconds."
+ query:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: "Timout to perform SQL query from the operator to ClickHouse instances. In seconds."
+ metrics:
+ type: object
+ description: "parameters which use for connect to fetch metrics from clickhouse by clickhouse-operator"
+ properties:
+ timeouts:
+ type: object
+ description: |
+ Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
+ Specified in seconds.
+ properties:
+ collect:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: |
+ Timeout used to limit metrics collection request. In seconds.
+ Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
+ All collected metrics are returned.
+ template:
+ type: object
+ description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
+ properties:
+ chi:
+ type: object
+ properties:
+ policy:
+ type: string
+ description: |
+ CHI template updates handling policy
+ Possible policy values:
+ - ReadOnStart. Accept CHIT updates on the operator's start only.
+ - ApplyOnNextReconcile. Accept CHIT updates at any time. Apply new CHITs on the next regular reconcile of the CHI
+ enum:
+ - ""
+ - "ReadOnStart"
+ - "ApplyOnNextReconcile"
+ path:
+ type: string
+ description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located."
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileCHIsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default"
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do if the created StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. delete - delete newly created problematic StatefulSet.
+ 3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for created/updated StatefulSet to be Ready"
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for created/updated StatefulSet status"
+ onFailure:
+ type: string
+ description: |
+ What to do if the updated StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
+ 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
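+ # Sketch of statefulSet reconcile policy settings (hypothetical values):
+ #   spec:
+ #     reconcile:
+ #       statefulSet:
+ #         create:
+ #           onFailure: ignore
+ #         update:
+ #           timeout: 300
+ #           pollInterval: 5
+ #           onFailure: rollback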
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively, before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude: &TypeStringBool
+ type: string
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster"
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ annotation:
+ type: object
+ description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating annotations from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ include annotations with names from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ description: |
+ When propagating annotations from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ exclude annotations with names from the following list
+ items:
+ type: string
+ label:
+ type: object
+ description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ include labels from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ items:
+ type: string
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ exclude labels from the following list
+ appendScope:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether to append *Scope* labels to StatefulSet and Pod
+ - "LabelShardScopeIndex"
+ - "LabelReplicaScopeIndex"
+ - "LabelCHIScopeIndex"
+ - "LabelCHIScopeCycleSize"
+ - "LabelCHIScopeCycleIndex"
+ - "LabelCHIScopeCycleOffset"
+ - "LabelClusterScopeIndex"
+ - "LabelClusterScopeCycleSize"
+ - "LabelClusterScopeCycleIndex"
+ - "LabelClusterScopeCycleOffset"
+ statefulSet:
+ type: object
+ description: "define StatefulSet-specific parameters"
+ properties:
+ revisionHistoryLimit:
+ type: integer
+ description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. \nLook details in `statefulset.spec.revisionHistoryLimit`\n"
+ pod:
+ type: object
+ description: "define pod specific parameters"
+ properties:
+ terminationGracePeriod:
+ type: integer
+ description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n"
+ logger:
+ type: object
+ description: "allow setup clickhouse-operator logger behavior"
+ properties:
+ logtostderr:
+ type: string
+ description: "boolean, allows logs to stderr"
+ alsologtostderr:
+ type: string
+ description: "boolean allows logs to stderr and files both"
+ v:
+ type: string
+ description: "verbosity level of clickhouse-operator log, default - 1 max - 9"
+ stderrthreshold:
+ type: string
+ vmodule:
+ type: string
+ description: |
+ Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level.
+ Ex.: file*=2 sets the 'V' to 2 in all files with names like file*.
+ log_backtrace_at:
+ type: string
+ description: |
+ It can be set to a file and line number with a logging line.
+ Ex.: file.go:123
+ Each time this line is executed, a stack trace is written to the Info log.
diff --git a/dev/generate_helm_chart.sh b/dev/generate_helm_chart.sh
index 309ee2e60..cba8278df 100755
--- a/dev/generate_helm_chart.sh
+++ b/dev/generate_helm_chart.sh
@@ -182,7 +182,7 @@ function update_deployment_resource() {
a_data="${annotations}" yq e -i '.podAnnotations |= env(a_data)' "${values_yaml}"
yq e -i '.spec.template.metadata.annotations = {}' "${file}"
- yq e -i '.spec.template.metadata.labels |= "{{ include \"altinity-clickhouse-operator.selectorLabels\" . | nindent 8 }}"' "${file}"
+ yq e -i '.spec.template.metadata.labels |= "{{ include \"altinity-clickhouse-operator.labels\" . | nindent 8 }}"' "${file}"
yq e -i '.spec.template.metadata.annotations += {"{{ toYaml .Values.podAnnotations | nindent 8 }}": null}' "${file}"
yq e -i '.spec.template.spec.imagePullSecrets |= "{{ toYaml .Values.imagePullSecrets | nindent 8 }}"' "${file}"
yq e -i '.spec.template.spec.serviceAccountName |= "{{ include \"altinity-clickhouse-operator.serviceAccountName\" . }}"' "${file}"
diff --git a/go.mod b/go.mod
index e58148c0a..2a94758ea 100644
--- a/go.mod
+++ b/go.mod
@@ -32,7 +32,6 @@ require (
github.com/go-logr/logr v1.2.4
github.com/golang/glog v1.0.0
github.com/google/uuid v1.4.0
- github.com/gosimple/slug v1.12.0
github.com/imdario/mergo v0.3.15
github.com/juliangruber/go-intersect v1.0.0
github.com/kubernetes-sigs/yaml v1.1.0
@@ -73,7 +72,6 @@ require (
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/gookit/color v1.4.2 // indirect
- github.com/gosimple/unidecode v1.0.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -103,7 +101,7 @@ require (
golang.org/x/tools v0.9.1 // indirect
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/protobuf v1.31.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/component-base v0.27.2 // indirect
diff --git a/go.sum b/go.sum
index 19488b6e7..6a3d0d4f5 100644
--- a/go.sum
+++ b/go.sum
@@ -225,10 +225,6 @@ github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b0
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gosimple/slug v1.12.0 h1:xzuhj7G7cGtd34NXnW/yF0l+AGNfWqwgh/IXgFy7dnc=
-github.com/gosimple/slug v1.12.0/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ=
-github.com/gosimple/unidecode v1.0.1 h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o=
-github.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
@@ -804,8 +800,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go
new file mode 100644
index 000000000..b4d84a316
--- /dev/null
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go
@@ -0,0 +1,109 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+// ChkStatus defines status section of ClickHouseKeeper resource
+type ChkStatus struct {
+ CHOpVersion string `json:"chop-version,omitempty" yaml:"chop-version,omitempty"`
+ CHOpCommit string `json:"chop-commit,omitempty" yaml:"chop-commit,omitempty"`
+ CHOpDate string `json:"chop-date,omitempty" yaml:"chop-date,omitempty"`
+ CHOpIP string `json:"chop-ip,omitempty" yaml:"chop-ip,omitempty"`
+
+ Status string `json:"status,omitempty" yaml:"status,omitempty"`
+
+ // Replicas is the number of desired replicas in the cluster
+ Replicas int32 `json:"replicas,omitempty"`
+
+ // ReadyReplicas is the list of endpoints of the ready replicas in the cluster
+ ReadyReplicas []apiChi.ChiZookeeperNode `json:"readyReplicas,omitempty"`
+
+ Pods []string `json:"pods,omitempty" yaml:"pods,omitempty"`
+ PodIPs []string `json:"pod-ips,omitempty" yaml:"pod-ips,omitempty"`
+ FQDNs []string `json:"fqdns,omitempty" yaml:"fqdns,omitempty"`
+ NormalizedCHK *ClickHouseKeeperInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"`
+ NormalizedCHKCompleted *ClickHouseKeeperInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"`
+}
+
+ // CopyFrom copies the state of the given ChkStatus `from` into the receiver ChkStatus.
+func (s *ChkStatus) CopyFrom(from *ChkStatus, opts apiChi.CopyCHIStatusOptions) {
+ if s == nil || from == nil {
+ return
+ }
+
+ if opts.InheritableFields {
+ }
+
+ if opts.MainFields {
+ s.CHOpVersion = from.CHOpVersion
+ s.CHOpCommit = from.CHOpCommit
+ s.CHOpDate = from.CHOpDate
+ s.CHOpIP = from.CHOpIP
+ s.Status = from.Status
+ s.Replicas = from.Replicas
+ s.ReadyReplicas = from.ReadyReplicas
+ s.Pods = from.Pods
+ s.PodIPs = from.PodIPs
+ s.FQDNs = from.FQDNs
+ s.NormalizedCHK = from.NormalizedCHK
+ }
+
+ if opts.Normalized {
+ s.NormalizedCHK = from.NormalizedCHK
+ }
+
+ if opts.WholeStatus {
+ s.CHOpVersion = from.CHOpVersion
+ s.CHOpCommit = from.CHOpCommit
+ s.CHOpDate = from.CHOpDate
+ s.CHOpIP = from.CHOpIP
+ s.Status = from.Status
+ s.Replicas = from.Replicas
+ s.ReadyReplicas = from.ReadyReplicas
+ s.Pods = from.Pods
+ s.PodIPs = from.PodIPs
+ s.FQDNs = from.FQDNs
+ s.NormalizedCHK = from.NormalizedCHK
+ s.NormalizedCHKCompleted = from.NormalizedCHKCompleted
+ }
+}
+
+// HasNormalizedCHKCompleted is a checker
+func (s *ChkStatus) HasNormalizedCHKCompleted() bool {
+ return s.GetNormalizedCHKCompleted() != nil
+}
+
+// HasNormalizedCHK is a checker
+func (s *ChkStatus) HasNormalizedCHK() bool {
+ return s.GetNormalizedCHK() != nil
+}
+
+// ClearNormalizedCHK clears normalized CHK in status
+func (s *ChkStatus) ClearNormalizedCHK() {
+ s.NormalizedCHK = nil
+}
+
+// GetNormalizedCHK gets target CHK
+func (s *ChkStatus) GetNormalizedCHK() *ClickHouseKeeperInstallation {
+ return s.NormalizedCHK
+}
+
+ // GetNormalizedCHKCompleted gets the completed CHK
+func (s *ChkStatus) GetNormalizedCHKCompleted() *ClickHouseKeeperInstallation {
+ return s.NormalizedCHKCompleted
+}
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
index 4b107090d..712b2b5ae 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
@@ -29,10 +29,16 @@ import (
// ClickHouseKeeperInstallation defines a ClickHouse Keeper ChkCluster
type ClickHouseKeeperInstallation struct {
- meta.TypeMeta `json:",inline" yaml:",inline"`
- meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
- Spec ChkSpec `json:"spec" yaml:"spec"`
- Status *ChkStatus `json:"status,omitempty" yaml:"status,omitempty"`
+ meta.TypeMeta `json:",inline" yaml:",inline"`
+ meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
+
+ Spec ChkSpec `json:"spec" yaml:"spec"`
+ Status *ChkStatus `json:"status,omitempty" yaml:"status,omitempty"`
+
+ Runtime ClickHouseKeeperInstallationRuntime `json:"-" yaml:"-"`
+}
+
+type ClickHouseKeeperInstallationRuntime struct {
statusCreatorMutex sync.Mutex `json:"-" yaml:"-"`
}
@@ -48,8 +54,8 @@ func (chk *ClickHouseKeeperInstallation) EnsureStatus() *ChkStatus {
}
// Otherwise, we need to acquire a lock to initialize the field.
- chk.statusCreatorMutex.Lock()
- defer chk.statusCreatorMutex.Unlock()
+ chk.Runtime.statusCreatorMutex.Lock()
+ defer chk.Runtime.statusCreatorMutex.Unlock()
// Note that we have to check this property again to avoid a TOCTOU bug.
if chk.Status == nil {
chk.Status = &ChkStatus{}
@@ -153,8 +159,8 @@ func (chk *ClickHouseKeeperInstallation) MergeFrom(from *ClickHouseKeeperInstall
// ChkSpec defines spec section of ClickHouseKeeper resource
type ChkSpec struct {
- Configuration *ChkConfiguration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
- Templates *ChkTemplates `json:"templates,omitempty" yaml:"templates,omitempty"`
+ Configuration *ChkConfiguration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
+ Templates *apiChi.ChiTemplates `json:"templates,omitempty" yaml:"templates,omitempty"`
}
func (spec ChkSpec) GetConfiguration() *ChkConfiguration {
@@ -168,7 +174,7 @@ func (spec ChkSpec) EnsureConfiguration() *ChkConfiguration {
return spec.Configuration
}
-func (spec ChkSpec) GetTemplates() *ChkTemplates {
+func (spec ChkSpec) GetTemplates() *apiChi.ChiTemplates {
return spec.Templates
}
@@ -270,242 +276,6 @@ func (c *ChkClusterLayout) GetReplicasCount() int {
return c.ReplicasCount
}
-// ChkTemplates defines templates section of .spec
-type ChkTemplates struct {
- PodTemplates []apiChi.ChiPodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates,omitempty"`
- VolumeClaimTemplates []apiChi.ChiVolumeClaimTemplate `json:"volumeClaimTemplates,omitempty" yaml:"volumeClaimTemplates,omitempty"`
- ServiceTemplates []apiChi.ChiServiceTemplate `json:"serviceTemplates,omitempty" yaml:"serviceTemplates,omitempty"`
-
- // Index maps template name to template itself
- PodTemplatesIndex *apiChi.PodTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
- VolumeClaimTemplatesIndex *apiChi.VolumeClaimTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
- ServiceTemplatesIndex *apiChi.ServiceTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
-}
-
-// NewChkTemplates creates new ChkTemplates object
-func NewChkTemplates() *ChkTemplates {
- return new(ChkTemplates)
-}
-
-// Len returns accumulated len of all templates
-func (templates *ChkTemplates) Len() int {
- if templates == nil {
- return 0
- }
-
- return 0 +
- len(templates.PodTemplates) +
- len(templates.VolumeClaimTemplates) +
- len(templates.ServiceTemplates)
-}
-
-// MergeFrom merges from specified object
-func (templates *ChkTemplates) MergeFrom(from *ChkTemplates, _type apiChi.MergeType) *ChkTemplates {
- if from.Len() == 0 {
- return templates
- }
-
- if templates == nil {
- templates = NewChkTemplates()
- }
-
- // Merge sections
-
- templates.mergePodTemplates(from)
- templates.mergeVolumeClaimTemplates(from)
- templates.mergeServiceTemplates(from)
-
- return templates
-}
-
-// mergePodTemplates merges pod templates section
-func (templates *ChkTemplates) mergePodTemplates(from *ChkTemplates) {
- if len(from.PodTemplates) == 0 {
- return
- }
-
- // We have templates to merge from
- // Loop over all 'from' templates and either copy it in case no such template in receiver or merge it
- for fromIndex := range from.PodTemplates {
- fromTemplate := &from.PodTemplates[fromIndex]
-
- // Try to find entry with the same name among local templates in receiver
- sameNameFound := false
- for toIndex := range templates.PodTemplates {
- toTemplate := &templates.PodTemplates[toIndex]
- if toTemplate.Name == fromTemplate.Name {
- // Receiver already have such a template
- sameNameFound = true
-
- //toSpec := &toTemplate.Spec
- //fromSpec := &fromTemplate.Spec
- //_ = mergo.Merge(toSpec, *fromSpec, mergo.WithGrowSlice, mergo.WithOverride, mergo.WithOverrideEmptySlice)
-
- // Merge `to` template with `from` template
- _ = mergo.Merge(toTemplate, *fromTemplate, mergo.WithSliceDeepMerge)
- // Receiver `to` template is processed
- break
- }
- }
-
- if !sameNameFound {
- // Receiver does not have template with such a name
- // Append template from `from`
- templates.PodTemplates = append(templates.PodTemplates, *fromTemplate.DeepCopy())
- }
- }
-}
-
-// mergeVolumeClaimTemplates merges volume claim templates section
-func (templates *ChkTemplates) mergeVolumeClaimTemplates(from *ChkTemplates) {
- if len(from.VolumeClaimTemplates) == 0 {
- return
- }
-
- // We have templates to merge from
- // Loop over all 'from' templates and either copy it in case no such template in receiver or merge it
- for fromIndex := range from.VolumeClaimTemplates {
- fromTemplate := &from.VolumeClaimTemplates[fromIndex]
-
- // Try to find entry with the same name among local templates in receiver
- sameNameFound := false
- for toIndex := range templates.VolumeClaimTemplates {
- toTemplate := &templates.VolumeClaimTemplates[toIndex]
- if toTemplate.Name == fromTemplate.Name {
- // Receiver already have such a template
- sameNameFound = true
- // Merge `to` template with `from` template
- _ = mergo.Merge(toTemplate, *fromTemplate, mergo.WithSliceDeepMerge)
- // Receiver `to` template is processed
- break
- }
- }
-
- if !sameNameFound {
- // Receiver does not have template with such a name
- // Append template from `from`
- templates.VolumeClaimTemplates = append(templates.VolumeClaimTemplates, *fromTemplate.DeepCopy())
- }
- }
-}
-
-// mergeServiceTemplates merges service templates section
-func (templates *ChkTemplates) mergeServiceTemplates(from *ChkTemplates) {
- if len(from.ServiceTemplates) == 0 {
- return
- }
-
- // We have templates to merge from
- // Loop over all 'from' templates and either copy it in case no such template in receiver or merge it
- for fromIndex := range from.ServiceTemplates {
- fromTemplate := &from.ServiceTemplates[fromIndex]
-
- // Try to find entry with the same name among local templates in receiver
- sameNameFound := false
- for toIndex := range templates.ServiceTemplates {
- toTemplate := &templates.ServiceTemplates[toIndex]
- if toTemplate.Name == fromTemplate.Name {
- // Receiver already have such a template
- sameNameFound = true
- // Merge `to` template with `from` template
- _ = mergo.Merge(toTemplate, *fromTemplate, mergo.WithSliceDeepCopy)
- // Receiver `to` template is processed
- break
- }
- }
-
- if !sameNameFound {
- // Receiver does not have template with such a name
- // Append template from `from`
- templates.ServiceTemplates = append(templates.ServiceTemplates, *fromTemplate.DeepCopy())
- }
- }
-}
-
-// GetPodTemplatesIndex returns index of pod templates
-func (templates *ChkTemplates) GetPodTemplatesIndex() *apiChi.PodTemplatesIndex {
- if templates == nil {
- return nil
- }
- return templates.PodTemplatesIndex
-}
-
-// EnsurePodTemplatesIndex ensures index exists
-func (templates *ChkTemplates) EnsurePodTemplatesIndex() *apiChi.PodTemplatesIndex {
- if templates == nil {
- return nil
- }
- if templates.PodTemplatesIndex != nil {
- return templates.PodTemplatesIndex
- }
- templates.PodTemplatesIndex = apiChi.NewPodTemplatesIndex()
- return templates.PodTemplatesIndex
-}
-
-// GetVolumeClaimTemplatesIndex returns index of VolumeClaim templates
-func (templates *ChkTemplates) GetVolumeClaimTemplatesIndex() *apiChi.VolumeClaimTemplatesIndex {
- if templates == nil {
- return nil
- }
- return templates.VolumeClaimTemplatesIndex
-}
-
-// EnsureVolumeClaimTemplatesIndex ensures index exists
-func (templates *ChkTemplates) EnsureVolumeClaimTemplatesIndex() *apiChi.VolumeClaimTemplatesIndex {
- if templates == nil {
- return nil
- }
- if templates.VolumeClaimTemplatesIndex != nil {
- return templates.VolumeClaimTemplatesIndex
- }
- templates.VolumeClaimTemplatesIndex = apiChi.NewVolumeClaimTemplatesIndex()
- return templates.VolumeClaimTemplatesIndex
-}
-
-// GetServiceTemplatesIndex returns index of Service templates
-func (templates *ChkTemplates) GetServiceTemplatesIndex() *apiChi.ServiceTemplatesIndex {
- if templates == nil {
- return nil
- }
- return templates.ServiceTemplatesIndex
-}
-
-// EnsureServiceTemplatesIndex ensures index exists
-func (templates *ChkTemplates) EnsureServiceTemplatesIndex() *apiChi.ServiceTemplatesIndex {
- if templates == nil {
- return nil
- }
- if templates.ServiceTemplatesIndex != nil {
- return templates.ServiceTemplatesIndex
- }
- templates.ServiceTemplatesIndex = apiChi.NewServiceTemplatesIndex()
- return templates.ServiceTemplatesIndex
-}
-
-func (t *ChkTemplates) GetPodTemplates() []apiChi.ChiPodTemplate {
- if t == nil {
- return nil
- }
-
- return t.PodTemplates
-}
-
-func (t *ChkTemplates) GetVolumeClaimTemplates() []apiChi.ChiVolumeClaimTemplate {
- if t == nil {
- return nil
- }
-
- return t.VolumeClaimTemplates
-}
-
-func (t *ChkTemplates) GetServiceTemplates() []apiChi.ChiServiceTemplate {
- if t == nil {
- return nil
- }
-
- return t.ServiceTemplates
-}
-
func (spec *ChkSpec) GetPath() string {
switch {
case spec.GetConfiguration().GetSettings().Has("keeper_server/storage_path"):
@@ -541,96 +311,6 @@ func (spec *ChkSpec) GetPrometheusPort() int {
return spec.GetPort("prometheus/port", -1)
}
-// ChkStatus defines status section of ClickHouseKeeper resource
-type ChkStatus struct {
- CHOpVersion string `json:"chop-version,omitempty" yaml:"chop-version,omitempty"`
- CHOpCommit string `json:"chop-commit,omitempty" yaml:"chop-commit,omitempty"`
- CHOpDate string `json:"chop-date,omitempty" yaml:"chop-date,omitempty"`
- CHOpIP string `json:"chop-ip,omitempty" yaml:"chop-ip,omitempty"`
-
- Status string `json:"status,omitempty" yaml:"status,omitempty"`
-
- // Replicas is the number of number of desired replicas in the cluster
- Replicas int32 `json:"replicas,omitempty"`
-
- // ReadyReplicas is the number of number of ready replicas in the cluster
- ReadyReplicas []apiChi.ChiZookeeperNode `json:"readyReplicas,omitempty"`
-
- Pods []string `json:"pods,omitempty" yaml:"pods,omitempty"`
- PodIPs []string `json:"pod-ips,omitempty" yaml:"pod-ips,omitempty"`
- FQDNs []string `json:"fqdns,omitempty" yaml:"fqdns,omitempty"`
- NormalizedCHK *ClickHouseKeeperInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"`
- NormalizedCHKCompleted *ClickHouseKeeperInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"`
-}
-
-// CopyFrom copies the state of a given ChiStatus f into the receiver ChiStatus of the call.
-func (s *ChkStatus) CopyFrom(from *ChkStatus, opts apiChi.CopyCHIStatusOptions) {
- if s == nil || from == nil {
- return
- }
-
- if opts.InheritableFields {
- }
-
- if opts.MainFields {
- s.CHOpVersion = from.CHOpVersion
- s.CHOpCommit = from.CHOpCommit
- s.CHOpDate = from.CHOpDate
- s.CHOpIP = from.CHOpIP
- s.Status = from.Status
- s.Replicas = from.Replicas
- s.ReadyReplicas = from.ReadyReplicas
- s.Pods = from.Pods
- s.PodIPs = from.PodIPs
- s.FQDNs = from.FQDNs
- s.NormalizedCHK = from.NormalizedCHK
- }
-
- if opts.Normalized {
- s.NormalizedCHK = from.NormalizedCHK
- }
-
- if opts.WholeStatus {
- s.CHOpVersion = from.CHOpVersion
- s.CHOpCommit = from.CHOpCommit
- s.CHOpDate = from.CHOpDate
- s.CHOpIP = from.CHOpIP
- s.Status = from.Status
- s.Replicas = from.Replicas
- s.ReadyReplicas = from.ReadyReplicas
- s.Pods = from.Pods
- s.PodIPs = from.PodIPs
- s.FQDNs = from.FQDNs
- s.NormalizedCHK = from.NormalizedCHK
- s.NormalizedCHKCompleted = from.NormalizedCHKCompleted
- }
-}
-
-// HasNormalizedCHKCompleted is a checker
-func (s *ChkStatus) HasNormalizedCHKCompleted() bool {
- return s.GetNormalizedCHKCompleted() != nil
-}
-
-// HasNormalizedCHK is a checker
-func (s *ChkStatus) HasNormalizedCHK() bool {
- return s.GetNormalizedCHK() != nil
-}
-
-// ClearNormalizedCHK clears normalized CHK in status
-func (s *ChkStatus) ClearNormalizedCHK() {
- s.NormalizedCHK = nil
-}
-
-// GetNormalizedCHK gets target CHK
-func (s *ChkStatus) GetNormalizedCHK() *ClickHouseKeeperInstallation {
- return s.NormalizedCHK
-}
-
-// GetNormalizedCHKCompleted gets completed CHI
-func (s *ChkStatus) GetNormalizedCHKCompleted() *ClickHouseKeeperInstallation {
- return s.NormalizedCHKCompleted
-}
-
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClickHouseKeeperList defines a list of ClickHouseKeeper resources
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
index 667e0c26d..420f808ec 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
@@ -105,7 +105,7 @@ func (in *ChkSpec) DeepCopyInto(out *ChkSpec) {
}
if in.Templates != nil {
in, out := &in.Templates, &out.Templates
- *out = new(ChkTemplates)
+ *out = new(clickhousealtinitycomv1.ChiTemplates)
(*in).DeepCopyInto(*out)
}
return
@@ -169,58 +169,6 @@ func (in *ChkStatus) DeepCopy() *ChkStatus {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChkTemplates) DeepCopyInto(out *ChkTemplates) {
- *out = *in
- if in.PodTemplates != nil {
- in, out := &in.PodTemplates, &out.PodTemplates
- *out = make([]clickhousealtinitycomv1.ChiPodTemplate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.VolumeClaimTemplates != nil {
- in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
- *out = make([]clickhousealtinitycomv1.ChiVolumeClaimTemplate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.ServiceTemplates != nil {
- in, out := &in.ServiceTemplates, &out.ServiceTemplates
- *out = make([]clickhousealtinitycomv1.ChiServiceTemplate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.PodTemplatesIndex != nil {
- in, out := &in.PodTemplatesIndex, &out.PodTemplatesIndex
- *out = new(clickhousealtinitycomv1.PodTemplatesIndex)
- (*in).DeepCopyInto(*out)
- }
- if in.VolumeClaimTemplatesIndex != nil {
- in, out := &in.VolumeClaimTemplatesIndex, &out.VolumeClaimTemplatesIndex
- *out = new(clickhousealtinitycomv1.VolumeClaimTemplatesIndex)
- (*in).DeepCopyInto(*out)
- }
- if in.ServiceTemplatesIndex != nil {
- in, out := &in.ServiceTemplatesIndex, &out.ServiceTemplatesIndex
- *out = new(clickhousealtinitycomv1.ServiceTemplatesIndex)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkTemplates.
-func (in *ChkTemplates) DeepCopy() *ChkTemplates {
- if in == nil {
- return nil
- }
- out := new(ChkTemplates)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClickHouseKeeperInstallation) DeepCopyInto(out *ClickHouseKeeperInstallation) {
*out = *in
@@ -232,7 +180,7 @@ func (in *ClickHouseKeeperInstallation) DeepCopyInto(out *ClickHouseKeeperInstal
*out = new(ChkStatus)
(*in).DeepCopyInto(*out)
}
- out.statusCreatorMutex = in.statusCreatorMutex
+ out.Runtime = in.Runtime
return
}
@@ -286,3 +234,20 @@ func (in *ClickHouseKeeperInstallationList) DeepCopyObject() runtime.Object {
}
return nil
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClickHouseKeeperInstallationRuntime) DeepCopyInto(out *ClickHouseKeeperInstallationRuntime) {
+ *out = *in
+ out.statusCreatorMutex = in.statusCreatorMutex
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseKeeperInstallationRuntime.
+func (in *ClickHouseKeeperInstallationRuntime) DeepCopy() *ClickHouseKeeperInstallationRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ClickHouseKeeperInstallationRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
index 520c70452..ecfd9cb27 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
@@ -113,46 +113,46 @@ func (chi *ClickHouseInstallation) FillSelfCalculatedAddressInfo() {
host *ChiHost,
address *HostAddress,
) error {
- cluster.Address.Namespace = chi.Namespace
- cluster.Address.CHIName = chi.Name
- cluster.Address.ClusterName = cluster.Name
- cluster.Address.ClusterIndex = address.ClusterIndex
-
- shard.Address.Namespace = chi.Namespace
- shard.Address.CHIName = chi.Name
- shard.Address.ClusterName = cluster.Name
- shard.Address.ClusterIndex = address.ClusterIndex
- shard.Address.ShardName = shard.Name
- shard.Address.ShardIndex = address.ShardIndex
-
- replica.Address.Namespace = chi.Namespace
- replica.Address.CHIName = chi.Name
- replica.Address.ClusterName = cluster.Name
- replica.Address.ClusterIndex = address.ClusterIndex
- replica.Address.ReplicaName = replica.Name
- replica.Address.ReplicaIndex = address.ReplicaIndex
-
- host.Address.Namespace = chi.Namespace
+ cluster.Runtime.Address.Namespace = chi.Namespace
+ cluster.Runtime.Address.CHIName = chi.Name
+ cluster.Runtime.Address.ClusterName = cluster.Name
+ cluster.Runtime.Address.ClusterIndex = address.ClusterIndex
+
+ shard.Runtime.Address.Namespace = chi.Namespace
+ shard.Runtime.Address.CHIName = chi.Name
+ shard.Runtime.Address.ClusterName = cluster.Name
+ shard.Runtime.Address.ClusterIndex = address.ClusterIndex
+ shard.Runtime.Address.ShardName = shard.Name
+ shard.Runtime.Address.ShardIndex = address.ShardIndex
+
+ replica.Runtime.Address.Namespace = chi.Namespace
+ replica.Runtime.Address.CHIName = chi.Name
+ replica.Runtime.Address.ClusterName = cluster.Name
+ replica.Runtime.Address.ClusterIndex = address.ClusterIndex
+ replica.Runtime.Address.ReplicaName = replica.Name
+ replica.Runtime.Address.ReplicaIndex = address.ReplicaIndex
+
+ host.Runtime.Address.Namespace = chi.Namespace
// Skip StatefulSet as impossible to self-calculate
// host.Address.StatefulSet = CreateStatefulSetName(host)
- host.Address.CHIName = chi.Name
- host.Address.ClusterName = cluster.Name
- host.Address.ClusterIndex = address.ClusterIndex
- host.Address.ShardName = shard.Name
- host.Address.ShardIndex = address.ShardIndex
- host.Address.ReplicaName = replica.Name
- host.Address.ReplicaIndex = address.ReplicaIndex
- host.Address.HostName = host.Name
- host.Address.CHIScopeIndex = address.CHIScopeAddress.Index
- host.Address.CHIScopeCycleSize = address.CHIScopeAddress.CycleSpec.Size
- host.Address.CHIScopeCycleIndex = address.CHIScopeAddress.CycleAddress.CycleIndex
- host.Address.CHIScopeCycleOffset = address.CHIScopeAddress.CycleAddress.Index
- host.Address.ClusterScopeIndex = address.ClusterScopeAddress.Index
- host.Address.ClusterScopeCycleSize = address.ClusterScopeAddress.CycleSpec.Size
- host.Address.ClusterScopeCycleIndex = address.ClusterScopeAddress.CycleAddress.CycleIndex
- host.Address.ClusterScopeCycleOffset = address.ClusterScopeAddress.CycleAddress.Index
- host.Address.ShardScopeIndex = address.ReplicaIndex
- host.Address.ReplicaScopeIndex = address.ShardIndex
+ host.Runtime.Address.CHIName = chi.Name
+ host.Runtime.Address.ClusterName = cluster.Name
+ host.Runtime.Address.ClusterIndex = address.ClusterIndex
+ host.Runtime.Address.ShardName = shard.Name
+ host.Runtime.Address.ShardIndex = address.ShardIndex
+ host.Runtime.Address.ReplicaName = replica.Name
+ host.Runtime.Address.ReplicaIndex = address.ReplicaIndex
+ host.Runtime.Address.HostName = host.Name
+ host.Runtime.Address.CHIScopeIndex = address.CHIScopeAddress.Index
+ host.Runtime.Address.CHIScopeCycleSize = address.CHIScopeAddress.CycleSpec.Size
+ host.Runtime.Address.CHIScopeCycleIndex = address.CHIScopeAddress.CycleAddress.CycleIndex
+ host.Runtime.Address.CHIScopeCycleOffset = address.CHIScopeAddress.CycleAddress.Index
+ host.Runtime.Address.ClusterScopeIndex = address.ClusterScopeAddress.Index
+ host.Runtime.Address.ClusterScopeCycleSize = address.ClusterScopeAddress.CycleSpec.Size
+ host.Runtime.Address.ClusterScopeCycleIndex = address.ClusterScopeAddress.CycleAddress.CycleIndex
+ host.Runtime.Address.ClusterScopeCycleOffset = address.ClusterScopeAddress.CycleAddress.Index
+ host.Runtime.Address.ShardScopeIndex = address.ReplicaIndex
+ host.Runtime.Address.ReplicaScopeIndex = address.ShardIndex
return nil
},
@@ -170,10 +170,10 @@ func (chi *ClickHouseInstallation) FillCHIPointer() {
host *ChiHost,
address *HostAddress,
) error {
- cluster.CHI = chi
- shard.CHI = chi
- replica.CHI = chi
- host.CHI = chi
+ cluster.Runtime.CHI = chi
+ shard.Runtime.CHI = chi
+ replica.Runtime.CHI = chi
+ host.Runtime.CHI = chi
return nil
},
)
@@ -346,7 +346,7 @@ func (chi *ClickHouseInstallation) MergeFrom(from *ClickHouseInstallation, _type
(&chi.Spec).MergeFrom(&from.Spec, _type)
// Copy service attributes
- chi.Attributes = from.Attributes
+ chi.EnsureRuntime().attributes = from.EnsureRuntime().attributes
chi.EnsureStatus().CopyFrom(from.Status, CopyCHIStatusOptions{
InheritableFields: true,
@@ -707,6 +707,26 @@ func (chi *ClickHouseInstallation) YAML(opts CopyCHIOptions) string {
return string(yamlBytes)
}
+func (chi *ClickHouseInstallation) EnsureRuntime() *ClickHouseInstallationRuntime {
+ if chi == nil {
+ return nil
+ }
+
+ // Assume that most of the time, we'll see a non-nil value.
+ if chi.runtime != nil {
+ return chi.runtime
+ }
+
+ // Otherwise, we need to acquire a lock to initialize the field.
+ chi.runtimeCreatorMutex.Lock()
+ defer chi.runtimeCreatorMutex.Unlock()
+ // Note that we have to check this property again to avoid a TOCTOU bug.
+ if chi.runtime == nil {
+ chi.runtime = &ClickHouseInstallationRuntime{}
+ }
+ return chi.runtime
+}
+
// EnsureStatus ensures status
func (chi *ClickHouseInstallation) EnsureStatus() *ChiStatus {
if chi == nil {
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
index f33ef92b7..0ad29983b 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
@@ -27,7 +27,10 @@ type Cluster struct {
Secret *ClusterSecret `json:"secret,omitempty" yaml:"secret,omitempty"`
Layout *ChiClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
- // Internal data
+ Runtime ClusterRuntime `json:"-" yaml:"-"`
+}
+
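+// ClusterRuntime defines the internal data of the cluster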
+type ClusterRuntime struct {
Address ChiClusterAddress `json:"-" yaml:"-"`
CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
}
@@ -157,12 +160,12 @@ func (cluster *Cluster) GetServiceTemplate() (*ChiServiceTemplate, bool) {
return nil, false
}
name := cluster.Templates.GetClusterServiceTemplate()
- return cluster.CHI.GetServiceTemplate(name)
+ return cluster.Runtime.CHI.GetServiceTemplate(name)
}
// GetCHI gets parent CHI
func (cluster *Cluster) GetCHI() *ClickHouseInstallation {
- return cluster.CHI
+ return cluster.Runtime.CHI
}
// GetShard gets shard with specified index
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go
index 27fa8b8f4..c3a0ce3b8 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go
@@ -594,33 +594,33 @@ func (c *OperatorConfig) unlistCHITemplate(template *ClickHouseInstallation) {
}
// FindTemplate finds specified template within possibly specified namespace
-func (c *OperatorConfig) FindTemplate(use *ChiUseTemplate, fallbackNamespace string) *ClickHouseInstallation {
+func (c *OperatorConfig) FindTemplate(templateRef *ChiTemplateRef, fallbackNamespace string) *ClickHouseInstallation {
c.Template.CHI.Runtime.mutex.RLock()
defer c.Template.CHI.Runtime.mutex.RUnlock()
// Try to find direct match
- for _, _template := range c.Template.CHI.Runtime.Templates {
- if _template.MatchFullName(use.Namespace, use.Name) {
- // Direct match, found result
- return _template
+ for _, template := range c.Template.CHI.Runtime.Templates {
+ if template.MatchFullName(templateRef.Namespace, templateRef.Name) {
+ // Exact match, found the result
+ return template
}
}
- // Direct match is not possible.
- // Let's try to find by name only
+	// No exact match found.
+ // Let's try to find by name only in "predefined" namespace
- if use.Namespace != "" {
- // With fully-specified template namespace+name pair direct (full name) only match is applicable
+ if templateRef.Namespace != "" {
+		// With a fully-specified namespace+name pair, only an exact (full-name) match is applicable
// This is strange situation, however
return nil
}
- // Look for templates with specified name in explicitly specified namespace
+ // Look for templates with specified name in "predefined" namespace
- for _, _template := range c.Template.CHI.Runtime.Templates {
- if _template.MatchFullName(fallbackNamespace, use.Name) {
- // Found template with searched name in specified namespace
- return _template
+ for _, template := range c.Template.CHI.Runtime.Templates {
+ if template.MatchFullName(fallbackNamespace, templateRef.Name) {
+ // Found template with searched name in "predefined" namespace
+ return template
}
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_host.go b/pkg/apis/clickhouse.altinity.com/v1/type_host.go
index f13b85ab1..c2d3ec7d3 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_host.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_host.go
@@ -15,15 +15,17 @@
package v1
import (
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
)
// ChiHost defines host (a data replica within a shard) of .spec.configuration.clusters[n].shards[m]
type ChiHost struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// DEPRECATED - to be removed soon
- Port int32 `json:"port,omitempty" yaml:"port,omitempty"`
+ Port int32 `json:"port,omitempty" yaml:"port,omitempty"`
Insecure *StringBool `json:"insecure,omitempty" yaml:"insecure,omitempty"`
Secure *StringBool `json:"secure,omitempty" yaml:"secure,omitempty"`
@@ -36,15 +38,19 @@ type ChiHost struct {
Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
Templates *ChiTemplateNames `json:"templates,omitempty" yaml:"templates,omitempty"`
+ Runtime ChiHostRuntime `json:"-" yaml:"-"`
+}
+
+type ChiHostRuntime struct {
// Internal data
Address ChiHostAddress `json:"-" yaml:"-"`
Config ChiHostConfig `json:"-" yaml:"-"`
- Version *CHVersion `json:"-" yaml:"-"`
+ Version *swversion.SoftWareVersion `json:"-" yaml:"-"`
reconcileAttributes *ChiHostReconcileAttributes `json:"-" yaml:"-" testdiff:"ignore"`
// CurStatefulSet is a current stateful set, fetched from k8s
- CurStatefulSet *appsv1.StatefulSet `json:"-" yaml:"-" testdiff:"ignore"`
+ CurStatefulSet *apps.StatefulSet `json:"-" yaml:"-" testdiff:"ignore"`
// DesiredStatefulSet is a desired stateful set - reconcile target
- DesiredStatefulSet *appsv1.StatefulSet `json:"-" yaml:"-" testdiff:"ignore"`
+ DesiredStatefulSet *apps.StatefulSet `json:"-" yaml:"-" testdiff:"ignore"`
CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
}
@@ -53,10 +59,10 @@ func (host *ChiHost) GetReconcileAttributes() *ChiHostReconcileAttributes {
if host == nil {
return nil
}
- if host.reconcileAttributes == nil {
- host.reconcileAttributes = NewChiHostReconcileAttributes()
+ if host.Runtime.reconcileAttributes == nil {
+ host.Runtime.reconcileAttributes = NewChiHostReconcileAttributes()
}
- return host.reconcileAttributes
+ return host.Runtime.reconcileAttributes
}
// InheritSettingsFrom inherits settings from specified shard and replica
@@ -107,9 +113,6 @@ func (host *ChiHost) MergeFrom(from *ChiHost) {
if (host == nil) || (from == nil) {
return
}
- if isUnassigned(host.Port) {
- host.Port = from.Port
- }
host.Insecure = host.Insecure.MergeFrom(from.Insecure)
host.Secure = host.Secure.MergeFrom(from.Secure)
@@ -138,7 +141,7 @@ func (host *ChiHost) GetHostTemplate() (*ChiHostTemplate, bool) {
return nil, false
}
name := host.Templates.GetHostTemplate()
- return host.CHI.GetHostTemplate(name)
+ return host.Runtime.CHI.GetHostTemplate(name)
}
// GetPodTemplate gets pod template
@@ -147,7 +150,7 @@ func (host *ChiHost) GetPodTemplate() (*ChiPodTemplate, bool) {
return nil, false
}
name := host.Templates.GetPodTemplate()
- return host.CHI.GetPodTemplate(name)
+ return host.Runtime.CHI.GetPodTemplate(name)
}
// GetServiceTemplate gets service template
@@ -156,7 +159,7 @@ func (host *ChiHost) GetServiceTemplate() (*ChiServiceTemplate, bool) {
return nil, false
}
name := host.Templates.GetReplicaServiceTemplate()
- return host.CHI.GetServiceTemplate(name)
+ return host.Runtime.CHI.GetServiceTemplate(name)
}
// GetStatefulSetReplicasNum gets stateful set replica num
@@ -165,7 +168,7 @@ func (host *ChiHost) GetStatefulSetReplicasNum(shutdown bool) *int32 {
switch {
case shutdown:
num = 0
- case host.CHI.IsStopped():
+ case host.IsStopped():
num = 0
default:
num = 1
@@ -197,7 +200,7 @@ func (host *ChiHost) GetCHI() *ClickHouseInstallation {
if host == nil {
return nil
}
- return host.CHI
+ return host.Runtime.CHI
}
// HasCHI checks whether host has CHI
@@ -208,18 +211,22 @@ func (host *ChiHost) HasCHI() bool {
// GetCluster gets cluster
func (host *ChiHost) GetCluster() *Cluster {
// Host has to have filled Address
- return host.GetCHI().FindCluster(host.Address.ClusterName)
+ return host.GetCHI().FindCluster(host.Runtime.Address.ClusterName)
}
// GetShard gets shard
func (host *ChiHost) GetShard() *ChiShard {
// Host has to have filled Address
- return host.GetCHI().FindShard(host.Address.ClusterName, host.Address.ShardName)
+ return host.GetCHI().FindShard(host.Runtime.Address.ClusterName, host.Runtime.Address.ShardName)
}
// GetAncestor gets ancestor of a host
func (host *ChiHost) GetAncestor() *ChiHost {
- return host.GetCHI().GetAncestor().FindHost(host.Address.ClusterName, host.Address.ShardName, host.Address.HostName)
+ return host.GetCHI().GetAncestor().FindHost(
+ host.Runtime.Address.ClusterName,
+ host.Runtime.Address.ShardName,
+ host.Runtime.Address.HostName,
+ )
}
// HasAncestor checks whether host has an ancestor
@@ -274,23 +281,23 @@ func (w WhichStatefulSet) DesiredStatefulSet() bool {
}
// WalkVolumeMounts walks VolumeMount(s)
-func (host *ChiHost) WalkVolumeMounts(which WhichStatefulSet, f func(volumeMount *corev1.VolumeMount)) {
+func (host *ChiHost) WalkVolumeMounts(which WhichStatefulSet, f func(volumeMount *core.VolumeMount)) {
if host == nil {
return
}
- var sts *appsv1.StatefulSet
+ var sts *apps.StatefulSet
switch {
case which.DesiredStatefulSet():
if !host.HasDesiredStatefulSet() {
return
}
- sts = host.DesiredStatefulSet
+ sts = host.Runtime.DesiredStatefulSet
case which.CurStatefulSet():
if !host.HasCurStatefulSet() {
return
}
- sts = host.CurStatefulSet
+ sts = host.Runtime.CurStatefulSet
default:
return
}
@@ -363,7 +370,7 @@ func (host *ChiHost) IsFirst() bool {
return false
}
- return host.Address.CHIScopeIndex == 0
+ return host.Runtime.Address.CHIScopeIndex == 0
}
// HasCurStatefulSet checks whether host has CurStatefulSet
@@ -372,7 +379,7 @@ func (host *ChiHost) HasCurStatefulSet() bool {
return false
}
- return host.CurStatefulSet != nil
+ return host.Runtime.CurStatefulSet != nil
}
// HasDesiredStatefulSet checks whether host has DesiredStatefulSet
@@ -381,5 +388,5 @@ func (host *ChiHost) HasDesiredStatefulSet() bool {
return false
}
- return host.DesiredStatefulSet != nil
+ return host.Runtime.DesiredStatefulSet != nil
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_host_address.go b/pkg/apis/clickhouse.altinity.com/v1/type_host_address.go
index 97d9b9e6f..22bffe916 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_host_address.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_host_address.go
@@ -18,32 +18,33 @@ import "fmt"
// ChiHostAddress defines address of a host within ClickHouseInstallation
type ChiHostAddress struct {
- Namespace string `json:"namespace" yaml:"namespace"`
- StatefulSet string `json:"statefulSet" yaml:"statefulSet"`
- FQDN string `json:"fqdn" yaml:"fqdn"`
- CHIName string `json:"chiName" yaml:"chiName"`
- ClusterName string `json:"clusterName" yaml:"clusterName"`
- ClusterIndex int `json:"clusterIndex" yaml:"clusterIndex"`
- ShardName string `json:"shardName" yaml:"shardName"`
- ShardIndex int `json:"shardIndex" yaml:"shardIndex"`
- ShardScopeIndex int `json:"shardScopeIndex" yaml:"shardScopeIndex"`
- ReplicaName string `json:"replicaName" yaml:"replicaName"`
- ReplicaIndex int `json:"replicaIndex" yaml:"replicaIndex"`
- ReplicaScopeIndex int `json:"replicaScopeIndex" yaml:"replicaScopeIndex"`
- HostName string `json:"hostName" yaml:"hostName"`
- CHIScopeIndex int `json:"chiScopeIndex" yaml:"chiScopeIndex"`
- CHIScopeCycleSize int `json:"chiScopeCycleSize" yaml:"chiScopeCycleSize"`
- CHIScopeCycleIndex int `json:"chiScopeCycleIndex" yaml:"chiScopeCycleIndex"`
- CHIScopeCycleOffset int `json:"chiScopeCycleOffset" yaml:"chiScopeCycleOffset"`
- ClusterScopeIndex int `json:"clusterScopeIndex" yaml:"clusterScopeIndex"`
- ClusterScopeCycleSize int `json:"clusterScopeCycleSize" yaml:"clusterScopeCycleSize"`
- ClusterScopeCycleIndex int `json:"clusterScopeCycleIndex" yaml:"clusterScopeCycleIndex"`
- ClusterScopeCycleOffset int `json:"clusterScopeCycleOffset" yaml:"clusterScopeCycleOffset"`
+ Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+ StatefulSet string `json:"statefulSet,omitempty" yaml:"statefulSet,omitempty"`
+ FQDN string `json:"fqdn,omitempty" yaml:"fqdn,omitempty"`
+ CHIName string `json:"chiName,omitempty" yaml:"chiName,omitempty"`
+ ClusterName string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
+ ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
+ ShardName string `json:"shardName,omitempty" yaml:"shardName,omitempty"`
+ ShardIndex int `json:"shardIndex,omitempty" yaml:"shardIndex,omitempty"`
+ ShardScopeIndex int `json:"shardScopeIndex,omitempty" yaml:"shardScopeIndex,omitempty"`
+ ReplicaName string `json:"replicaName,omitempty" yaml:"replicaName,omitempty"`
+ ReplicaIndex int `json:"replicaIndex,omitempty" yaml:"replicaIndex,omitempty"`
+ ReplicaScopeIndex int `json:"replicaScopeIndex,omitempty" yaml:"replicaScopeIndex,omitempty"`
+ HostName string `json:"hostName,omitempty" yaml:"hostName,omitempty"`
+ CHIScopeIndex int `json:"chiScopeIndex,omitempty" yaml:"chiScopeIndex,omitempty"`
+ CHIScopeCycleSize int `json:"chiScopeCycleSize,omitempty" yaml:"chiScopeCycleSize,omitempty"`
+ CHIScopeCycleIndex int `json:"chiScopeCycleIndex,omitempty" yaml:"chiScopeCycleIndex,omitempty"`
+ CHIScopeCycleOffset int `json:"chiScopeCycleOffset,omitempty" yaml:"chiScopeCycleOffset,omitempty"`
+ ClusterScopeIndex int `json:"clusterScopeIndex,omitempty" yaml:"clusterScopeIndex,omitempty"`
+ ClusterScopeCycleSize int `json:"clusterScopeCycleSize,omitempty" yaml:"clusterScopeCycleSize,omitempty"`
+ ClusterScopeCycleIndex int `json:"clusterScopeCycleIndex,omitempty" yaml:"clusterScopeCycleIndex,omitempty"`
+ ClusterScopeCycleOffset int `json:"clusterScopeCycleOffset,omitempty" yaml:"clusterScopeCycleOffset,omitempty"`
}
// CompactString creates compact string representation
func (a ChiHostAddress) CompactString() string {
- return fmt.Sprintf("ns:%s|chi:%s|clu:%s|sha:%s|rep:%s|host:%s", a.Namespace, a.CHIName, a.ClusterName, a.ShardName, a.ReplicaName, a.HostName)
+ return fmt.Sprintf("ns:%s|chi:%s|clu:%s|sha:%s|rep:%s|host:%s",
+ a.Namespace, a.CHIName, a.ClusterName, a.ShardName, a.ReplicaName, a.HostName)
}
// ClusterNameString creates cluster+host pair
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
index 3b6a741e7..fa0fd1cdf 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
@@ -36,7 +36,7 @@ func (replica *ChiReplica) GetServiceTemplate() (*ChiServiceTemplate, bool) {
return nil, false
}
name := replica.Templates.GetReplicaServiceTemplate()
- return replica.CHI.GetServiceTemplate(name)
+ return replica.Runtime.CHI.GetServiceTemplate(name)
}
// HasShardsCount checks whether replica has shards count specified
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
index 29f922a70..8f6be0fde 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
@@ -36,7 +36,7 @@ func (shard *ChiShard) GetServiceTemplate() (*ChiServiceTemplate, bool) {
return nil, false
}
name := shard.Templates.GetShardServiceTemplate()
- return shard.CHI.GetServiceTemplate(name)
+ return shard.Runtime.CHI.GetServiceTemplate(name)
}
// HasReplicasCount checks whether shard has replicas count specified
@@ -70,11 +70,11 @@ func (shard *ChiShard) FindHost(needle interface{}) (res *ChiHost) {
shard.WalkHosts(func(host *ChiHost) error {
switch v := needle.(type) {
case string:
- if host.Address.HostName == v {
+ if host.Runtime.Address.HostName == v {
res = host
}
case int:
- if host.Address.ShardScopeIndex == v {
+ if host.Runtime.Address.ShardScopeIndex == v {
res = host
}
}
@@ -107,12 +107,12 @@ func (shard *ChiShard) HostsCount() int {
// GetCHI gets CHI of the shard
func (shard *ChiShard) GetCHI() *ClickHouseInstallation {
- return shard.CHI
+ return shard.Runtime.CHI
}
// GetCluster gets cluster of the shard
func (shard *ChiShard) GetCluster() *Cluster {
- return shard.CHI.Spec.Configuration.Clusters[shard.Address.ClusterIndex]
+ return shard.Runtime.CHI.Spec.Configuration.Clusters[shard.Runtime.Address.ClusterIndex]
}
// HasWeight checks whether shard has applicable weight value specified
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_status.go b/pkg/apis/clickhouse.altinity.com/v1/type_status.go
index cae884fe7..70e9d27e7 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_status.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_status.go
@@ -73,7 +73,7 @@ type ChiStatus struct {
NormalizedCHI *ClickHouseInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"`
NormalizedCHICompleted *ClickHouseInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"`
HostsWithTablesCreated []string `json:"hostsWithTablesCreated,omitempty" yaml:"hostsWithTablesCreated,omitempty"`
- UsedTemplates []*ChiUseTemplate `json:"usedTemplates,omitempty" yaml:"usedTemplates,omitempty"`
+ UsedTemplates []*ChiTemplateRef `json:"usedTemplates,omitempty" yaml:"usedTemplates,omitempty"`
mu sync.RWMutex `json:"-" yaml:"-"`
}
@@ -175,9 +175,9 @@ func (s *ChiStatus) SyncHostTablesCreated() {
}
// PushUsedTemplate pushes used template to the list of used templates
-func (s *ChiStatus) PushUsedTemplate(usedTemplate *ChiUseTemplate) {
+func (s *ChiStatus) PushUsedTemplate(templateRef *ChiTemplateRef) {
doWithWriteLock(s, func(s *ChiStatus) {
- s.UsedTemplates = append(s.UsedTemplates, usedTemplate)
+ s.UsedTemplates = append(s.UsedTemplates, templateRef)
})
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
index 58614e8b5..21b514f48 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
@@ -23,6 +23,34 @@ func NewChiTemplates() *ChiTemplates {
return new(ChiTemplates)
}
+func (templates *ChiTemplates) GetHostTemplates() []ChiHostTemplate {
+ if templates == nil {
+ return nil
+ }
+ return templates.HostTemplates
+}
+
+func (templates *ChiTemplates) GetPodTemplates() []ChiPodTemplate {
+ if templates == nil {
+ return nil
+ }
+ return templates.PodTemplates
+}
+
+func (templates *ChiTemplates) GetVolumeClaimTemplates() []ChiVolumeClaimTemplate {
+ if templates == nil {
+ return nil
+ }
+ return templates.VolumeClaimTemplates
+}
+
+func (templates *ChiTemplates) GetServiceTemplates() []ChiServiceTemplate {
+ if templates == nil {
+ return nil
+ }
+ return templates.ServiceTemplates
+}
+
// Len returns accumulated len of all templates
func (templates *ChiTemplates) Len() int {
if templates == nil {
@@ -37,7 +65,20 @@ func (templates *ChiTemplates) Len() int {
}
// MergeFrom merges from specified object
-func (templates *ChiTemplates) MergeFrom(from *ChiTemplates, _type MergeType) *ChiTemplates {
+func (templates *ChiTemplates) MergeFrom(_from any, _type MergeType) *ChiTemplates {
+ // Typed from
+ var from *ChiTemplates
+
+ // Ensure type
+ switch typed := _from.(type) {
+ case *ChiTemplates:
+ from = typed
+ default:
+ return templates
+ }
+
+ // Sanity check
+
if from.Len() == 0 {
return templates
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go
index 6ef74b253..d890ff4d3 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/types.go
@@ -39,12 +39,34 @@ const (
type ClickHouseInstallation struct {
meta.TypeMeta `json:",inline" yaml:",inline"`
meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
- Spec ChiSpec `json:"spec" yaml:"spec"`
- Status *ChiStatus `json:"status,omitempty" yaml:"status,omitempty"`
- Attributes ComparableAttributes `json:"-" yaml:"-"`
+ Spec ChiSpec `json:"spec" yaml:"spec"`
+ Status *ChiStatus `json:"status,omitempty" yaml:"status,omitempty"`
- statusCreatorMutex sync.Mutex `json:"-" yaml:"-"`
+ runtime *ClickHouseInstallationRuntime `json:"-" yaml:"-"`
+ statusCreatorMutex sync.Mutex `json:"-" yaml:"-"`
+ runtimeCreatorMutex sync.Mutex `json:"-" yaml:"-"`
+}
+
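+// ClickHouseInstallationRuntime defines the runtime (internal, non-serialized) data of ClickHouseInstallation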
+type ClickHouseInstallationRuntime struct {
+ attributes *ComparableAttributes `json:"-" yaml:"-"`
+}
+
+func (runtime *ClickHouseInstallationRuntime) EnsureAttributes() *ComparableAttributes {
+ if runtime == nil {
+ return nil
+ }
+
+ // Assume that most of the time, we'll see a non-nil value.
+ if runtime.attributes != nil {
+ return runtime.attributes
+ }
+
+	// Initialize the attributes lazily in case they have not been set yet.
+ if runtime.attributes == nil {
+ runtime.attributes = &ComparableAttributes{}
+ }
+ return runtime.attributes
}
// ComparableAttributes specifies CHI attributes that are comparable
@@ -76,21 +98,21 @@ type ClickHouseOperatorConfiguration struct {
// ChiSpec defines spec section of ClickHouseInstallation resource
type ChiSpec struct {
- TaskID *string `json:"taskID,omitempty" yaml:"taskID,omitempty"`
- Stop *StringBool `json:"stop,omitempty" yaml:"stop,omitempty"`
- Restart string `json:"restart,omitempty" yaml:"restart,omitempty"`
- Troubleshoot *StringBool `json:"troubleshoot,omitempty" yaml:"troubleshoot,omitempty"`
- NamespaceDomainPattern string `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
- Templating *ChiTemplating `json:"templating,omitempty" yaml:"templating,omitempty"`
- Reconciling *ChiReconciling `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
- Defaults *ChiDefaults `json:"defaults,omitempty" yaml:"defaults,omitempty"`
- Configuration *Configuration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
- Templates *ChiTemplates `json:"templates,omitempty" yaml:"templates,omitempty"`
- UseTemplates []ChiUseTemplate `json:"useTemplates,omitempty" yaml:"useTemplates,omitempty"`
-}
-
-// ChiUseTemplate defines UseTemplate section of ClickHouseInstallation resource
-type ChiUseTemplate struct {
+ TaskID *string `json:"taskID,omitempty" yaml:"taskID,omitempty"`
+ Stop *StringBool `json:"stop,omitempty" yaml:"stop,omitempty"`
+ Restart string `json:"restart,omitempty" yaml:"restart,omitempty"`
+ Troubleshoot *StringBool `json:"troubleshoot,omitempty" yaml:"troubleshoot,omitempty"`
+ NamespaceDomainPattern string `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
+ Templating *ChiTemplating `json:"templating,omitempty" yaml:"templating,omitempty"`
+ Reconciling *ChiReconciling `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
+ Defaults *ChiDefaults `json:"defaults,omitempty" yaml:"defaults,omitempty"`
+ Configuration *Configuration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
+ Templates *ChiTemplates `json:"templates,omitempty" yaml:"templates,omitempty"`
+ UseTemplates []*ChiTemplateRef `json:"useTemplates,omitempty" yaml:"useTemplates,omitempty"`
+}
+
+// ChiTemplateRef defines a reference to a template used in the useTemplates section of a ClickHouseInstallation resource
+type ChiTemplateRef struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
UseType string `json:"useType,omitempty" yaml:"useType,omitempty"`
@@ -122,6 +144,10 @@ func (s CHISelector) Matches(labels map[string]string) bool {
// Labels have the key specified in selector, but selector value is not the same as labels value
// Selector does not match the labels
return false
+ } else {
+ // Selector value and label value are equal
+ // So far label matches selector
+ // Continue iteration to next value
}
}
@@ -152,8 +178,8 @@ func (t *ChiTemplating) SetPolicy(p string) {
t.Policy = p
}
-// GetCHISelector gets CHI selector
-func (t *ChiTemplating) GetCHISelector() CHISelector {
+// GetSelector gets CHI selector
+func (t *ChiTemplating) GetSelector() CHISelector {
if t == nil {
return nil
}
@@ -583,15 +609,17 @@ type ChiShard struct {
// TODO refactor into map[string]ChiHost
Hosts []*ChiHost `json:"replicas,omitempty" yaml:"replicas,omitempty"`
- // Internal data
-
- Address ChiShardAddress `json:"-" yaml:"-"`
- CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
+ Runtime ChiShardRuntime `json:"-" yaml:"-"`
// DefinitionType is DEPRECATED - to be removed soon
DefinitionType string `json:"definitionType,omitempty" yaml:"definitionType,omitempty"`
}
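+// ChiShardRuntime defines the internal data of the shard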
+type ChiShardRuntime struct {
+ Address ChiShardAddress `json:"-" yaml:"-"`
+ CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
+}
+
// ChiReplica defines item of a replica section of .spec.configuration.clusters[n].replicas
// TODO unify with ChiShard based on HostsSet
type ChiReplica struct {
@@ -603,8 +631,10 @@ type ChiReplica struct {
// TODO refactor into map[string]ChiHost
Hosts []*ChiHost `json:"shards,omitempty" yaml:"shards,omitempty"`
- // Internal data
+ Runtime ChiReplicaRuntime `json:"-" yaml:"-"`
+}
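+// ChiReplicaRuntime defines the internal data of the replica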
+type ChiReplicaRuntime struct {
Address ChiReplicaAddress `json:"-" yaml:"-"`
CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
index e3d73185c..013b84a2d 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
@@ -22,6 +22,7 @@ limitations under the License.
package v1
import (
+ swversion "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -49,22 +50,6 @@ func (in CHISelector) DeepCopy() CHISelector {
return *out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CHVersion) DeepCopyInto(out *CHVersion) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CHVersion.
-func (in *CHVersion) DeepCopy() *CHVersion {
- if in == nil {
- return nil
- }
- out := new(CHVersion)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiCleanup) DeepCopyInto(out *ChiCleanup) {
*out = *in
@@ -222,33 +207,7 @@ func (in *ChiHost) DeepCopyInto(out *ChiHost) {
*out = new(ChiTemplateNames)
**out = **in
}
- out.Address = in.Address
- out.Config = in.Config
- if in.Version != nil {
- in, out := &in.Version, &out.Version
- *out = new(CHVersion)
- **out = **in
- }
- if in.reconcileAttributes != nil {
- in, out := &in.reconcileAttributes, &out.reconcileAttributes
- *out = new(ChiHostReconcileAttributes)
- **out = **in
- }
- if in.CurStatefulSet != nil {
- in, out := &in.CurStatefulSet, &out.CurStatefulSet
- *out = new(appsv1.StatefulSet)
- (*in).DeepCopyInto(*out)
- }
- if in.DesiredStatefulSet != nil {
- in, out := &in.DesiredStatefulSet, &out.DesiredStatefulSet
- *out = new(appsv1.StatefulSet)
- (*in).DeepCopyInto(*out)
- }
- if in.CHI != nil {
- in, out := &in.CHI, &out.CHI
- *out = new(ClickHouseInstallation)
- (*in).DeepCopyInto(*out)
- }
+ in.Runtime.DeepCopyInto(&out.Runtime)
return
}
@@ -333,6 +292,49 @@ func (in *ChiHostReconcileAttributesCounters) DeepCopy() *ChiHostReconcileAttrib
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiHostRuntime) DeepCopyInto(out *ChiHostRuntime) {
+ *out = *in
+ out.Address = in.Address
+ out.Config = in.Config
+ if in.Version != nil {
+ in, out := &in.Version, &out.Version
+ *out = new(swversion.SoftWareVersion)
+ **out = **in
+ }
+ if in.reconcileAttributes != nil {
+ in, out := &in.reconcileAttributes, &out.reconcileAttributes
+ *out = new(ChiHostReconcileAttributes)
+ **out = **in
+ }
+ if in.CurStatefulSet != nil {
+ in, out := &in.CurStatefulSet, &out.CurStatefulSet
+ *out = new(appsv1.StatefulSet)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DesiredStatefulSet != nil {
+ in, out := &in.DesiredStatefulSet, &out.DesiredStatefulSet
+ *out = new(appsv1.StatefulSet)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.CHI != nil {
+ in, out := &in.CHI, &out.CHI
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiHostRuntime.
+func (in *ChiHostRuntime) DeepCopy() *ChiHostRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ChiHostRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiHostTemplate) DeepCopyInto(out *ChiHostTemplate) {
*out = *in
@@ -498,12 +500,7 @@ func (in *ChiReplica) DeepCopyInto(out *ChiReplica) {
}
}
}
- out.Address = in.Address
- if in.CHI != nil {
- in, out := &in.CHI, &out.CHI
- *out = new(ClickHouseInstallation)
- (*in).DeepCopyInto(*out)
- }
+ in.Runtime.DeepCopyInto(&out.Runtime)
return
}
@@ -533,6 +530,28 @@ func (in *ChiReplicaAddress) DeepCopy() *ChiReplicaAddress {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiReplicaRuntime) DeepCopyInto(out *ChiReplicaRuntime) {
+ *out = *in
+ out.Address = in.Address
+ if in.CHI != nil {
+ in, out := &in.CHI, &out.CHI
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiReplicaRuntime.
+func (in *ChiReplicaRuntime) DeepCopy() *ChiReplicaRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ChiReplicaRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiServiceTemplate) DeepCopyInto(out *ChiServiceTemplate) {
*out = *in
@@ -590,12 +609,7 @@ func (in *ChiShard) DeepCopyInto(out *ChiShard) {
}
}
}
- out.Address = in.Address
- if in.CHI != nil {
- in, out := &in.CHI, &out.CHI
- *out = new(ClickHouseInstallation)
- (*in).DeepCopyInto(*out)
- }
+ in.Runtime.DeepCopyInto(&out.Runtime)
return
}
@@ -625,6 +639,28 @@ func (in *ChiShardAddress) DeepCopy() *ChiShardAddress {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiShardRuntime) DeepCopyInto(out *ChiShardRuntime) {
+ *out = *in
+ out.Address = in.Address
+ if in.CHI != nil {
+ in, out := &in.CHI, &out.CHI
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiShardRuntime.
+func (in *ChiShardRuntime) DeepCopy() *ChiShardRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ChiShardRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiSpec) DeepCopyInto(out *ChiSpec) {
*out = *in
@@ -670,8 +706,14 @@ func (in *ChiSpec) DeepCopyInto(out *ChiSpec) {
}
if in.UseTemplates != nil {
in, out := &in.UseTemplates, &out.UseTemplates
- *out = make([]ChiUseTemplate, len(*in))
- copy(*out, *in)
+ *out = make([]*ChiTemplateRef, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(ChiTemplateRef)
+ **out = **in
+ }
+ }
}
return
}
@@ -741,11 +783,11 @@ func (in *ChiStatus) DeepCopyInto(out *ChiStatus) {
}
if in.UsedTemplates != nil {
in, out := &in.UsedTemplates, &out.UsedTemplates
- *out = make([]*ChiUseTemplate, len(*in))
+ *out = make([]*ChiTemplateRef, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
- *out = new(ChiUseTemplate)
+ *out = new(ChiTemplateRef)
**out = **in
}
}
@@ -780,6 +822,22 @@ func (in *ChiTemplateNames) DeepCopy() *ChiTemplateNames {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiTemplateRef) DeepCopyInto(out *ChiTemplateRef) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiTemplateRef.
+func (in *ChiTemplateRef) DeepCopy() *ChiTemplateRef {
+ if in == nil {
+ return nil
+ }
+ out := new(ChiTemplateRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiTemplates) DeepCopyInto(out *ChiTemplates) {
*out = *in
@@ -867,22 +925,6 @@ func (in *ChiTemplating) DeepCopy() *ChiTemplating {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiUseTemplate) DeepCopyInto(out *ChiUseTemplate) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiUseTemplate.
-func (in *ChiUseTemplate) DeepCopy() *ChiUseTemplate {
- if in == nil {
- return nil
- }
- out := new(ChiUseTemplate)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiVolumeClaimTemplate) DeepCopyInto(out *ChiVolumeClaimTemplate) {
*out = *in
@@ -957,8 +999,13 @@ func (in *ClickHouseInstallation) DeepCopyInto(out *ClickHouseInstallation) {
*out = new(ChiStatus)
(*in).DeepCopyInto(*out)
}
- in.Attributes.DeepCopyInto(&out.Attributes)
+ if in.runtime != nil {
+ in, out := &in.runtime, &out.runtime
+ *out = new(ClickHouseInstallationRuntime)
+ (*in).DeepCopyInto(*out)
+ }
out.statusCreatorMutex = in.statusCreatorMutex
+ out.runtimeCreatorMutex = in.runtimeCreatorMutex
return
}
@@ -1013,6 +1060,27 @@ func (in *ClickHouseInstallationList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClickHouseInstallationRuntime) DeepCopyInto(out *ClickHouseInstallationRuntime) {
+ *out = *in
+ if in.attributes != nil {
+ in, out := &in.attributes, &out.attributes
+ *out = new(ComparableAttributes)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseInstallationRuntime.
+func (in *ClickHouseInstallationRuntime) DeepCopy() *ClickHouseInstallationRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ClickHouseInstallationRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClickHouseInstallationTemplate) DeepCopyInto(out *ClickHouseInstallationTemplate) {
*out = *in
@@ -1024,8 +1092,13 @@ func (in *ClickHouseInstallationTemplate) DeepCopyInto(out *ClickHouseInstallati
*out = new(ChiStatus)
(*in).DeepCopyInto(*out)
}
- in.Attributes.DeepCopyInto(&out.Attributes)
+ if in.runtime != nil {
+ in, out := &in.runtime, &out.runtime
+ *out = new(ClickHouseInstallationRuntime)
+ (*in).DeepCopyInto(*out)
+ }
out.statusCreatorMutex = in.statusCreatorMutex
+ out.runtimeCreatorMutex = in.runtimeCreatorMutex
return
}
@@ -1188,6 +1261,23 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = new(ChiClusterLayout)
(*in).DeepCopyInto(*out)
}
+ in.Runtime.DeepCopyInto(&out.Runtime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+ if in == nil {
+ return nil
+ }
+ out := new(Cluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRuntime) DeepCopyInto(out *ClusterRuntime) {
+ *out = *in
out.Address = in.Address
if in.CHI != nil {
in, out := &in.CHI, &out.CHI
@@ -1197,12 +1287,12 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
-func (in *Cluster) DeepCopy() *Cluster {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRuntime.
+func (in *ClusterRuntime) DeepCopy() *ClusterRuntime {
if in == nil {
return nil
}
- out := new(Cluster)
+ out := new(ClusterRuntime)
in.DeepCopyInto(out)
return out
}
diff --git a/pkg/apis/metrics/exporter.go b/pkg/apis/metrics/exporter.go
index 622c4998d..0fc6ee585 100644
--- a/pkg/apis/metrics/exporter.go
+++ b/pkg/apis/metrics/exporter.go
@@ -25,13 +25,14 @@ import (
log "github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
+ core "k8s.io/api/core/v1"
kube "k8s.io/client-go/kubernetes"
- chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/chop"
chopAPI "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ chiNormalizer "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
"github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
)
@@ -148,18 +149,18 @@ func (e *Exporter) newHostFetcher(host *WatchedHost) *ClickHouseMetricsFetcher {
clusterConnectionParams := clickhouse.NewClusterConnectionParamsFromCHOpConfig(chop.Config())
// Adjust base cluster connection params with per-host props
switch clusterConnectionParams.Scheme {
- case chiv1.ChSchemeAuto:
+ case api.ChSchemeAuto:
switch {
- case chiv1.IsPortAssigned(host.HTTPPort):
+ case api.IsPortAssigned(host.HTTPPort):
clusterConnectionParams.Scheme = "http"
clusterConnectionParams.Port = int(host.HTTPPort)
- case chiv1.IsPortAssigned(host.HTTPSPort):
+ case api.IsPortAssigned(host.HTTPSPort):
clusterConnectionParams.Scheme = "https"
clusterConnectionParams.Port = int(host.HTTPSPort)
}
- case chiv1.ChSchemeHTTP:
+ case api.ChSchemeHTTP:
clusterConnectionParams.Port = int(host.HTTPPort)
- case chiv1.ChSchemeHTTPS:
+ case api.ChSchemeHTTPS:
clusterConnectionParams.Port = int(host.HTTPSPort)
}
@@ -371,7 +372,7 @@ func (e *Exporter) deleteWatchedCHI(w http.ResponseWriter, r *http.Request) {
func (e *Exporter) DiscoveryWatchedCHIs(kubeClient kube.Interface, chopClient *chopAPI.Clientset) {
// Get all CHI objects from watched namespace(s)
watchedNamespace := chop.Config().GetInformerNamespace()
- list, err := chopClient.ClickhouseV1().ClickHouseInstallations(watchedNamespace).List(context.TODO(), v1.ListOptions{})
+ list, err := chopClient.ClickhouseV1().ClickHouseInstallations(watchedNamespace).List(context.TODO(), controller.NewListOptions())
if err != nil {
log.V(1).Infof("Error read ClickHouseInstallations %v", err)
return
@@ -396,8 +397,11 @@ func (e *Exporter) DiscoveryWatchedCHIs(kubeClient kube.Interface, chopClient *c
}
log.V(1).Infof("CHI %s/%s is completed, add it", chi.Namespace, chi.Name)
- normalizer := model.NewNormalizer(kubeClient)
- normalized, _ := normalizer.CreateTemplatedCHI(chi, model.NewNormalizerOptions())
+ normalizer := chiNormalizer.NewNormalizer(func(namespace, name string) (*core.Secret, error) {
+ return kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, controller.NewGetOptions())
+ })
+ normalized, _ := normalizer.CreateTemplatedCHI(chi, chiNormalizer.NewOptions())
+
watchedCHI := NewWatchedCHI(normalized)
e.updateWatched(watchedCHI)
}
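
Side note on the normalizer wiring above: a minimal, illustrative sketch of the secret-getter closure shape that chiNormalizer.NewNormalizer now expects. Only NewNormalizer, NewOptions and CreateTemplatedCHI come from the hunk above; the fake clientset, plain metav1 options and the empty CHI are assumptions made for the sketch, not part of the patch.

package main

import (
	"context"
	"fmt"

	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	fake "k8s.io/client-go/kubernetes/fake"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	chiNormalizer "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
)

func main() {
	// A fake clientset stands in for the real kube client used by the exporter.
	kubeClient := fake.NewSimpleClientset()

	// The normalizer no longer receives the whole clientset - only a closure
	// that can fetch secrets on demand.
	normalizer := chiNormalizer.NewNormalizer(func(namespace, name string) (*core.Secret, error) {
		return kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, meta.GetOptions{})
	})

	// Normalize a (here: empty, purely illustrative) CHI with default options.
	chi := &api.ClickHouseInstallation{}
	normalized, err := normalizer.CreateTemplatedCHI(chi, chiNormalizer.NewOptions())
	fmt.Println(normalized != nil, err)
}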
diff --git a/pkg/apis/metrics/type_watched_chi.go b/pkg/apis/metrics/type_watched_chi.go
index ee8fd03db..c9cda4dc0 100644
--- a/pkg/apis/metrics/type_watched_chi.go
+++ b/pkg/apis/metrics/type_watched_chi.go
@@ -16,7 +16,8 @@ package metrics
import (
"encoding/json"
- v1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)
// WatchedCHI specifies watched ClickHouseInstallation
@@ -45,13 +46,13 @@ type WatchedHost struct {
}
// NewWatchedCHI creates new watched CHI
-func NewWatchedCHI(c *v1.ClickHouseInstallation) *WatchedCHI {
+func NewWatchedCHI(c *api.ClickHouseInstallation) *WatchedCHI {
chi := &WatchedCHI{}
chi.readFrom(c)
return chi
}
-func (chi *WatchedCHI) readFrom(c *v1.ClickHouseInstallation) {
+func (chi *WatchedCHI) readFrom(c *api.ClickHouseInstallation) {
if chi == nil {
return
}
@@ -60,7 +61,7 @@ func (chi *WatchedCHI) readFrom(c *v1.ClickHouseInstallation) {
chi.Labels = c.Labels
chi.Annotations = c.Annotations
- c.WalkClusters(func(cl *v1.Cluster) error {
+ c.WalkClusters(func(cl *api.Cluster) error {
cluster := &WatchedCluster{}
cluster.readFrom(cl)
chi.Clusters = append(chi.Clusters, cluster)
@@ -100,13 +101,13 @@ func (chi *WatchedCHI) String() string {
return string(bytes)
}
-func (cluster *WatchedCluster) readFrom(c *v1.Cluster) {
+func (cluster *WatchedCluster) readFrom(c *api.Cluster) {
if cluster == nil {
return
}
cluster.Name = c.Name
- c.WalkHosts(func(h *v1.ChiHost) error {
+ c.WalkHosts(func(h *api.ChiHost) error {
host := &WatchedHost{}
host.readFrom(h)
cluster.Hosts = append(cluster.Hosts, host)
@@ -114,12 +115,12 @@ func (cluster *WatchedCluster) readFrom(c *v1.Cluster) {
})
}
-func (host *WatchedHost) readFrom(h *v1.ChiHost) {
+func (host *WatchedHost) readFrom(h *api.ChiHost) {
if host == nil {
return
}
host.Name = h.Name
- host.Hostname = h.Address.FQDN
+ host.Hostname = h.Runtime.Address.FQDN
host.TCPPort = h.TCPPort
host.TLSPort = h.TLSPort
host.HTTPPort = h.HTTPPort
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_ch_version.go b/pkg/apis/swversion/type_software_version.go
similarity index 65%
rename from pkg/apis/clickhouse.altinity.com/v1/type_ch_version.go
rename to pkg/apis/swversion/type_software_version.go
index 0762dac5e..341a03d2a 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_ch_version.go
+++ b/pkg/apis/swversion/type_software_version.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package v1
+package swversion
import (
"strings"
@@ -20,19 +20,19 @@ import (
"github.com/Masterminds/semver/v3"
)
-// CHVersion specifies ClickHouse version and ClickHouse semver
-type CHVersion struct {
- // Version specifies original ClickHouse version reported by VERSION(), such as 21.9.6.24
+// SoftWareVersion specifies software version and software semver
+type SoftWareVersion struct {
+ // Version specifies original software version, such as 21.9.6.24-alpha
Version string
- // Semver specifies semver adaptation, truncated to 3 numbers, such as 21.9.6 for 21.9.6.24 original version
+ // Semver specifies semver adaptation, truncated to 3 numbers, such as 21.9.6 for 21.9.6.24-alpha original version
Semver string
}
-// NewCHVersion creates new ClickHouse version
-func NewCHVersion(str string) *CHVersion {
- // Need to have at least 3 parts in ClickHouse version specification
+// NewSoftWareVersion creates new software version
+func NewSoftWareVersion(str string) *SoftWareVersion {
+ // Need to have at least 3 parts in software version specification
if parts := strings.Split(str, "."); len(parts) >= 3 {
- return &CHVersion{
+ return &SoftWareVersion{
Version: str,
Semver: strings.Join(parts[0:2], "."),
}
@@ -40,8 +40,8 @@ func NewCHVersion(str string) *CHVersion {
return nil
}
-// Matches checks whether ClickHouse version matches specified constraint
-func (v *CHVersion) Matches(constraint string) bool {
+// Matches checks whether software version matches specified constraint
+func (v *SoftWareVersion) Matches(constraint string) bool {
if v == nil {
return false
}
@@ -62,8 +62,8 @@ func (v *CHVersion) Matches(constraint string) bool {
return matches
}
-// IsUnknown checks whether ClickHouse version is unknown
-func (v *CHVersion) IsUnknown() bool {
+// IsUnknown checks whether software version is unknown
+func (v *SoftWareVersion) IsUnknown() bool {
if v == nil {
return true
}
@@ -74,7 +74,7 @@ func (v *CHVersion) IsUnknown() bool {
}
// String makes string
-func (v *CHVersion) String() string {
+func (v *SoftWareVersion) String() string {
if v == nil {
return ""
}
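
A quick usage sketch for the renamed type. The version string and constraint below are made-up values; the API surface (NewSoftWareVersion, IsUnknown, Matches, String) is the one defined above.

package main

import (
	"fmt"

	"github.com/altinity/clickhouse-operator/pkg/apis/swversion"
)

func main() {
	// Strings with fewer than 3 dot-separated parts yield nil, which the
	// nil-receiver methods above report as "unknown".
	v := swversion.NewSoftWareVersion("23.8.4.69-alpha")
	if v.IsUnknown() {
		fmt.Println("version could not be parsed")
		return
	}
	// Constraint matching is delegated to Masterminds semver, as shown above.
	fmt.Println(v.String(), "matches >=23.3:", v.Matches(">=23.3"))
}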
diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go
index 598dad2f9..d03a3f04c 100644
--- a/pkg/chop/config_manager.go
+++ b/pkg/chop/config_manager.go
@@ -423,22 +423,22 @@ func (cm *ConfigManager) fetchSecretCredentials() {
secret, err := cm.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, controller.NewGetOptions())
if err != nil {
cm.config.ClickHouse.Access.Secret.Runtime.Error = err.Error()
- log.V(1).Warning("Unable to fetch secret '%s/%s'", namespace, name)
+ log.V(1).Warning("Unable to fetch secret: '%s/%s'", namespace, name)
return
}
cm.config.ClickHouse.Access.Secret.Runtime.Fetched = true
- log.V(1).Info("Secret fetched %s/%s :", namespace, name)
+ log.V(1).Info("Secret fetched: '%s/%s'", namespace, name)
// Find username and password from credentials
for key, value := range secret.Data {
switch key {
case "username":
cm.config.ClickHouse.Access.Secret.Runtime.Username = string(value)
- log.V(1).Info("Username read from the secret '%s/%s'", namespace, name)
+ log.V(1).Info("Username read from the secret: '%s/%s'", namespace, name)
case "password":
cm.config.ClickHouse.Access.Secret.Runtime.Password = string(value)
- log.V(1).Info("Password read from the secret '%s/%s'", namespace, name)
+ log.V(1).Info("Password read from the secret: '%s/%s'", namespace, name)
}
}
}
diff --git a/pkg/controller/chi/creator.go b/pkg/controller/chi/creator.go
index d428e4ed3..877d0fedb 100644
--- a/pkg/controller/chi/creator.go
+++ b/pkg/controller/chi/creator.go
@@ -39,7 +39,7 @@ func (c *Controller) createStatefulSet(ctx context.Context, host *api.ChiHost) E
return nil
}
- statefulSet := host.DesiredStatefulSet
+ statefulSet := host.Runtime.DesiredStatefulSet
log.V(1).Info("Create StatefulSet %s/%s", statefulSet.Namespace, statefulSet.Name)
if _, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(ctx, statefulSet, controller.NewCreateOptions()); err != nil {
@@ -198,17 +198,23 @@ func (c *Controller) onStatefulSetCreateFailed(ctx context.Context, host *api.Ch
case api.OnStatefulSetCreateFailureActionDelete:
// Delete gracefully failed StatefulSet
- log.V(1).M(host).F().Info("going to DELETE FAILED StatefulSet %s", util.NamespaceNameString(host.DesiredStatefulSet.ObjectMeta))
+ log.V(1).M(host).F().Info(
+ "going to DELETE FAILED StatefulSet %s",
+ util.NamespaceNameString(host.Runtime.DesiredStatefulSet.ObjectMeta))
_ = c.deleteHost(ctx, host)
return c.shouldContinueOnCreateFailed()
case api.OnStatefulSetCreateFailureActionIgnore:
// Ignore error, continue reconcile loop
- log.V(1).M(host).F().Info("going to ignore error %s", util.NamespaceNameString(host.DesiredStatefulSet.ObjectMeta))
+ log.V(1).M(host).F().Info(
+ "going to ignore error %s",
+ util.NamespaceNameString(host.Runtime.DesiredStatefulSet.ObjectMeta))
return errCRUDIgnore
default:
- log.V(1).M(host).F().Error("Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s", chop.Config().Reconcile.StatefulSet.Create.OnFailure)
+ log.V(1).M(host).F().Error(
+ "Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s",
+ chop.Config().Reconcile.StatefulSet.Create.OnFailure)
return errCRUDIgnore
}
diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go
index 215decd57..72449a056 100644
--- a/pkg/controller/chi/deleter.go
+++ b/pkg/controller/chi/deleter.go
@@ -31,7 +31,7 @@ import (
// deleteHost deletes all kubernetes resources related to replica *chop.ChiHost
func (c *Controller) deleteHost(ctx context.Context, host *api.ChiHost) error {
- log.V(1).M(host).S().Info(host.Address.ClusterNameString())
+ log.V(1).M(host).S().Info(host.Runtime.Address.ClusterNameString())
// Each host consists of:
_ = c.deleteStatefulSet(ctx, host)
@@ -39,7 +39,7 @@ func (c *Controller) deleteHost(ctx context.Context, host *api.ChiHost) error {
_ = c.deleteConfigMap(ctx, host)
_ = c.deleteServiceHost(ctx, host)
- log.V(1).M(host).E().Info(host.Address.ClusterNameString())
+ log.V(1).M(host).E().Info(host.Runtime.Address.ClusterNameString())
return nil
}
@@ -123,11 +123,11 @@ func (c *Controller) deleteStatefulSet(ctx context.Context, host *api.ChiHost) e
// Namespaced name
name := model.CreateStatefulSetName(host)
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
log.V(1).M(host).F().Info("%s/%s", namespace, name)
var err error
- host.CurStatefulSet, err = c.getStatefulSet(host)
+ host.Runtime.CurStatefulSet, err = c.getStatefulSet(host)
if err != nil {
// Unable to fetch cur StatefulSet, but this is not necessarily an error yet
if apiErrors.IsNotFound(err) {
@@ -141,8 +141,8 @@ func (c *Controller) deleteStatefulSet(ctx context.Context, host *api.ChiHost) e
// Scale StatefulSet down to 0 pods count.
// This is the proper and graceful way to delete StatefulSet
var zero int32 = 0
- host.CurStatefulSet.Spec.Replicas = &zero
- if _, err := c.kubeClient.AppsV1().StatefulSets(namespace).Update(ctx, host.CurStatefulSet, controller.NewUpdateOptions()); err != nil {
+ host.Runtime.CurStatefulSet.Spec.Replicas = &zero
+ if _, err := c.kubeClient.AppsV1().StatefulSets(namespace).Update(ctx, host.Runtime.CurStatefulSet, controller.NewUpdateOptions()); err != nil {
log.V(1).M(host).Error("UNABLE to update StatefulSet %s/%s", namespace, name)
return err
}
@@ -192,7 +192,7 @@ func (c *Controller) deletePVC(ctx context.Context, host *api.ChiHost) error {
log.V(2).M(host).S().P()
defer log.V(2).M(host).E().P()
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
c.walkDiscoveredPVCs(host, func(pvc *core.PersistentVolumeClaim) {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
@@ -229,7 +229,7 @@ func (c *Controller) deleteConfigMap(ctx context.Context, host *api.ChiHost) err
}
name := model.CreateConfigMapHostName(host)
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
log.V(1).M(host).F().Info("%s/%s", namespace, name)
if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, name, controller.NewDeleteOptions()); err == nil {
@@ -263,7 +263,7 @@ func (c *Controller) deleteServiceHost(ctx context.Context, host *api.ChiHost) e
}
serviceName := model.CreateStatefulSetServiceName(host)
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
log.V(1).M(host).F().Info("%s/%s", namespace, serviceName)
return c.deleteServiceIfExists(ctx, namespace, serviceName)
}
@@ -276,7 +276,7 @@ func (c *Controller) deleteServiceShard(ctx context.Context, shard *api.ChiShard
}
serviceName := model.CreateShardServiceName(shard)
- namespace := shard.Address.Namespace
+ namespace := shard.Runtime.Address.Namespace
log.V(1).M(shard).F().Info("%s/%s", namespace, serviceName)
return c.deleteServiceIfExists(ctx, namespace, serviceName)
}
@@ -289,7 +289,7 @@ func (c *Controller) deleteServiceCluster(ctx context.Context, cluster *api.Clus
}
serviceName := model.CreateClusterServiceName(cluster)
- namespace := cluster.Address.Namespace
+ namespace := cluster.Runtime.Address.Namespace
log.V(1).M(cluster).F().Info("%s/%s", namespace, serviceName)
return c.deleteServiceIfExists(ctx, namespace, serviceName)
}
@@ -319,15 +319,16 @@ func (c *Controller) deleteServiceIfExists(ctx context.Context, namespace, name
if err != nil {
// No such a service, nothing to delete
+ log.V(1).M(namespace, name).F().Info("Not Found Service: %s/%s err: %v", namespace, name, err)
return nil
}
// Delete service
err = c.kubeClient.CoreV1().Services(namespace).Delete(ctx, name, controller.NewDeleteOptions())
if err == nil {
- log.V(1).M(namespace, name).Info("OK delete Service %s/%s", namespace, name)
+ log.V(1).M(namespace, name).F().Info("OK delete Service: %s/%s", namespace, name)
} else {
- log.V(1).M(namespace, name).F().Error("FAIL delete Service %s/%s err:%v", namespace, name, err)
+ log.V(1).M(namespace, name).F().Error("FAIL delete Service: %s/%s err:%v", namespace, name, err)
}
return err
@@ -341,7 +342,7 @@ func (c *Controller) deleteSecretCluster(ctx context.Context, cluster *api.Clust
}
secretName := model.CreateClusterAutoSecretName(cluster)
- namespace := cluster.Address.Namespace
+ namespace := cluster.Runtime.Address.Namespace
log.V(1).M(cluster).F().Info("%s/%s", namespace, secretName)
return c.deleteSecretIfExists(ctx, namespace, secretName)
}
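
The deleteStatefulSet hunk above scales the StatefulSet to zero replicas before removing it. A stand-alone sketch of that sequence against a plain client-go clientset; the function, its name and its arguments are placeholders for illustration, not from the patch.

package sketch

import (
	"context"
	"fmt"

	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	kube "k8s.io/client-go/kubernetes"
)

// scaleDownAndDelete mirrors the graceful-delete sequence: set replicas to 0,
// let pods terminate, then delete the StatefulSet object itself.
func scaleDownAndDelete(ctx context.Context, kubeClient kube.Interface, namespace, name string) error {
	sts, err := kubeClient.AppsV1().StatefulSets(namespace).Get(ctx, name, meta.GetOptions{})
	if err != nil {
		return fmt.Errorf("get StatefulSet %s/%s: %w", namespace, name, err)
	}

	var zero int32 = 0
	sts.Spec.Replicas = &zero
	if _, err := kubeClient.AppsV1().StatefulSets(namespace).Update(ctx, sts, meta.UpdateOptions{}); err != nil {
		return fmt.Errorf("scale StatefulSet %s/%s to zero: %w", namespace, name, err)
	}

	// In the operator this point is reached only after polling confirms the
	// pods are gone; the sketch deletes right away for brevity.
	return kubeClient.AppsV1().StatefulSets(namespace).Delete(ctx, name, meta.DeleteOptions{})
}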
diff --git a/pkg/controller/chi/getter.go b/pkg/controller/chi/getter.go
index 5630072a3..46dfc02ff 100644
--- a/pkg/controller/chi/getter.go
+++ b/pkg/controller/chi/getter.go
@@ -90,7 +90,7 @@ func (c *Controller) getService(obj interface{}) (*core.Service, error) {
namespace = typedObj.Namespace
case *api.ChiHost:
name = model.CreateStatefulSetServiceName(typedObj)
- namespace = typedObj.Address.Namespace
+ namespace = typedObj.Runtime.Address.Namespace
}
return c.serviceLister.Services(namespace).Get(name)
//return c.kubeClient.CoreV1().Services(namespace).Get(newTask(), name, newGetOptions())
@@ -165,7 +165,7 @@ func (c *Controller) getStatefulSetByMeta(meta *meta.ObjectMeta, byNameOnly bool
func (c *Controller) getStatefulSetByHost(host *api.ChiHost) (*apps.StatefulSet, error) {
// Namespaced name
name := model.CreateStatefulSetName(host)
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
return c.kubeClient.AppsV1().StatefulSets(namespace).Get(controller.NewContext(), name, controller.NewGetOptions())
}
@@ -186,7 +186,7 @@ func (c *Controller) getPod(obj interface{}) (*core.Pod, error) {
namespace = typedObj.Namespace
case *api.ChiHost:
name = model.CreatePodName(obj)
- namespace = typedObj.Address.Namespace
+ namespace = typedObj.Runtime.Address.Namespace
}
return c.kubeClient.CoreV1().Pods(namespace).Get(controller.NewContext(), name, controller.NewGetOptions())
}
diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go
index 970cda8d3..c17d02f88 100644
--- a/pkg/controller/chi/labeler.go
+++ b/pkg/controller/chi/labeler.go
@@ -257,7 +257,7 @@ func (c *Controller) appendLabelReadyOnPod(ctx context.Context, host *api.ChiHos
pod, err := c.getPod(host)
if err != nil {
- log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err)
+ log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
}
@@ -265,7 +265,7 @@ func (c *Controller) appendLabelReadyOnPod(ctx context.Context, host *api.ChiHos
// Modified, need to update
_, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(ctx, pod, controller.NewUpdateOptions())
if err != nil {
- log.M(host).F().Error("FAIL setting 'ready' label for host %s err:%v", host.Address.NamespaceNameString(), err)
+ log.M(host).F().Error("FAIL setting 'ready' label for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
}
}
@@ -291,7 +291,7 @@ func (c *Controller) deleteLabelReadyPod(ctx context.Context, host *api.ChiHost)
}
if err != nil {
- log.V(1).M(host).F().Info("FAIL get pod for host '%s' err: %v", host.Address.NamespaceNameString(), err)
+ log.V(1).M(host).F().Info("FAIL get pod for host '%s' err: %v", host.Runtime.Address.NamespaceNameString(), err)
return err
}
@@ -313,7 +313,7 @@ func (c *Controller) appendAnnotationReadyOnService(ctx context.Context, host *a
svc, err := c.getService(host)
if err != nil {
- log.M(host).F().Error("FAIL get service for host %s err:%v", host.Address.NamespaceNameString(), err)
+ log.M(host).F().Error("FAIL get service for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
}
@@ -321,7 +321,7 @@ func (c *Controller) appendAnnotationReadyOnService(ctx context.Context, host *a
// Modified, need to update
_, err = c.kubeClient.CoreV1().Services(svc.Namespace).Update(ctx, svc, controller.NewUpdateOptions())
if err != nil {
- log.M(host).F().Error("FAIL setting 'ready' annotation for host service %s err:%v", host.Address.NamespaceNameString(), err)
+ log.M(host).F().Error("FAIL setting 'ready' annotation for host service %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
}
}
@@ -347,7 +347,7 @@ func (c *Controller) deleteAnnotationReadyService(ctx context.Context, host *api
return nil
}
if err != nil {
- log.V(1).M(host).F().Info("FAIL get service for host '%s' err: %v", host.Address.NamespaceNameString(), err)
+ log.V(1).M(host).F().Info("FAIL get service for host '%s' err: %v", host.Runtime.Address.NamespaceNameString(), err)
return err
}
diff --git a/pkg/controller/chi/metrics.go b/pkg/controller/chi/metrics.go
index de7e8428b..8b21895f8 100644
--- a/pkg/controller/chi/metrics.go
+++ b/pkg/controller/chi/metrics.go
@@ -29,6 +29,9 @@ type Metrics struct {
// CHIReconcilesCompleted is a number (counter) of completed CHI reconciles.
// In ideal world number of completed reconciles should be equal to CHIReconcilesStarted
CHIReconcilesCompleted otelApi.Int64Counter
+ // CHIReconcilesAborted is a number (counter) of explicitly aborted CHI reconciles.
+ // This counter does not include reconciles that were not completed due to external reasons, such as operator restart
+ CHIReconcilesAborted otelApi.Int64Counter
// CHIReconcilesTimings is a histogram of durations of successfully completed CHI reconciles
CHIReconcilesTimings otelApi.Float64Histogram
@@ -63,6 +66,11 @@ func createMetrics() *Metrics {
otelApi.WithDescription("number of CHI reconciles completed successfully"),
otelApi.WithUnit("items"),
)
+ CHIReconcilesAborted, _ := metrics.Meter().Int64Counter(
+ "clickhouse_operator_chi_reconciles_aborted",
+ otelApi.WithDescription("number of CHI reconciles aborted"),
+ otelApi.WithUnit("items"),
+ )
CHIReconcilesTimings, _ := metrics.Meter().Float64Histogram(
"clickhouse_operator_chi_reconciles_timings",
otelApi.WithDescription("timings of CHI reconciles completed successfully"),
@@ -114,6 +122,7 @@ func createMetrics() *Metrics {
return &Metrics{
CHIReconcilesStarted: CHIReconcilesStarted,
CHIReconcilesCompleted: CHIReconcilesCompleted,
+ CHIReconcilesAborted: CHIReconcilesAborted,
CHIReconcilesTimings: CHIReconcilesTimings,
HostReconcilesStarted: HostReconcilesStarted,
@@ -141,6 +150,9 @@ func metricsCHIReconcilesStarted(ctx context.Context) {
func metricsCHIReconcilesCompleted(ctx context.Context) {
ensureMetrics().CHIReconcilesCompleted.Add(ctx, 1)
}
+func metricsCHIReconcilesAborted(ctx context.Context) {
+ ensureMetrics().CHIReconcilesAborted.Add(ctx, 1)
+}
func metricsCHIReconcilesTimings(ctx context.Context, seconds float64) {
ensureMetrics().CHIReconcilesTimings.Record(ctx, seconds)
}
diff --git a/pkg/controller/chi/podder.go b/pkg/controller/chi/podder.go
index 30c14ba07..cd46d2303 100644
--- a/pkg/controller/chi/podder.go
+++ b/pkg/controller/chi/podder.go
@@ -25,7 +25,7 @@ import (
func (c *Controller) walkContainers(host *api.ChiHost, f func(container *v1.Container)) {
pod, err := c.getPod(host)
if err != nil {
- log.M(host).F().Error("FAIL get pod for host '%s' err: %v", host.Address.NamespaceNameString(), err)
+ log.M(host).F().Error("FAIL get pod for host '%s' err: %v", host.Runtime.Address.NamespaceNameString(), err)
return
}
@@ -39,7 +39,7 @@ func (c *Controller) walkContainers(host *api.ChiHost, f func(container *v1.Cont
func (c *Controller) walkContainerStatuses(host *api.ChiHost, f func(status *v1.ContainerStatus)) {
pod, err := c.getPod(host)
if err != nil {
- log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err)
+ log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return
}
diff --git a/pkg/controller/chi/poller.go b/pkg/controller/chi/poller.go
index baa4ea214..32b505136 100644
--- a/pkg/controller/chi/poller.go
+++ b/pkg/controller/chi/poller.go
@@ -26,7 +26,7 @@ import (
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/model/k8s"
"github.com/altinity/clickhouse-operator/pkg/util"
)
@@ -42,7 +42,7 @@ func (c *Controller) waitHostNotReady(ctx context.Context, host *api.ChiHost) er
FromConfig(chop.Config()).
SetGetErrorTimeout(0),
func(_ context.Context, sts *apps.StatefulSet) bool {
- return model.IsStatefulSetNotReady(sts)
+ return k8s.IsStatefulSetNotReady(sts)
},
nil,
)
@@ -66,7 +66,7 @@ func (c *Controller) waitHostReady(ctx context.Context, host *api.ChiHost) error
}
_ = c.deleteLabelReadyPod(_ctx, host)
_ = c.deleteAnnotationReadyService(_ctx, host)
- return model.IsStatefulSetGeneration(sts, sts.Generation)
+ return k8s.IsStatefulSetGeneration(sts, sts.Generation)
},
func(_ctx context.Context) {
_ = c.deleteLabelReadyPod(_ctx, host)
@@ -85,7 +85,7 @@ func (c *Controller) waitHostReady(ctx context.Context, host *api.ChiHost) error
func(_ctx context.Context, sts *apps.StatefulSet) bool {
_ = c.deleteLabelReadyPod(_ctx, host)
_ = c.deleteAnnotationReadyService(_ctx, host)
- return model.IsStatefulSetReady(sts)
+ return k8s.IsStatefulSetReady(sts)
},
func(_ctx context.Context) {
_ = c.deleteLabelReadyPod(_ctx, host)
@@ -124,8 +124,8 @@ func (c *Controller) pollHost(
}
opts = opts.Ensure().FromConfig(chop.Config())
- namespace := host.Address.Namespace
- name := host.Address.HostName
+ namespace := host.Runtime.Address.Namespace
+ name := host.Runtime.Address.HostName
return controller.Poll(
ctx,
@@ -157,8 +157,8 @@ func (c *Controller) pollHostStatefulSet(
opts = controller.NewPollerOptions().FromConfig(chop.Config())
}
- namespace := host.Address.Namespace
- name := host.Address.StatefulSet
+ namespace := host.Runtime.Address.Namespace
+ name := host.Runtime.Address.StatefulSet
return controller.Poll(
ctx,
diff --git a/pkg/controller/chi/volumes.go b/pkg/controller/chi/volumes.go
index 8aa2c7497..30033fc95 100644
--- a/pkg/controller/chi/volumes.go
+++ b/pkg/controller/chi/volumes.go
@@ -24,7 +24,7 @@ import (
)
func (c *Controller) walkPVCs(host *api.ChiHost, f func(pvc *core.PersistentVolumeClaim)) {
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
name := model.CreatePodName(host)
pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(controller.NewContext(), name, controller.NewGetOptions())
if err != nil {
@@ -50,7 +50,7 @@ func (c *Controller) walkPVCs(host *api.ChiHost, f func(pvc *core.PersistentVolu
}
func (c *Controller) walkDiscoveredPVCs(host *api.ChiHost, f func(pvc *core.PersistentVolumeClaim)) {
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
pvcList, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(controller.NewContext(), controller.NewListOptions(model.GetSelectorHostScope(host)))
if err != nil {
diff --git a/pkg/controller/chi/worker-chi-reconciler.go b/pkg/controller/chi/worker-chi-reconciler.go
index f8ba1b725..e9579a91c 100644
--- a/pkg/controller/chi/worker-chi-reconciler.go
+++ b/pkg/controller/chi/worker-chi-reconciler.go
@@ -16,6 +16,7 @@ package chi
import (
"context"
+ "errors"
"fmt"
"math"
"sync"
@@ -30,9 +31,11 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
"github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller"
model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/creator"
"github.com/altinity/clickhouse-operator/pkg/util"
)
@@ -102,22 +105,22 @@ func (w *worker) reconcileCHI(ctx context.Context, old, new *api.ClickHouseInsta
w.walkHosts(ctx, new, actionPlan)
if err := w.reconcile(ctx, new); err != nil {
+ // Something went wrong
w.a.WithEvent(new, eventActionReconcile, eventReasonReconcileFailed).
WithStatusError(new).
M(new).F().
Error("FAILED to reconcile CHI err: %v", err)
w.markReconcileCompletedUnsuccessfully(ctx, new, err)
+ if errors.Is(err, errCRUDAbort) {
+ metricsCHIReconcilesAborted(ctx)
+ }
} else {
+ // Reconcile successful
// Post-process added items
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- w.a.V(1).
- WithEvent(new, eventActionReconcile, eventReasonReconcileInProgress).
- WithStatusAction(new).
- M(new).F().
- Info("remove items scheduled for deletion")
w.clean(ctx, new)
w.dropReplicas(ctx, new, actionPlan)
w.addCHIToMonitoring(new)
@@ -156,7 +159,9 @@ func (w *worker) reconcile(ctx context.Context, chi *api.ClickHouseInstallation)
})
if counters.GetAdd() > 0 && counters.GetFound() == 0 && counters.GetModify() == 0 && counters.GetRemove() == 0 {
- w.a.V(1).M(chi).Info("Looks like we are just adding hosts to a new CHI. Enabling full fan-out mode. CHI: %s/%s", chi.Namespace, chi.Name)
+ w.a.V(1).M(chi).Info(
+ "Looks like we are just adding hosts to a new CHI. Enabling full fan-out mode. CHI: %s/%s",
+ chi.Namespace, chi.Name)
ctx = context.WithValue(ctx, ReconcileShardsAndHostsOptionsCtxKey, &ReconcileShardsAndHostsOptions{
fullFanOut: true,
})
@@ -289,7 +294,7 @@ func (w *worker) reconcileHostConfigMap(ctx context.Context, host *api.ChiHost)
// ConfigMap for a host
configMap := w.task.creator.CreateConfigMapHost(host)
- err := w.reconcileConfigMap(ctx, host.CHI, configMap)
+ err := w.reconcileConfigMap(ctx, host.GetCHI(), configMap)
if err == nil {
w.task.registryReconciled.RegisterConfigMap(configMap.ObjectMeta)
} else {
@@ -337,7 +342,7 @@ func (w *worker) getHostClickHouseVersion(ctx context.Context, host *api.ChiHost
}
w.a.V(1).M(host).F().Info("Get ClickHouse version on host: %s version: %s", host.GetName(), version)
- host.Version = api.NewCHVersion(version)
+ host.Runtime.Version = swversion.NewSoftWareVersion(version)
return version, nil
}
@@ -397,13 +402,13 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.ChiHost
defer log.V(1).M(host).F().E().Info("reconcile StatefulSet end")
version, _ := w.getHostClickHouseVersion(ctx, host, versionOptions{skipNew: true, skipStoppedAncestor: true})
- host.CurStatefulSet, _ = w.c.getStatefulSet(host, false)
+ host.Runtime.CurStatefulSet, _ = w.c.getStatefulSet(host, false)
- w.a.V(1).M(host).F().Info("Reconcile host %s. ClickHouse version: %s", host.GetName(), version)
+ w.a.V(1).M(host).F().Info("Reconcile host: %s. ClickHouse version: %s", host.GetName(), version)
// In case we have to force-restart host
// We'll do it via replicas: 0 in StatefulSet.
if w.shouldForceRestartHost(host) {
- w.a.V(1).M(host).F().Info("Reconcile host %s. Shutting host down due to force restart", host.GetName())
+ w.a.V(1).M(host).F().Info("Reconcile host: %s. Shutting host down due to force restart", host.GetName())
w.prepareHostStatefulSetWithStatus(ctx, host, true)
_ = w.reconcileStatefulSet(ctx, host, false)
metricsHostReconcilesRestart(ctx)
@@ -412,24 +417,24 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.ChiHost
}
// We are in place, where we can reconcile StatefulSet to desired configuration.
- w.a.V(1).M(host).F().Info("Reconcile host %s. Reconcile StatefulSet", host.GetName())
+ w.a.V(1).M(host).F().Info("Reconcile host: %s. Reconcile StatefulSet", host.GetName())
w.prepareHostStatefulSetWithStatus(ctx, host, false)
err := w.reconcileStatefulSet(ctx, host, true, opts...)
if err == nil {
- w.task.registryReconciled.RegisterStatefulSet(host.DesiredStatefulSet.ObjectMeta)
+ w.task.registryReconciled.RegisterStatefulSet(host.Runtime.DesiredStatefulSet.ObjectMeta)
} else {
- w.task.registryFailed.RegisterStatefulSet(host.DesiredStatefulSet.ObjectMeta)
+ w.task.registryFailed.RegisterStatefulSet(host.Runtime.DesiredStatefulSet.ObjectMeta)
if err == errCRUDIgnore {
// Pretend nothing happened in case of ignore
err = nil
}
- host.CHI.EnsureStatus().HostFailed()
- w.a.WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileFailed).
- WithStatusAction(host.CHI).
- WithStatusError(host.CHI).
+ host.GetCHI().EnsureStatus().HostFailed()
+ w.a.WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileFailed).
+ WithStatusAction(host.GetCHI()).
+ WithStatusError(host.GetCHI()).
M(host).F().
- Error("FAILED to reconcile StatefulSet for host ", host.GetName())
+ Error("FAILED to reconcile StatefulSet for host: %s", host.GetName())
}
return err
@@ -446,12 +451,12 @@ func (w *worker) reconcileHostService(ctx context.Context, host *api.ChiHost) er
// This is not a problem, service may be omitted
return nil
}
- err := w.reconcileService(ctx, host.CHI, service)
+ err := w.reconcileService(ctx, host.GetCHI(), service)
if err == nil {
- w.a.V(1).M(host).F().Info("DONE Reconcile service of the host %s.", host.GetName())
+ w.a.V(1).M(host).F().Info("DONE Reconcile service of the host: %s", host.GetName())
w.task.registryReconciled.RegisterService(service.ObjectMeta)
} else {
- w.a.V(1).M(host).F().Warning("FAILED Reconcile service of the host %s.", host.GetName())
+ w.a.V(1).M(host).F().Warning("FAILED Reconcile service of the host: %s", host.GetName())
w.task.registryFailed.RegisterService(service.ObjectMeta)
}
return err
@@ -469,7 +474,7 @@ func (w *worker) reconcileCluster(ctx context.Context, cluster *api.Cluster) err
// Add ChkCluster's Service
if service := w.task.creator.CreateServiceCluster(cluster); service != nil {
- if err := w.reconcileService(ctx, cluster.CHI, service); err == nil {
+ if err := w.reconcileService(ctx, cluster.Runtime.CHI, service); err == nil {
w.task.registryReconciled.RegisterService(service.ObjectMeta)
} else {
w.task.registryFailed.RegisterService(service.ObjectMeta)
@@ -479,7 +484,7 @@ func (w *worker) reconcileCluster(ctx context.Context, cluster *api.Cluster) err
// Add ChkCluster's Auto Secret
if cluster.Secret.Source() == api.ClusterSecretSourceAuto {
if secret := w.task.creator.CreateClusterSecret(model.CreateClusterAutoSecretName(cluster)); secret != nil {
- if err := w.reconcileSecret(ctx, cluster.CHI, secret); err == nil {
+ if err := w.reconcileSecret(ctx, cluster.Runtime.CHI, secret); err == nil {
w.task.registryReconciled.RegisterSecret(secret.ObjectMeta)
} else {
w.task.registryFailed.RegisterSecret(secret.ObjectMeta)
@@ -633,7 +638,7 @@ func (w *worker) reconcileShard(ctx context.Context, shard *api.ChiShard) error
// This is not a problem, ServiceShard may be omitted
return nil
}
- err := w.reconcileService(ctx, shard.CHI, service)
+ err := w.reconcileService(ctx, shard.Runtime.CHI, service)
if err == nil {
w.task.registryReconciled.RegisterService(service.ObjectMeta)
} else {
@@ -661,8 +666,8 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
startTime := time.Now()
if host.IsFirst() {
- w.reconcileCHIServicePreliminary(ctx, host.CHI)
- defer w.reconcileCHIServiceFinal(ctx, host.CHI)
+ w.reconcileCHIServicePreliminary(ctx, host.GetCHI())
+ defer w.reconcileCHIServiceFinal(ctx, host.GetCHI())
}
// Check whether ClickHouse is running and accessible and what version is available
@@ -703,7 +708,7 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
w.a.V(1).
M(host).F().
- Info("Reconcile PVCs and check possible data loss for host %s", host.GetName())
+ Info("Reconcile PVCs and check possible data loss for host: %s", host.GetName())
if errIsDataLoss(w.reconcilePVCs(ctx, host, api.DesiredStatefulSet)) {
// In case of data loss detection on existing volumes, we need to:
// 1. recreate StatefulSet
@@ -717,7 +722,7 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
}
w.a.V(1).
M(host).F().
- Info("Data loss detected for host %s. Will do force migrate", host.GetName())
+ Info("Data loss detected for host: %s. Will do force migrate", host.GetName())
}
if err := w.reconcileHostStatefulSet(ctx, host, reconcileHostStatefulSetOpts); err != nil {
@@ -760,14 +765,14 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
// Sometimes service needs some time to start after creation|modification before being accessible for usage
if version, err := w.pollHostForClickHouseVersion(ctx, host); err == nil {
w.a.V(1).
- WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileCompleted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileCompleted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Info("Reconcile Host completed. Host: %s ClickHouse version running: %s", host.GetName(), version)
} else {
w.a.V(1).
- WithEvent(host.CHI, eventActionReconcile, eventReasonReconcileCompleted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileCompleted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Warning("Reconcile Host completed. Host: %s Failed to get ClickHouse version: %s", host.GetName(), version)
}
@@ -775,18 +780,18 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
now := time.Now()
hostsCompleted := 0
hostsCount := 0
- host.CHI.EnsureStatus().HostCompleted()
- if host.CHI != nil && host.CHI.Status != nil {
- hostsCompleted = host.CHI.Status.GetHostsCompletedCount()
- hostsCount = host.CHI.Status.GetHostsCount()
+ host.GetCHI().EnsureStatus().HostCompleted()
+ if host.GetCHI() != nil && host.GetCHI().Status != nil {
+ hostsCompleted = host.GetCHI().Status.GetHostsCompletedCount()
+ hostsCount = host.GetCHI().Status.GetHostsCount()
}
w.a.V(1).
- WithEvent(host.CHI, eventActionProgress, eventReasonProgressHostsCompleted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionProgress, eventReasonProgressHostsCompleted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Info("[now: %s] %s: %d of %d", now, eventReasonProgressHostsCompleted, hostsCompleted, hostsCount)
- _ = w.c.updateCHIObjectStatus(ctx, host.CHI, UpdateCHIStatusOptions{
+ _ = w.c.updateCHIObjectStatus(ctx, host.GetCHI(), UpdateCHIStatusOptions{
CopyCHIStatusOptions: api.CopyCHIStatusOptions{
MainFields: true,
},
@@ -806,21 +811,21 @@ func (w *worker) reconcilePDB(ctx context.Context, cluster *api.Cluster, pdb *po
pdb.ResourceVersion = cur.ResourceVersion
_, err := w.c.kubeClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Update(ctx, pdb, controller.NewUpdateOptions())
if err == nil {
- log.V(1).Info("PDB updated %s/%s", pdb.Namespace, pdb.Name)
+ log.V(1).Info("PDB updated: %s/%s", pdb.Namespace, pdb.Name)
} else {
- log.Error("FAILED to update PDB %s/%s err: %v", pdb.Namespace, pdb.Name, err)
+ log.Error("FAILED to update PDB: %s/%s err: %v", pdb.Namespace, pdb.Name, err)
return nil
}
case apiErrors.IsNotFound(err):
_, err := w.c.kubeClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Create(ctx, pdb, controller.NewCreateOptions())
if err == nil {
- log.V(1).Info("PDB created %s/%s", pdb.Namespace, pdb.Name)
+ log.V(1).Info("PDB created: %s/%s", pdb.Namespace, pdb.Name)
} else {
- log.Error("FAILED create PDB %s/%s err: %v", pdb.Namespace, pdb.Name, err)
+ log.Error("FAILED create PDB: %s/%s err: %v", pdb.Namespace, pdb.Name, err)
return err
}
default:
- log.Error("FAILED get PDB %s/%s err: %v", pdb.Namespace, pdb.Name, err)
+ log.Error("FAILED get PDB: %s/%s err: %v", pdb.Namespace, pdb.Name, err)
return err
}
@@ -887,21 +892,35 @@ func (w *worker) reconcileService(ctx context.Context, chi *api.ClickHouseInstal
if curService != nil {
// We have the Service - try to update it
+ w.a.V(1).M(chi).F().Info("Service found: %s/%s. Will try to update", service.Namespace, service.Name)
err = w.updateService(ctx, chi, curService, service)
}
if err != nil {
- // The Service is either not found or not updated. Try to recreate it
+ if apiErrors.IsNotFound(err) {
+ // The Service is not found - try to recreate it
+ w.a.V(1).M(chi).F().Info("Service: %s/%s not found. err: %v", service.Namespace, service.Name, err)
+ } else {
+ // The Service failed to update - try to recreate it
+ w.a.WithEvent(chi, eventActionUpdate, eventReasonUpdateFailed).
+ WithStatusAction(chi).
+ WithStatusError(chi).
+ M(chi).F().
+ Error("Update Service: %s/%s failed with error: %v", service.Namespace, service.Name, err)
+ }
+
_ = w.c.deleteServiceIfExists(ctx, service.Namespace, service.Name)
err = w.createService(ctx, chi, service)
}
- if err != nil {
+ if err == nil {
+ w.a.V(1).M(chi).F().Info("Service reconcile successful: %s/%s", service.Namespace, service.Name)
+ } else {
w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).
WithStatusAction(chi).
WithStatusError(chi).
M(chi).F().
- Error("FAILED to reconcile Service: %s CHI: %s ", service.Name, chi.Name)
+ Error("FAILED to reconcile Service: %s/%s CHI: %s ", service.Namespace, service.Name, chi.Name)
}
return err
@@ -995,16 +1014,16 @@ func (w *worker) reconcileStatefulSet(
return nil
}
- newStatefulSet := host.DesiredStatefulSet
+ newStatefulSet := host.Runtime.DesiredStatefulSet
w.a.V(2).M(host).S().Info(util.NamespaceNameString(newStatefulSet.ObjectMeta))
defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(newStatefulSet.ObjectMeta))
if host.GetReconcileAttributes().GetStatus() == api.ObjectStatusSame {
- w.a.V(2).M(host).F().Info("No need to reconcile THE SAME StatefulSet %s", util.NamespaceNameString(newStatefulSet.ObjectMeta))
+ w.a.V(2).M(host).F().Info("No need to reconcile THE SAME StatefulSet: %s", util.NamespaceNameString(newStatefulSet.ObjectMeta))
if register {
- host.CHI.EnsureStatus().HostUnchanged()
- _ = w.c.updateCHIObjectStatus(ctx, host.CHI, UpdateCHIStatusOptions{
+ host.GetCHI().EnsureStatus().HostUnchanged()
+ _ = w.c.updateCHIObjectStatus(ctx, host.GetCHI(), UpdateCHIStatusOptions{
CopyCHIStatusOptions: api.CopyCHIStatusOptions{
MainFields: true,
},
@@ -1014,12 +1033,12 @@ func (w *worker) reconcileStatefulSet(
}
// Check whether this object already exists in k8s
- host.CurStatefulSet, err = w.c.getStatefulSet(&newStatefulSet.ObjectMeta, false)
+ host.Runtime.CurStatefulSet, err = w.c.getStatefulSet(&newStatefulSet.ObjectMeta, false)
// Report diff to trace
if host.GetReconcileAttributes().GetStatus() == api.ObjectStatusModified {
- w.a.V(1).M(host).F().Info("Need to reconcile MODIFIED StatefulSet %s", util.NamespaceNameString(newStatefulSet.ObjectMeta))
- w.dumpStatefulSetDiff(host, host.CurStatefulSet, newStatefulSet)
+ w.a.V(1).M(host).F().Info("Need to reconcile MODIFIED StatefulSet: %s", util.NamespaceNameString(newStatefulSet.ObjectMeta))
+ w.dumpStatefulSetDiff(host, host.Runtime.CurStatefulSet, newStatefulSet)
}
opt := NewReconcileHostStatefulSetOptionsArr(opts...).First()
@@ -1038,7 +1057,7 @@ func (w *worker) reconcileStatefulSet(
}
// Host has to know current StatefulSet and Pod
- host.CurStatefulSet, _ = w.c.getStatefulSet(&newStatefulSet.ObjectMeta, false)
+ host.Runtime.CurStatefulSet, _ = w.c.getStatefulSet(&newStatefulSet.ObjectMeta, false)
return err
}
@@ -1062,7 +1081,7 @@ func (w *worker) reconcilePVCs(ctx context.Context, host *api.ChiHost, which api
return nil
}
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
w.a.V(2).M(host).S().Info("host %s/%s", namespace, host.GetName())
defer w.a.V(2).M(host).E().Info("host %s/%s", namespace, host.GetName())
@@ -1122,7 +1141,7 @@ func (w *worker) reconcilePVCFromVolumeMount(
// PVC available. Either fetched or not found and model created (from templates)
pvcName := "pvc-name-unknown-pvc-not-exist"
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
if pvc != nil {
pvcName = pvc.Name
@@ -1157,12 +1176,12 @@ func (w *worker) reconcilePVCFromVolumeMount(
switch pvcReconciled, err := w.reconcilePVC(ctx, pvc, host, volumeClaimTemplate); err {
case errNilPVC:
- w.a.M(host).F().Error("Unable to reconcile nil PVC %s/%s", namespace, pvcName)
+ w.a.M(host).F().Error("Unable to reconcile nil PVC: %s/%s", namespace, pvcName)
case nil:
w.task.registryReconciled.RegisterPVC(pvcReconciled.ObjectMeta)
default:
w.task.registryFailed.RegisterPVC(pvc.ObjectMeta)
- w.a.M(host).F().Error("Unable to reconcile PVC err: %v", pvc.Namespace, pvc.Name, err)
+ w.a.M(host).F().Error("Unable to reconcile PVC: %s/%s err: %v", pvc.Namespace, pvc.Name, err)
}
// It still may return data loss errors
@@ -1179,7 +1198,7 @@ func (w *worker) fetchPVC(
isModelCreated bool,
err error,
) {
- namespace := host.Address.Namespace
+ namespace := host.Runtime.Address.Namespace
// Try to find volumeClaimTemplate that is used to build this mounted volume
// Volume mount can point not only to volume claim, but also to other entities, such as ConfigMap, for example.
@@ -1215,7 +1234,7 @@ func (w *worker) fetchPVC(
// This is not an error per se, means PVC is not created (yet)?
w.a.V(2).M(host).Info("PVC (%s/%s/%s/%s) not found", namespace, host.GetName(), volumeMount.Name, pvcName)
- if w.task.creator.OperatorShouldCreatePVC(host, volumeClaimTemplate) {
+ if creator.OperatorShouldCreatePVC(host, volumeClaimTemplate) {
// Operator is in charge of PVCs
// Create PVC model.
pvc = w.task.creator.CreatePVC(pvcName, host, &volumeClaimTemplate.Spec)
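
The new aborted-reconcile metric above keys off errors.Is(err, errCRUDAbort). A minimal stand-alone sketch of that sentinel-error pattern; errCRUDAbort and the printed message are local stand-ins for illustration, not imports from the operator.

package main

import (
	"errors"
	"fmt"
)

// Local stand-in for the operator's package-level sentinel error.
var errCRUDAbort = errors.New("crud abort")

func main() {
	// Wrapping with %w keeps the sentinel detectable through errors.Is,
	// which is what the aborted-metric branch above relies on.
	err := fmt.Errorf("reconcile host: %w", errCRUDAbort)
	if errors.Is(err, errCRUDAbort) {
		fmt.Println("explicit abort - this is where metricsCHIReconcilesAborted(ctx) is called")
	}
}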
diff --git a/pkg/controller/chi/worker-deleter.go b/pkg/controller/chi/worker-deleter.go
index 2355d78bc..18c96a54a 100644
--- a/pkg/controller/chi/worker-deleter.go
+++ b/pkg/controller/chi/worker-deleter.go
@@ -26,6 +26,7 @@ import (
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/controller"
model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
"github.com/altinity/clickhouse-operator/pkg/util"
)
@@ -35,9 +36,15 @@ func (w *worker) clean(ctx context.Context, chi *api.ClickHouseInstallation) {
return
}
+ w.a.V(1).
+ WithEvent(chi, eventActionReconcile, eventReasonReconcileInProgress).
+ WithStatusAction(chi).
+ M(chi).F().
+ Info("remove items scheduled for deletion")
+
// Remove deleted items
- w.a.V(1).M(chi).F().Info("Failed to reconcile objects:\n%s", w.task.registryFailed)
- w.a.V(1).M(chi).F().Info("Reconciled objects:\n%s", w.task.registryReconciled)
+ w.a.V(1).M(chi).F().Info("List of objects which have failed to reconcile:\n%s", w.task.registryFailed)
+ w.a.V(1).M(chi).F().Info("List of successfully reconciled objects:\n%s", w.task.registryReconciled)
objs := w.c.discovery(ctx, chi)
need := w.task.registryReconciled
w.a.V(1).M(chi).F().Info("Existing objects:\n%s", objs)
@@ -48,12 +55,6 @@ func (w *worker) clean(ctx context.Context, chi *api.ClickHouseInstallation) {
util.WaitContextDoneOrTimeout(ctx, 1*time.Minute)
}
- w.a.V(1).
- WithEvent(chi, eventActionReconcile, eventReasonReconcileInProgress).
- WithStatusAction(chi).
- M(chi).F().
- Info("remove items scheduled for deletion")
-
chi.EnsureStatus().SyncHostTablesCreated()
}
@@ -125,9 +126,9 @@ func (w *worker) purgeStatefulSet(
m meta.ObjectMeta,
) int {
if shouldPurgeStatefulSet(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete StatefulSet %s/%s", m.Namespace, m.Name)
+ w.a.V(1).M(m).F().Info("Delete StatefulSet: %s/%s", m.Namespace, m.Name)
if err := w.c.kubeClient.AppsV1().StatefulSets(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete StatefulSet %s/%s, err: %v", m.Namespace, m.Name, err)
+ w.a.V(1).M(m).F().Error("FAILED to delete StatefulSet: %s/%s, err: %v", m.Namespace, m.Name, err)
}
return 1
}
@@ -142,9 +143,9 @@ func (w *worker) purgePVC(
) {
if shouldPurgePVC(chi, reconcileFailedObjs, m) {
if model.GetReclaimPolicy(m) == api.PVCReclaimPolicyDelete {
- w.a.V(1).M(m).F().Info("Delete PVC %s/%s", m.Namespace, m.Name)
+ w.a.V(1).M(m).F().Info("Delete PVC: %s/%s", m.Namespace, m.Name)
if err := w.c.kubeClient.CoreV1().PersistentVolumeClaims(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete PVC %s/%s, err: %v", m.Namespace, m.Name, err)
+ w.a.V(1).M(m).F().Error("FAILED to delete PVC: %s/%s, err: %v", m.Namespace, m.Name, err)
}
}
}
@@ -157,9 +158,9 @@ func (w *worker) purgeConfigMap(
m meta.ObjectMeta,
) {
if shouldPurgeConfigMap(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete ConfigMap %s/%s", m.Namespace, m.Name)
+ w.a.V(1).M(m).F().Info("Delete ConfigMap: %s/%s", m.Namespace, m.Name)
if err := w.c.kubeClient.CoreV1().ConfigMaps(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete ConfigMap %s/%s, err: %v", m.Namespace, m.Name, err)
+ w.a.V(1).M(m).F().Error("FAILED to delete ConfigMap: %s/%s, err: %v", m.Namespace, m.Name, err)
}
}
}
@@ -171,9 +172,9 @@ func (w *worker) purgeService(
m meta.ObjectMeta,
) {
if shouldPurgeService(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete Service %s/%s", m.Namespace, m.Name)
+ w.a.V(1).M(m).F().Info("Delete Service: %s/%s", m.Namespace, m.Name)
if err := w.c.kubeClient.CoreV1().Services(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete Service %s/%s, err: %v", m.Namespace, m.Name, err)
+ w.a.V(1).M(m).F().Error("FAILED to delete Service: %s/%s, err: %v", m.Namespace, m.Name, err)
}
}
}
@@ -185,9 +186,9 @@ func (w *worker) purgeSecret(
m meta.ObjectMeta,
) {
if shouldPurgeSecret(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete Secret %s/%s", m.Namespace, m.Name)
+ w.a.V(1).M(m).F().Info("Delete Secret: %s/%s", m.Namespace, m.Name)
if err := w.c.kubeClient.CoreV1().Secrets(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete Secret %s/%s, err: %v", m.Namespace, m.Name, err)
+ w.a.V(1).M(m).F().Error("FAILED to delete Secret: %s/%s, err: %v", m.Namespace, m.Name, err)
}
}
}
@@ -199,9 +200,9 @@ func (w *worker) purgePDB(
m meta.ObjectMeta,
) {
if shouldPurgePDB(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete PDB %s/%s", m.Namespace, m.Name)
+ w.a.V(1).M(m).F().Info("Delete PDB: %s/%s", m.Namespace, m.Name)
if err := w.c.kubeClient.PolicyV1().PodDisruptionBudgets(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete PDB %s/%s, err: %v", m.Namespace, m.Name, err)
+ w.a.V(1).M(m).F().Error("FAILED to delete PDB: %s/%s, err: %v", m.Namespace, m.Name, err)
}
}
}
@@ -266,7 +267,7 @@ func (w *worker) deleteCHIProtocol(ctx context.Context, chi *api.ClickHouseInsta
defer w.a.V(2).M(chi).E().P()
var err error
- chi, err = w.normalizer.CreateTemplatedCHI(chi, model.NewNormalizerOptions())
+ chi, err = w.normalizer.CreateTemplatedCHI(chi, normalizer.NewOptions())
if err != nil {
w.a.WithEvent(chi, eventActionDelete, eventReasonDeleteFailed).
WithStatusError(chi).
@@ -289,7 +290,7 @@ func (w *worker) deleteCHIProtocol(ctx context.Context, chi *api.ClickHouseInsta
MainFields: true,
},
}); err != nil {
- w.a.V(1).M(chi).F().Error("UNABLE to write normalized CHI. err:%q", err)
+ w.a.V(1).M(chi).F().Error("UNABLE to write normalized CHI. err: %q", err)
return nil
}
@@ -341,7 +342,7 @@ func (w *worker) canDropReplica(host *api.ChiHost, opts ...*dropReplicaOptions)
// Replica's state has to be kept in Zookeeper for retained volumes.
// ClickHouse expects to have state of the non-empty replica in-place when replica rejoins.
if model.GetReclaimPolicy(pvc.ObjectMeta) == api.PVCReclaimPolicyRetain {
- w.a.V(1).F().Info("PVC %s/%s blocks drop replica. Reclaim policy: %s", api.PVCReclaimPolicyRetain.String())
+ w.a.V(1).F().Info("PVC: %s/%s blocks drop replica. Reclaim policy: %s", api.PVCReclaimPolicyRetain.String())
can = false
}
})
@@ -383,12 +384,12 @@ func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.ChiHost, opts
}
if hostToDrop == nil {
- w.a.V(1).F().Error("FAILED to drop replica. Need to have host to drop. hostToDrop:%s", hostToDrop.GetName())
+ w.a.V(1).F().Error("FAILED to drop replica. Need to have host to drop. hostToDrop: %s", hostToDrop.GetName())
return nil
}
if !w.canDropReplica(hostToDrop, opts...) {
- w.a.V(1).F().Warning("CAN NOT drop replica. hostToDrop:%s", hostToDrop.GetName())
+ w.a.V(1).F().Warning("CAN NOT drop replica. hostToDrop: %s", hostToDrop.GetName())
return nil
}
@@ -399,7 +400,7 @@ func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.ChiHost, opts
}
if hostToRunOn == nil {
- w.a.V(1).F().Error("FAILED to drop replica. hostToRunOn:%s, hostToDrop:%s", hostToRunOn.GetName(), hostToDrop.GetName())
+ w.a.V(1).F().Error("FAILED to drop replica. hostToRunOn: %s, hostToDrop: %s", hostToRunOn.GetName(), hostToDrop.GetName())
return nil
}
@@ -407,15 +408,15 @@ func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.ChiHost, opts
if err == nil {
w.a.V(1).
- WithEvent(hostToRunOn.CHI, eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(hostToRunOn.CHI).
+ WithEvent(hostToRunOn.GetCHI(), eventActionDelete, eventReasonDeleteCompleted).
+ WithStatusAction(hostToRunOn.GetCHI()).
M(hostToRunOn).F().
- Info("Drop replica host %s in cluster %s", hostToDrop.GetName(), hostToDrop.Address.ClusterName)
+ Info("Drop replica host: %s in cluster: %s", hostToDrop.GetName(), hostToDrop.Runtime.Address.ClusterName)
} else {
- w.a.WithEvent(hostToRunOn.CHI, eventActionDelete, eventReasonDeleteFailed).
- WithStatusError(hostToRunOn.CHI).
+ w.a.WithEvent(hostToRunOn.GetCHI(), eventActionDelete, eventReasonDeleteFailed).
+ WithStatusError(hostToRunOn.GetCHI()).
M(hostToRunOn).F().
- Error("FAILED to drop replica on host %s with error %v", hostToDrop.GetName(), err)
+ Error("FAILED to drop replica on host: %s with error: %v", hostToDrop.GetName(), err)
}
return err
@@ -435,16 +436,16 @@ func (w *worker) deleteTables(ctx context.Context, host *api.ChiHost) error {
if err == nil {
w.a.V(1).
- WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteCompleted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
- Info("Deleted tables on host %s replica %d to shard %d in cluster %s",
- host.GetName(), host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Deleted tables on host: %s replica: %d to shard: %d in cluster: %s",
+ host.GetName(), host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
} else {
- w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteFailed).
- WithStatusError(host.CHI).
+ w.a.WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteFailed).
+ WithStatusError(host.GetCHI()).
M(host).F().
- Error("FAILED to delete tables on host %s with error %v", host.GetName(), err)
+ Error("FAILED to delete tables on host: %s with error: %v", host.GetName(), err)
}
return err
@@ -458,22 +459,22 @@ func (w *worker) deleteHost(ctx context.Context, chi *api.ClickHouseInstallation
return nil
}
- w.a.V(2).M(host).S().Info(host.Address.HostName)
- defer w.a.V(2).M(host).E().Info(host.Address.HostName)
+ w.a.V(2).M(host).S().Info(host.Runtime.Address.HostName)
+ defer w.a.V(2).M(host).E().Info(host.Runtime.Address.HostName)
w.a.V(1).
- WithEvent(host.CHI, eventActionDelete, eventReasonDeleteStarted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteStarted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
- Info("Delete host %s/%s - started", host.Address.ClusterName, host.GetName())
+ Info("Delete host: %s/%s - started", host.Runtime.Address.ClusterName, host.GetName())
var err error
- if host.CurStatefulSet, err = w.c.getStatefulSet(host); err != nil {
- w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(host.CHI).
+ if host.Runtime.CurStatefulSet, err = w.c.getStatefulSet(host); err != nil {
+ w.a.WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteCompleted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
- Info("Delete host %s/%s - completed StatefulSet not found - already deleted? err: %v",
- host.Address.ClusterName, host.GetName(), err)
+ Info("Delete host: %s/%s - completed StatefulSet not found - already deleted? err: %v",
+ host.Runtime.Address.ClusterName, host.GetName(), err)
return nil
}
@@ -498,15 +499,15 @@ func (w *worker) deleteHost(ctx context.Context, chi *api.ClickHouseInstallation
if err == nil {
w.a.V(1).
- WithEvent(host.CHI, eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteCompleted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
- Info("Delete host %s/%s - completed", host.Address.ClusterName, host.GetName())
+ Info("Delete host: %s/%s - completed", host.Runtime.Address.ClusterName, host.GetName())
} else {
- w.a.WithEvent(host.CHI, eventActionDelete, eventReasonDeleteFailed).
- WithStatusError(host.CHI).
+ w.a.WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteFailed).
+ WithStatusError(host.GetCHI()).
M(host).F().
- Error("FAILED Delete host %s/%s - completed", host.Address.ClusterName, host.GetName())
+ Error("FAILED Delete host: %s/%s - completed", host.Runtime.Address.ClusterName, host.GetName())
}
return err
@@ -524,10 +525,10 @@ func (w *worker) deleteShard(ctx context.Context, chi *api.ClickHouseInstallatio
defer w.a.V(2).M(shard).E().P()
w.a.V(1).
- WithEvent(shard.CHI, eventActionDelete, eventReasonDeleteStarted).
- WithStatusAction(shard.CHI).
+ WithEvent(shard.Runtime.CHI, eventActionDelete, eventReasonDeleteStarted).
+ WithStatusAction(shard.Runtime.CHI).
M(shard).F().
- Info("Delete shard %s/%s - started", shard.Address.Namespace, shard.Name)
+ Info("Delete shard: %s/%s - started", shard.Runtime.Address.Namespace, shard.Name)
// Delete Shard Service
_ = w.c.deleteServiceShard(ctx, shard)
@@ -538,10 +539,10 @@ func (w *worker) deleteShard(ctx context.Context, chi *api.ClickHouseInstallatio
})
w.a.V(1).
- WithEvent(shard.CHI, eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(shard.CHI).
+ WithEvent(shard.Runtime.CHI, eventActionDelete, eventReasonDeleteCompleted).
+ WithStatusAction(shard.Runtime.CHI).
M(shard).F().
- Info("Delete shard %s/%s - completed", shard.Address.Namespace, shard.Name)
+ Info("Delete shard: %s/%s - completed", shard.Runtime.Address.Namespace, shard.Name)
return nil
}
@@ -558,10 +559,10 @@ func (w *worker) deleteCluster(ctx context.Context, chi *api.ClickHouseInstallat
defer w.a.V(2).M(cluster).E().P()
w.a.V(1).
- WithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteStarted).
- WithStatusAction(cluster.CHI).
+ WithEvent(cluster.Runtime.CHI, eventActionDelete, eventReasonDeleteStarted).
+ WithStatusAction(cluster.Runtime.CHI).
M(cluster).F().
- Info("Delete cluster %s/%s - started", cluster.Address.Namespace, cluster.Name)
+ Info("Delete cluster: %s/%s - started", cluster.Runtime.Address.Namespace, cluster.Name)
// Delete ChkCluster Service
_ = w.c.deleteServiceCluster(ctx, cluster)
@@ -578,10 +579,10 @@ func (w *worker) deleteCluster(ctx context.Context, chi *api.ClickHouseInstallat
})
w.a.V(1).
- WithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(cluster.CHI).
+ WithEvent(cluster.Runtime.CHI, eventActionDelete, eventReasonDeleteCompleted).
+ WithStatusAction(cluster.Runtime.CHI).
M(cluster).F().
- Info("Delete cluster %s/%s - completed", cluster.Address.Namespace, cluster.Name)
+ Info("Delete cluster: %s/%s - completed", cluster.Runtime.Address.Namespace, cluster.Name)
return nil
}
@@ -608,25 +609,30 @@ func (w *worker) deleteCHI(ctx context.Context, old, new *api.ClickHouseInstalla
// described by CRD being deleted. This may be an unexpected and very painful situation,
// so in this case we should agree to delete CHI itself, but have to keep all CHI's child resources.
- var clear bool
+ var purge bool
crd, err := w.c.extClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, "clickhouseinstallations.clickhouse.altinity.com", controller.NewGetOptions())
if err == nil {
+ // CRD is in place
if crd.ObjectMeta.DeletionTimestamp.IsZero() {
- // CRD is not being deleted and operator can delete all child resources.
- w.a.V(1).M(new).F().Info("CRD %s/%s is not being deleted, operator will delete child resources", crd.Namespace, crd.Name)
- clear = true
+ // CRD is not being deleted. This is a standard request to delete a CHI.
+ // Operator can delete all child resources.
+ w.a.V(1).M(new).F().Info("CRD: %s/%s is not being deleted, operator will delete child resources", crd.Namespace, crd.Name)
+ purge = true
} else {
- // CRD is being deleted. This may be a mistake, operator should not delete data
- w.a.V(1).M(new).F().Info("CRD %s/%s BEING DELETED, operator will NOT delete child resources", crd.Namespace, crd.Name)
- clear = false
+ // CRD is being deleted.
+ // In most cases, users do not expect to delete all CHIs with all their resources along with the CRD.
+ // Operator should not delete child resources - especially storage, such as PVCs and PVs.
+ w.a.V(1).M(new).F().Info("CRD: %s/%s BEING DELETED, operator will NOT delete child resources", crd.Namespace, crd.Name)
+ purge = false
}
} else {
+ // No CRD is available
w.a.V(1).M(new).F().Error("unable to get CRD, got error: %v ", err)
- w.a.V(1).M(new).F().Info("will delete chi %s/%s", new.Namespace, new.Name)
- clear = true
+ w.a.V(1).M(new).F().Info("will delete chi with all resources: %s/%s", new.Namespace, new.Name)
+ purge = true
}
- if clear {
+ if purge {
cur, err := w.c.chopClient.ClickhouseV1().ClickHouseInstallations(new.Namespace).Get(ctx, new.Name, controller.NewGetOptions())
if cur == nil {
return false
@@ -642,17 +648,17 @@ func (w *worker) deleteCHI(ctx context.Context, old, new *api.ClickHouseInstalla
_ = w.deleteCHIProtocol(ctx, new)
} else {
- new.Attributes.SkipOwnerRef = true
+ new.EnsureRuntime().EnsureAttributes().SkipOwnerRef = true
_ = w.reconcileCHI(ctx, old, new)
}
// We need to uninstall finalizer in order to allow k8s to delete CHI resource
w.a.V(2).M(new).F().Info("uninstall finalizer")
if err := w.c.uninstallFinalizer(ctx, new); err != nil {
- w.a.V(1).M(new).F().Error("unable to uninstall finalizer: err:%v", err)
+ w.a.V(1).M(new).F().Error("unable to uninstall finalizer. err: %v", err)
}
- // CHI's child resources were deleted
+ // CHI delete completed
return true
}
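The purge decision above keys off the CRD's deletionTimestamp: if the CRD stays in place, deleting a CHI is a normal request and its child resources can be removed; if the CRD itself is being deleted, storage such as PVCs and PVs is kept. A minimal standalone sketch of that decision, with isPurgeAllowed as a hypothetical helper name rather than operator code:

package main

import (
	"fmt"

	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// isPurgeAllowed is a hypothetical helper mirroring the decision above:
// child resources may be purged only when the CRD itself is not being deleted.
func isPurgeAllowed(crd *apiext.CustomResourceDefinition, crdLookupErr error) bool {
	if crdLookupErr != nil {
		// CRD could not be fetched - treat as a plain CHI deletion and purge children.
		return true
	}
	if crd.ObjectMeta.DeletionTimestamp.IsZero() {
		// CRD is not being deleted - standard CHI deletion, children can be purged.
		return true
	}
	// CRD is being deleted - keep child resources (PVCs, PVs) intact.
	return false
}

func main() {
	crd := &apiext.CustomResourceDefinition{} // no deletionTimestamp set
	fmt.Println(isPurgeAllowed(crd, nil))     // true
}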
diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go
index b33d307ac..787f655de 100644
--- a/pkg/controller/chi/worker.go
+++ b/pkg/controller/chi/worker.go
@@ -35,8 +35,11 @@ import (
"github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller"
model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ chiCreator "github.com/altinity/clickhouse-operator/pkg/model/chi/creator"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
"github.com/altinity/clickhouse-operator/pkg/model/chi/schemer"
"github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
+ "github.com/altinity/clickhouse-operator/pkg/model/k8s"
"github.com/altinity/clickhouse-operator/pkg/util"
)
@@ -49,7 +52,7 @@ type worker struct {
a Announcer
//queue workqueue.RateLimitingInterface
queue queue.PriorityQueue
- normalizer *model.Normalizer
+ normalizer *normalizer.Normalizer
schemer *schemer.ClusterSchemer
start time.Time
task task
@@ -57,7 +60,7 @@ type worker struct {
// task represents context of a worker. This also can be called "a reconcile task"
type task struct {
- creator *model.Creator
+ creator *chiCreator.Creator
registryReconciled *model.Registry
registryFailed *model.Registry
cmUpdate time.Time
@@ -65,7 +68,7 @@ type task struct {
}
// newTask creates new context
-func newTask(creator *model.Creator) task {
+func newTask(creator *chiCreator.Creator) task {
return task{
creator: creator,
registryReconciled: model.NewRegistry(),
@@ -83,18 +86,20 @@ func (c *Controller) newWorker(q queue.PriorityQueue, sys bool) *worker {
start = start.Add(api.DefaultReconcileThreadsWarmup)
}
return &worker{
- c: c,
- a: NewAnnouncer().WithController(c),
- queue: q,
- normalizer: model.NewNormalizer(c.kubeClient),
- schemer: nil,
- start: start,
+ c: c,
+ a: NewAnnouncer().WithController(c),
+ queue: q,
+ normalizer: normalizer.NewNormalizer(func(namespace, name string) (*core.Secret, error) {
+ return c.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, controller.NewGetOptions())
+ }),
+ schemer: nil,
+ start: start,
}
}
// newContext creates new reconcile task
func (w *worker) newTask(chi *api.ClickHouseInstallation) {
- w.task = newTask(model.NewCreator(chi))
+ w.task = newTask(chiCreator.NewCreator(chi))
}
// timeToStart specifies time that operator does not accept changes
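The worker now hands normalizer.NewNormalizer a closure for fetching Secrets instead of a whole kube client. Going by the call site, the closure has the shape func(namespace, name string) (*core.Secret, error), which also makes the normalizer easy to exercise with a stub; a small sketch under that assumption:

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// secretGetter matches the shape of the closure passed to normalizer.NewNormalizer
// above (the named type itself is introduced here for illustration only).
type secretGetter func(namespace, name string) (*core.Secret, error)

// stubGetter is a hypothetical in-memory getter - handy in tests, where no
// real kube client is available.
func stubGetter(namespace, name string) (*core.Secret, error) {
	return &core.Secret{
		ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: name},
		Data:       map[string][]byte{"password": []byte("***")},
	}, nil
}

func main() {
	var get secretGetter = stubGetter
	secret, err := get("default", "clickhouse-credentials")
	fmt.Println(secret.Namespace, secret.Name, err)
}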
@@ -146,7 +151,7 @@ func (w *worker) shouldForceRestartHost(host *api.ChiHost) bool {
}
}
- if host.Version.IsUnknown() && podIsCrushed {
+ if host.Runtime.Version.IsUnknown() && podIsCrushed {
w.a.V(1).M(host).F().Info("Host with unknown version and in CrashLoopBackOff should be restarted. It most likely is unable to start due to bad config. Host: %s", host.GetName())
return true
}
@@ -279,7 +284,7 @@ func (w *worker) processReconcilePod(ctx context.Context, cmd *ReconcilePod) err
}
func (w *worker) processDropDns(ctx context.Context, cmd *DropDns) error {
- if chi, err := w.createCHIFromObjectMeta(cmd.initiator, false, model.NewNormalizerOptions()); err == nil {
+ if chi, err := w.createCHIFromObjectMeta(cmd.initiator, false, normalizer.NewOptions()); err == nil {
w.a.V(2).M(cmd.initiator).Info("flushing DNS for CHI %s", chi.Name)
_ = w.ensureClusterSchemer(chi.FirstHost()).CHIDropDnsCache(ctx, chi)
} else {
@@ -322,7 +327,7 @@ func (w *worker) processItem(ctx context.Context, item interface{}) error {
// normalize
func (w *worker) normalize(c *api.ClickHouseInstallation) *api.ClickHouseInstallation {
- chi, err := w.normalizer.CreateTemplatedCHI(c, model.NewNormalizerOptions())
+ chi, err := w.normalizer.CreateTemplatedCHI(c, normalizer.NewOptions())
if err != nil {
w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).
WithStatusError(chi).
@@ -332,7 +337,7 @@ func (w *worker) normalize(c *api.ClickHouseInstallation) *api.ClickHouseInstall
ips := w.c.getPodsIPs(chi)
w.a.V(1).M(chi).Info("IPs of the CHI normalizer %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
- opts := model.NewNormalizerOptions()
+ opts := normalizer.NewOptions()
opts.DefaultUserAdditionalIPs = ips
chi, err = w.normalizer.CreateTemplatedCHI(c, opts)
@@ -378,11 +383,11 @@ func (w *worker) ensureFinalizer(ctx context.Context, chi *api.ClickHouseInstall
// updateEndpoints updates endpoints
func (w *worker) updateEndpoints(ctx context.Context, old, new *core.Endpoints) error {
- if chi, err := w.createCHIFromObjectMeta(&new.ObjectMeta, false, model.NewNormalizerOptions()); err == nil {
+ if chi, err := w.createCHIFromObjectMeta(&new.ObjectMeta, false, normalizer.NewOptions()); err == nil {
w.a.V(1).M(chi).Info("updating endpoints for CHI-1 %s", chi.Name)
ips := w.c.getPodsIPs(chi)
w.a.V(1).M(chi).Info("IPs of the CHI-1 update endpoints %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
- opts := model.NewNormalizerOptions()
+ opts := normalizer.NewOptions()
opts.DefaultUserAdditionalIPs = ips
if chi, err := w.createCHIFromObjectMeta(&new.ObjectMeta, false, opts); err == nil {
w.a.V(1).M(chi).Info("Update users IPS-1")
@@ -587,6 +592,7 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInst
// No need to wait for stopped CHI
return
}
+ w.a.V(1).M(chi).F().S().Info("wait for IP addresses to be assigned to all pods")
start := time.Now()
w.c.poll(ctx, chi, func(c *api.ClickHouseInstallation, e error) bool {
if len(c.Status.GetPodIPs()) >= len(c.Status.GetPods()) {
@@ -596,11 +602,11 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInst
}
if time.Now().Sub(start) > 1*time.Minute {
// Stop polling
- w.a.V(1).M(c).Warning("Not all IP addresses are in place but time has elapsed")
+ w.a.V(1).M(c).Warning("not all IP addresses are in place but time has elapsed")
return false
}
// Continue polling
- w.a.V(1).M(c).Warning("Not all IP addresses are in place")
+ w.a.V(1).M(c).Warning("still waiting - not all IP addresses are in place yet")
return true
})
}
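waitForIPAddresses polls the CHI status until every pod has an IP or one minute has elapsed; the callback returns true to keep polling and false to stop. A self-contained sketch of that poll-with-deadline pattern (names here are illustrative, not the controller's poll implementation):

package main

import (
	"context"
	"fmt"
	"time"
)

// poll keeps invoking keepPolling until it returns false or the context is done.
// The callback receives the elapsed time so it can implement its own deadline,
// as the IP-address wait above does.
func poll(ctx context.Context, interval time.Duration, keepPolling func(elapsed time.Duration) bool) {
	start := time.Now()
	for {
		if !keepPolling(time.Since(start)) {
			return
		}
		select {
		case <-ctx.Done():
			return
		case <-time.After(interval):
		}
	}
}

func main() {
	assigned, want := 0, 3
	poll(context.Background(), 50*time.Millisecond, func(elapsed time.Duration) bool {
		assigned++ // stand-in for counting assigned pod IPs
		if assigned >= want {
			return false // all IPs in place - stop polling
		}
		if elapsed > time.Minute {
			fmt.Println("not all IP addresses are in place but time has elapsed")
			return false // give up
		}
		return true // still waiting
	})
	fmt.Println("assigned:", assigned)
}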
@@ -664,12 +670,14 @@ func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _chi *ap
return
}
+ w.a.V(1).M(_chi).F().S().Info("finalize reconcile")
+
// Update CHI object
- if chi, err := w.createCHIFromObjectMeta(&_chi.ObjectMeta, true, model.NewNormalizerOptions()); err == nil {
+ if chi, err := w.createCHIFromObjectMeta(&_chi.ObjectMeta, true, normalizer.NewOptions()); err == nil {
w.a.V(1).M(chi).Info("updating endpoints for CHI-2 %s", chi.Name)
ips := w.c.getPodsIPs(chi)
w.a.V(1).M(chi).Info("IPs of the CHI-2 finalize reconcile %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
- opts := model.NewNormalizerOptions()
+ opts := normalizer.NewOptions()
opts.DefaultUserAdditionalIPs = ips
if chi, err := w.createCHIFromObjectMeta(&_chi.ObjectMeta, true, opts); err == nil {
w.a.V(1).M(chi).Info("Update users IPS-2")
@@ -805,13 +813,13 @@ func (w *worker) walkHosts(ctx context.Context, chi *api.ClickHouseInstallation,
chi.WalkHosts(func(host *api.ChiHost) error {
switch {
case host.GetReconcileAttributes().IsAdd():
- w.a.M(host).Info("ADD host: %s", host.Address.CompactString())
+ w.a.M(host).Info("ADD host: %s", host.Runtime.Address.CompactString())
case host.GetReconcileAttributes().IsModify():
- w.a.M(host).Info("MODIFY host: %s", host.Address.CompactString())
+ w.a.M(host).Info("MODIFY host: %s", host.Runtime.Address.CompactString())
case host.GetReconcileAttributes().IsFound():
- w.a.M(host).Info("FOUND host: %s", host.Address.CompactString())
+ w.a.M(host).Info("FOUND host: %s", host.Runtime.Address.CompactString())
default:
- w.a.M(host).Info("UNKNOWN host: %s", host.Address.CompactString())
+ w.a.M(host).Info("UNKNOWN host: %s", host.Runtime.Address.CompactString())
}
return nil
})
@@ -833,7 +841,7 @@ func (w *worker) options(excludeHosts ...*api.ChiHost) *model.ClickHouseConfigFi
// Stringify
str := ""
for _, host := range excludeHosts {
- str += fmt.Sprintf("name: '%s' sts: '%s'", host.GetName(), host.Address.StatefulSet)
+ str += fmt.Sprintf("name: '%s' sts: '%s'", host.GetName(), host.Runtime.Address.StatefulSet)
}
opts := w.baseRemoteServersGeneratorOptions().ExcludeHosts(excludeHosts...)
@@ -854,7 +862,7 @@ func (w *worker) prepareHostStatefulSetWithStatus(ctx context.Context, host *api
// prepareDesiredStatefulSet prepares desired StatefulSet
func (w *worker) prepareDesiredStatefulSet(host *api.ChiHost, shutdown bool) {
- host.DesiredStatefulSet = w.task.creator.CreateStatefulSet(host, shutdown)
+ host.Runtime.DesiredStatefulSet = w.task.creator.CreateStatefulSet(host, shutdown)
}
type migrateTableOptions struct {
@@ -901,7 +909,9 @@ func (w *worker) migrateTables(ctx context.Context, host *api.ChiHost, opts ...*
if !w.shouldMigrateTables(host, opts...) {
w.a.V(1).
M(host).F().
- Info("No need to add tables on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info(
+ "No need to add tables on host %d to shard %d in cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return nil
}
@@ -910,7 +920,9 @@ func (w *worker) migrateTables(ctx context.Context, host *api.ChiHost, opts ...*
if w.shouldDropReplica(host, opts...) {
w.a.V(1).
M(host).F().
- Info("Need to drop replica on host %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info(
+ "Need to drop replica on host %d to shard %d in cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
w.dropReplica(ctx, host, &dropReplicaOptions{forceDrop: true})
}
@@ -918,7 +930,9 @@ func (w *worker) migrateTables(ctx context.Context, host *api.ChiHost, opts ...*
WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateStarted).
WithStatusAction(host.GetCHI()).
M(host).F().
- Info("Adding tables on shard/host:%d/%d cluster:%s", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName)
+ Info(
+ "Adding tables on shard/host:%d/%d cluster:%s",
+ host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName)
err := w.ensureClusterSchemer(host).HostCreateTables(ctx, host)
if err == nil {
@@ -926,14 +940,16 @@ func (w *worker) migrateTables(ctx context.Context, host *api.ChiHost, opts ...*
WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateCompleted).
WithStatusAction(host.GetCHI()).
M(host).F().
- Info("Tables added successfully on shard/host:%d/%d cluster:%s", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName)
+ Info("Tables added successfully on shard/host:%d/%d cluster:%s",
+ host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName)
host.GetCHI().EnsureStatus().PushHostTablesCreated(model.CreateFQDN(host))
} else {
w.a.V(1).
WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateFailed).
WithStatusAction(host.GetCHI()).
M(host).F().
- Error("ERROR add tables added successfully on shard/host:%d/%d cluster:%s err:%v", host.Address.ShardIndex, host.Address.ReplicaIndex, host.Address.ClusterName, err)
+ Error("ERROR add tables added successfully on shard/host:%d/%d cluster:%s err:%v",
+ host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName, err)
}
return err
}
@@ -995,7 +1011,8 @@ func (w *worker) excludeHost(ctx context.Context, host *api.ChiHost) error {
w.a.V(1).
M(host).F().
- Info("Exclude from cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Exclude from cluster host %d shard %d cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
_ = w.excludeHostFromService(ctx, host)
w.excludeHostFromClickHouseCluster(ctx, host)
@@ -1034,13 +1051,15 @@ func (w *worker) includeHost(ctx context.Context, host *api.ChiHost) error {
if !w.shouldIncludeHost(host) {
w.a.V(1).
M(host).F().
- Info("No need to include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("No need to include into cluster host %d shard %d cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return nil
}
w.a.V(1).
M(host).F().
- Info("Include into cluster host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Include into cluster host %d shard %d cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
w.includeHostIntoClickHouseCluster(ctx, host)
_ = w.includeHostIntoService(ctx, host)
@@ -1083,7 +1102,8 @@ func (w *worker) excludeHostFromClickHouseCluster(ctx context.Context, host *api
if w.shouldWaitExcludeHost(host) {
w.a.V(1).
M(host).F().
- Info("going to exclude host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("going to exclude host %d shard %d cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
// Specify in options to exclude host from ClickHouse config file
options := w.options(host)
@@ -1117,33 +1137,39 @@ func (w *worker) shouldExcludeHost(host *api.ChiHost) bool {
case host.IsStopped():
w.a.V(1).
M(host).F().
- Info("Host is stopped, no need to exclude stopped host. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Host is stopped, no need to exclude stopped host. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return false
case host.GetShard().HostsCount() == 1:
w.a.V(1).
M(host).F().
- Info("Host is the only host in the shard (means no replication), no need to exclude. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Host is the only host in the shard (means no replication), no need to exclude. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return false
case w.shouldForceRestartHost(host):
w.a.V(1).
M(host).F().
- Info("Host should be restarted, need to exclude. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Host should be restarted, need to exclude. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return true
case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusNew:
w.a.V(1).
M(host).F().
- Info("Host is new, no need to exclude. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Host is new, no need to exclude. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return false
case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusSame:
w.a.V(1).
M(host).F().
- Info("Host is the same, would not be updated, no need to exclude. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Host is the same, would not be updated, no need to exclude. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return false
}
w.a.V(1).
M(host).F().
- Info("Host should be excluded. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Host should be excluded. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return true
}
@@ -1155,18 +1181,21 @@ func (w *worker) shouldWaitExcludeHost(host *api.ChiHost) bool {
case host.GetCHI().GetReconciling().IsReconcilingPolicyWait():
w.a.V(1).
M(host).F().
- Info("IsReconcilingPolicyWait() need to exclude host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("IsReconcilingPolicyWait() need to exclude host %d shard %d cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return true
case host.GetCHI().GetReconciling().IsReconcilingPolicyNoWait():
w.a.V(1).
M(host).F().
- Info("IsReconcilingPolicyNoWait() need NOT to exclude host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("IsReconcilingPolicyNoWait() need NOT to exclude host %d shard %d cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return false
}
w.a.V(1).
M(host).F().
- Info("fallback to operator's settings. host %d shard %d cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("fallback to operator's settings. host %d shard %d cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return chop.Config().Reconcile.Host.Wait.Exclude.Value()
}
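shouldWaitExcludeHost resolves the wait behaviour in a fixed order: an explicit per-CHI reconciling policy (wait / nowait) wins, and only when neither is set does the operator-level reconcile.host.wait.exclude setting apply. A tiny sketch of that precedence with stand-in types, not the operator's own API:

package main

import "fmt"

type policy int

const (
	policyUnspecified policy = iota
	policyWait
	policyNoWait
)

// waitExclude mirrors the precedence above: an explicit per-CHI reconciling
// policy wins; otherwise the operator-level default applies.
func waitExclude(chiPolicy policy, operatorDefault bool) bool {
	switch chiPolicy {
	case policyWait:
		return true
	case policyNoWait:
		return false
	default:
		return operatorDefault
	}
}

func main() {
	fmt.Println(waitExclude(policyWait, false))       // true - CHI asks to wait
	fmt.Println(waitExclude(policyNoWait, true))      // false - CHI opts out
	fmt.Println(waitExclude(policyUnspecified, true)) // true - operator default
}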
@@ -1176,23 +1205,29 @@ func (w *worker) shouldWaitQueries(host *api.ChiHost) bool {
case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusNew:
w.a.V(1).
M(host).F().
- Info("No need to wait for queries to complete, host is a new one. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("No need to wait for queries to complete, host is a new one. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return false
case chop.Config().Reconcile.Host.Wait.Queries.Value():
w.a.V(1).
M(host).F().
- Info("Will wait for queries to complete according to CHOp config 'reconcile.host.wait.queries' setting. Host is not yet in the cluster. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Will wait for queries to complete according to CHOp config 'reconcile.host.wait.queries' setting. "+
+ "Host is not yet in the cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return true
case host.GetCHI().GetReconciling().IsReconcilingPolicyWait():
w.a.V(1).
M(host).F().
- Info("Will wait for queries to complete according to CHI 'reconciling.policy' setting. Host is not yet in the cluster. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Will wait for queries to complete according to CHI 'reconciling.policy' setting. "+
+ "Host is not yet in the cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return true
}
w.a.V(1).
M(host).F().
- Info("Will NOT wait for queries to complete on the host. Host/shard/cluster %d/%d/%s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
+ Info("Will NOT wait for queries to complete on the host. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return false
}
@@ -1241,7 +1276,7 @@ func (w *worker) waitHostNoActiveQueries(ctx context.Context, host *api.ChiHost)
}
// createCHIFromObjectMeta
-func (w *worker) createCHIFromObjectMeta(objectMeta *meta.ObjectMeta, isCHI bool, options *model.NormalizerOptions) (*api.ClickHouseInstallation, error) {
+func (w *worker) createCHIFromObjectMeta(objectMeta *meta.ObjectMeta, isCHI bool, options *normalizer.Options) (*api.ClickHouseInstallation, error) {
w.a.V(3).M(objectMeta).S().P()
defer w.a.V(3).M(objectMeta).E().P()
@@ -1324,7 +1359,9 @@ func (w *worker) updateService(
}
if curService.Spec.Type != targetService.Spec.Type {
- return fmt.Errorf("just recreate the service in case of service type change")
+ return fmt.Errorf(
+ "just recreate the service in case of service type change '%s'=>'%s'",
+ curService.Spec.Type, targetService.Spec.Type)
}
// Updating a Service is a complicated business
@@ -1413,13 +1450,9 @@ func (w *worker) updateService(
WithEvent(chi, eventActionUpdate, eventReasonUpdateCompleted).
WithStatusAction(chi).
M(chi).F().
- Info("Update Service %s/%s", newService.Namespace, newService.Name)
+ Info("Update Service success: %s/%s", newService.Namespace, newService.Name)
} else {
- w.a.WithEvent(chi, eventActionUpdate, eventReasonUpdateFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("Update Service %s/%s failed with error %v", newService.Namespace, newService.Name, err)
+ w.a.M(chi).F().Error("FAILED Update Service: %s/%s, err: %v", newService.Namespace, newService.Name, err)
}
return err
@@ -1438,13 +1471,13 @@ func (w *worker) createService(ctx context.Context, chi *api.ClickHouseInstallat
WithEvent(chi, eventActionCreate, eventReasonCreateCompleted).
WithStatusAction(chi).
M(chi).F().
- Info("Create Service %s/%s", service.Namespace, service.Name)
+ Info("OK Create Service: %s/%s", service.Namespace, service.Name)
} else {
w.a.WithEvent(chi, eventActionCreate, eventReasonCreateFailed).
WithStatusAction(chi).
WithStatusError(chi).
M(chi).F().
- Error("Create Service %s/%s failed with error %v", service.Namespace, service.Name, err)
+ Error("FAILED Create Service: %s/%s err: %v", service.Namespace, service.Name, err)
}
return err
@@ -1477,7 +1510,7 @@ func (w *worker) createSecret(ctx context.Context, chi *api.ClickHouseInstallati
// getStatefulSetStatus gets StatefulSet status
func (w *worker) getStatefulSetStatus(host *api.ChiHost) api.ObjectStatus {
- meta := host.DesiredStatefulSet.ObjectMeta
+ meta := host.Runtime.DesiredStatefulSet.ObjectMeta
w.a.V(2).M(meta).S().Info(util.NamespaceNameString(meta))
defer w.a.V(2).M(meta).E().Info(util.NamespaceNameString(meta))
@@ -1545,22 +1578,22 @@ func (w *worker) createStatefulSet(ctx context.Context, host *api.ChiHost, regis
return nil
}
- statefulSet := host.DesiredStatefulSet
+ statefulSet := host.Runtime.DesiredStatefulSet
w.a.V(2).M(host).S().Info(util.NamespaceNameString(statefulSet.ObjectMeta))
defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(statefulSet.ObjectMeta))
w.a.V(1).
- WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateStarted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Info("Create StatefulSet %s/%s - started", statefulSet.Namespace, statefulSet.Name)
action := w.c.createStatefulSet(ctx, host)
if register {
- host.CHI.EnsureStatus().HostAdded()
- _ = w.c.updateCHIObjectStatus(ctx, host.CHI, UpdateCHIStatusOptions{
+ host.GetCHI().EnsureStatus().HostAdded()
+ _ = w.c.updateCHIObjectStatus(ctx, host.GetCHI(), UpdateCHIStatusOptions{
CopyCHIStatusOptions: api.CopyCHIStatusOptions{
MainFields: true,
},
@@ -1570,21 +1603,21 @@ func (w *worker) createStatefulSet(ctx context.Context, host *api.ChiHost, regis
switch action {
case nil:
w.a.V(1).
- WithEvent(host.CHI, eventActionCreate, eventReasonCreateCompleted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateCompleted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Info("Create StatefulSet %s/%s - completed", statefulSet.Namespace, statefulSet.Name)
return nil
case errCRUDAbort:
- w.a.WithEvent(host.CHI, eventActionCreate, eventReasonCreateFailed).
- WithStatusAction(host.CHI).
- WithStatusError(host.CHI).
+ w.a.WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateFailed).
+ WithStatusAction(host.GetCHI()).
+ WithStatusError(host.GetCHI()).
M(host).F().
Error("Create StatefulSet %s/%s - failed with error %v", statefulSet.Namespace, statefulSet.Name, action)
return action
case errCRUDIgnore:
- w.a.WithEvent(host.CHI, eventActionCreate, eventReasonCreateFailed).
- WithStatusAction(host.CHI).
+ w.a.WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateFailed).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Warning("Create StatefulSet %s/%s - error ignored", statefulSet.Namespace, statefulSet.Name)
return nil
@@ -1649,8 +1682,8 @@ func (w *worker) updateStatefulSet(ctx context.Context, host *api.ChiHost, regis
}
// Helpers
- newStatefulSet := host.DesiredStatefulSet
- curStatefulSet := host.CurStatefulSet
+ newStatefulSet := host.Runtime.DesiredStatefulSet
+ curStatefulSet := host.Runtime.CurStatefulSet
w.a.V(2).M(host).S().Info(newStatefulSet.Name)
defer w.a.V(2).M(host).E().Info(newStatefulSet.Name)
@@ -1659,8 +1692,8 @@ func (w *worker) updateStatefulSet(ctx context.Context, host *api.ChiHost, regis
name := newStatefulSet.Name
w.a.V(1).
- WithEvent(host.CHI, eventActionCreate, eventReasonCreateStarted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateStarted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Info("Update StatefulSet(%s/%s) - started", namespace, name)
@@ -1670,23 +1703,23 @@ func (w *worker) updateStatefulSet(ctx context.Context, host *api.ChiHost, regis
}
action := errCRUDRecreate
- if model.IsStatefulSetReady(curStatefulSet) {
+ if k8s.IsStatefulSetReady(curStatefulSet) {
action = w.c.updateStatefulSet(ctx, curStatefulSet, newStatefulSet, host)
}
switch action {
case nil:
if register {
- host.CHI.EnsureStatus().HostUpdated()
- _ = w.c.updateCHIObjectStatus(ctx, host.CHI, UpdateCHIStatusOptions{
+ host.GetCHI().EnsureStatus().HostUpdated()
+ _ = w.c.updateCHIObjectStatus(ctx, host.GetCHI(), UpdateCHIStatusOptions{
CopyCHIStatusOptions: api.CopyCHIStatusOptions{
MainFields: true,
},
})
}
w.a.V(1).
- WithEvent(host.CHI, eventActionUpdate, eventReasonUpdateCompleted).
- WithStatusAction(host.CHI).
+ WithEvent(host.GetCHI(), eventActionUpdate, eventReasonUpdateCompleted).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Info("Update StatefulSet(%s/%s) - completed", namespace, name)
return nil
@@ -1697,8 +1730,8 @@ func (w *worker) updateStatefulSet(ctx context.Context, host *api.ChiHost, regis
w.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got ignore. Ignore", namespace, name)
return nil
case errCRUDRecreate:
- w.a.WithEvent(host.CHI, eventActionUpdate, eventReasonUpdateInProgress).
- WithStatusAction(host.CHI).
+ w.a.WithEvent(host.GetCHI(), eventActionUpdate, eventReasonUpdateInProgress).
+ WithStatusAction(host.GetCHI()).
M(host).F().
Info("Update StatefulSet(%s/%s) switch from Update to Recreate", namespace, name)
w.dumpStatefulSetDiff(host, curStatefulSet, newStatefulSet)
@@ -1812,7 +1845,7 @@ func (w *worker) ensureClusterSchemer(host *api.ChiHost) *schemer.ClusterSchemer
case api.ChSchemeHTTPS:
clusterConnectionParams.Port = int(host.HTTPSPort)
}
- w.schemer = schemer.NewClusterSchemer(clusterConnectionParams, host.Version)
+ w.schemer = schemer.NewClusterSchemer(clusterConnectionParams, host.Runtime.Version)
return w.schemer
}
diff --git a/pkg/controller/chk/reconciler.go b/pkg/controller/chk/reconciler.go
index 6cb60ffcd..e94149907 100644
--- a/pkg/controller/chk/reconciler.go
+++ b/pkg/controller/chk/reconciler.go
@@ -32,6 +32,7 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
// apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
model "github.com/altinity/clickhouse-operator/pkg/model/chk"
"github.com/altinity/clickhouse-operator/pkg/util"
@@ -60,12 +61,13 @@ func (r *ChkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R
new = &apiChk.ClickHouseKeeperInstallation{}
if err := r.Get(ctx, req.NamespacedName, new); err != nil {
if apiErrors.IsNotFound(err) {
- // Request object not found, could have been deleted after reconcile
- // request. Owned objects are automatically garbage collected. For
- // additional cleanup logic use finalizers.
+ // Request object not found, could have been deleted after reconcile request.
+ // Owned objects are automatically garbage collected.
+ // For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
+ // Return and requeue
return ctrl.Result{}, err
}
@@ -108,12 +110,13 @@ func (r *ChkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R
dummy := &apiChk.ClickHouseKeeperInstallation{}
if err := r.Get(ctx, req.NamespacedName, dummy); err != nil {
if apiErrors.IsNotFound(err) {
- // Request object not found, could have been deleted after reconcile
- // request. Owned objects are automatically garbage collected. For
- // additional cleanup logic use finalizers.
+ // Request object not found, could have been deleted after reconcile request.
+ // Owned objects are automatically garbage collected.
+ // For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
+ // Return and requeue
return ctrl.Result{}, err
}
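Both Get paths above follow the usual controller-runtime convention: a NotFound error means the object is gone and the request must not be requeued, while any other error is returned so the work queue retries it. A hedged sketch of that mapping using the apimachinery errors helpers (resultForGetError is illustrative, not part of the reconciler):

package main

import (
	"fmt"

	apiErrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	ctrl "sigs.k8s.io/controller-runtime"
)

// resultForGetError maps the error from a client Get to a reconcile result:
// NotFound is terminal (return and don't requeue), anything else is handed
// back to controller-runtime so the request is retried.
func resultForGetError(err error) (ctrl.Result, error) {
	if err == nil {
		return ctrl.Result{}, nil
	}
	if apiErrors.IsNotFound(err) {
		// Object deleted after the reconcile request; owned objects are garbage collected.
		return ctrl.Result{}, nil
	}
	// Transient failure - return the error and requeue.
	return ctrl.Result{}, err
}

func main() {
	gr := schema.GroupResource{Group: "clickhouse-keeper.altinity.com", Resource: "clickhousekeeperinstallations"}
	res, err := resultForGetError(apiErrors.NewNotFound(gr, "example"))
	fmt.Println(res, err) // zero Result, nil error - not requeued
}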
@@ -302,7 +305,7 @@ func (r *ChkReconciler) reconcileClusterStatus(chk *apiChk.ClickHouseKeeperInsta
// normalize
func (r *ChkReconciler) normalize(c *apiChk.ClickHouseKeeperInstallation) *apiChk.ClickHouseKeeperInstallation {
- chk, err := model.NewNormalizer().CreateTemplatedCHK(c, model.NewNormalizerOptions())
+ chk, err := model.NewNormalizer().CreateTemplatedCHK(c, normalizer.NewOptions())
if err != nil {
log.V(1).
M(chk).F().
diff --git a/pkg/model/chi/action_plan.go b/pkg/model/chi/action_plan.go
index 603efd01a..c25d066ce 100644
--- a/pkg/model/chi/action_plan.go
+++ b/pkg/model/chi/action_plan.go
@@ -19,14 +19,14 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/util"
)
// ActionPlan is an action plan with list of differences between two CHIs
type ActionPlan struct {
- old *v1.ClickHouseInstallation
- new *v1.ClickHouseInstallation
+ old *api.ClickHouseInstallation
+ new *api.ClickHouseInstallation
specDiff *messagediff.Diff
specEqual bool
@@ -45,7 +45,7 @@ type ActionPlan struct {
}
// NewActionPlan makes new ActionPlan out of two CHIs
-func NewActionPlan(old, new *v1.ClickHouseInstallation) *ActionPlan {
+func NewActionPlan(old, new *api.ClickHouseInstallation) *ActionPlan {
ap := &ActionPlan{
old: old,
new: new,
@@ -57,21 +57,21 @@ func NewActionPlan(old, new *v1.ClickHouseInstallation) *ActionPlan {
ap.deletionTimestampEqual = ap.timestampEqual(ap.old.DeletionTimestamp, ap.new.DeletionTimestamp)
ap.deletionTimestampDiff, _ = messagediff.DeepDiff(ap.old.DeletionTimestamp, ap.new.DeletionTimestamp)
ap.finalizersDiff, ap.finalizersEqual = messagediff.DeepDiff(ap.old.Finalizers, ap.new.Finalizers)
- ap.attributesDiff, ap.attributesEqual = messagediff.DeepDiff(ap.old.Attributes, ap.new.Attributes)
+ ap.attributesDiff, ap.attributesEqual = messagediff.DeepDiff(ap.old.EnsureRuntime().EnsureAttributes(), ap.new.EnsureRuntime().EnsureAttributes())
} else if old == nil {
ap.specDiff, ap.specEqual = messagediff.DeepDiff(nil, ap.new.Spec)
ap.labelsDiff, ap.labelsEqual = messagediff.DeepDiff(nil, ap.new.Labels)
ap.deletionTimestampEqual = ap.timestampEqual(nil, ap.new.DeletionTimestamp)
ap.deletionTimestampDiff, _ = messagediff.DeepDiff(nil, ap.new.DeletionTimestamp)
ap.finalizersDiff, ap.finalizersEqual = messagediff.DeepDiff(nil, ap.new.Finalizers)
- ap.attributesDiff, ap.attributesEqual = messagediff.DeepDiff(nil, ap.new.Attributes)
+ ap.attributesDiff, ap.attributesEqual = messagediff.DeepDiff(nil, ap.new.EnsureRuntime().EnsureAttributes())
} else if new == nil {
ap.specDiff, ap.specEqual = messagediff.DeepDiff(ap.old.Spec, nil)
ap.labelsDiff, ap.labelsEqual = messagediff.DeepDiff(ap.old.Labels, nil)
ap.deletionTimestampEqual = ap.timestampEqual(ap.old.DeletionTimestamp, nil)
ap.deletionTimestampDiff, _ = messagediff.DeepDiff(ap.old.DeletionTimestamp, nil)
ap.finalizersDiff, ap.finalizersEqual = messagediff.DeepDiff(ap.old.Finalizers, nil)
- ap.attributesDiff, ap.attributesEqual = messagediff.DeepDiff(ap.old.Attributes, nil)
+ ap.attributesDiff, ap.attributesEqual = messagediff.DeepDiff(ap.old.EnsureRuntime().EnsureAttributes(), nil)
} else {
// Both are nil
ap.specDiff = nil
@@ -243,13 +243,13 @@ func (ap *ActionPlan) GetNewHostsNum() int {
func (ap *ActionPlan) GetRemovedHostsNum() int {
var count int
ap.WalkRemoved(
- func(cluster *v1.Cluster) {
+ func(cluster *api.Cluster) {
count += cluster.HostsCount()
},
- func(shard *v1.ChiShard) {
+ func(shard *api.ChiShard) {
count += shard.HostsCount()
},
- func(host *v1.ChiHost) {
+ func(host *api.ChiHost) {
count++
},
)
@@ -258,30 +258,30 @@ func (ap *ActionPlan) GetRemovedHostsNum() int {
// WalkRemoved walk removed cluster items
func (ap *ActionPlan) WalkRemoved(
- clusterFunc func(cluster *v1.Cluster),
- shardFunc func(shard *v1.ChiShard),
- hostFunc func(host *v1.ChiHost),
+ clusterFunc func(cluster *api.Cluster),
+ shardFunc func(shard *api.ChiShard),
+ hostFunc func(host *api.ChiHost),
) {
// TODO refactor to map[string]object handling, instead of slice
for path := range ap.specDiff.Removed {
switch ap.specDiff.Removed[path].(type) {
- case v1.Cluster:
- cluster := ap.specDiff.Removed[path].(v1.Cluster)
+ case api.Cluster:
+ cluster := ap.specDiff.Removed[path].(api.Cluster)
clusterFunc(&cluster)
- case v1.ChiShard:
- shard := ap.specDiff.Removed[path].(v1.ChiShard)
+ case api.ChiShard:
+ shard := ap.specDiff.Removed[path].(api.ChiShard)
shardFunc(&shard)
- case v1.ChiHost:
- host := ap.specDiff.Removed[path].(v1.ChiHost)
+ case api.ChiHost:
+ host := ap.specDiff.Removed[path].(api.ChiHost)
hostFunc(&host)
- case *v1.Cluster:
- cluster := ap.specDiff.Removed[path].(*v1.Cluster)
+ case *api.Cluster:
+ cluster := ap.specDiff.Removed[path].(*api.Cluster)
clusterFunc(cluster)
- case *v1.ChiShard:
- shard := ap.specDiff.Removed[path].(*v1.ChiShard)
+ case *api.ChiShard:
+ shard := ap.specDiff.Removed[path].(*api.ChiShard)
shardFunc(shard)
- case *v1.ChiHost:
- host := ap.specDiff.Removed[path].(*v1.ChiHost)
+ case *api.ChiHost:
+ host := ap.specDiff.Removed[path].(*api.ChiHost)
hostFunc(host)
}
}
@@ -289,30 +289,30 @@ func (ap *ActionPlan) WalkRemoved(
// WalkAdded walk added cluster items
func (ap *ActionPlan) WalkAdded(
- clusterFunc func(cluster *v1.Cluster),
- shardFunc func(shard *v1.ChiShard),
- hostFunc func(host *v1.ChiHost),
+ clusterFunc func(cluster *api.Cluster),
+ shardFunc func(shard *api.ChiShard),
+ hostFunc func(host *api.ChiHost),
) {
// TODO refactor to map[string]object handling, instead of slice
for path := range ap.specDiff.Added {
switch ap.specDiff.Added[path].(type) {
- case v1.Cluster:
- cluster := ap.specDiff.Added[path].(v1.Cluster)
+ case api.Cluster:
+ cluster := ap.specDiff.Added[path].(api.Cluster)
clusterFunc(&cluster)
- case v1.ChiShard:
- shard := ap.specDiff.Added[path].(v1.ChiShard)
+ case api.ChiShard:
+ shard := ap.specDiff.Added[path].(api.ChiShard)
shardFunc(&shard)
- case v1.ChiHost:
- host := ap.specDiff.Added[path].(v1.ChiHost)
+ case api.ChiHost:
+ host := ap.specDiff.Added[path].(api.ChiHost)
hostFunc(&host)
- case *v1.Cluster:
- cluster := ap.specDiff.Added[path].(*v1.Cluster)
+ case *api.Cluster:
+ cluster := ap.specDiff.Added[path].(*api.Cluster)
clusterFunc(cluster)
- case *v1.ChiShard:
- shard := ap.specDiff.Added[path].(*v1.ChiShard)
+ case *api.ChiShard:
+ shard := ap.specDiff.Added[path].(*api.ChiShard)
shardFunc(shard)
- case *v1.ChiHost:
- host := ap.specDiff.Added[path].(*v1.ChiHost)
+ case *api.ChiHost:
+ host := ap.specDiff.Added[path].(*api.ChiHost)
hostFunc(host)
}
}
@@ -320,30 +320,30 @@ func (ap *ActionPlan) WalkAdded(
// WalkModified walk modified cluster items
func (ap *ActionPlan) WalkModified(
- clusterFunc func(cluster *v1.Cluster),
- shardFunc func(shard *v1.ChiShard),
- hostFunc func(host *v1.ChiHost),
+ clusterFunc func(cluster *api.Cluster),
+ shardFunc func(shard *api.ChiShard),
+ hostFunc func(host *api.ChiHost),
) {
// TODO refactor to map[string]object handling, instead of slice
for path := range ap.specDiff.Modified {
switch ap.specDiff.Modified[path].(type) {
- case v1.Cluster:
- cluster := ap.specDiff.Modified[path].(v1.Cluster)
+ case api.Cluster:
+ cluster := ap.specDiff.Modified[path].(api.Cluster)
clusterFunc(&cluster)
- case v1.ChiShard:
- shard := ap.specDiff.Modified[path].(v1.ChiShard)
+ case api.ChiShard:
+ shard := ap.specDiff.Modified[path].(api.ChiShard)
shardFunc(&shard)
- case v1.ChiHost:
- host := ap.specDiff.Modified[path].(v1.ChiHost)
+ case api.ChiHost:
+ host := ap.specDiff.Modified[path].(api.ChiHost)
hostFunc(&host)
- case *v1.Cluster:
- cluster := ap.specDiff.Modified[path].(*v1.Cluster)
+ case *api.Cluster:
+ cluster := ap.specDiff.Modified[path].(*api.Cluster)
clusterFunc(cluster)
- case *v1.ChiShard:
- shard := ap.specDiff.Modified[path].(*v1.ChiShard)
+ case *api.ChiShard:
+ shard := ap.specDiff.Modified[path].(*api.ChiShard)
shardFunc(shard)
- case *v1.ChiHost:
- host := ap.specDiff.Modified[path].(*v1.ChiHost)
+ case *api.ChiHost:
+ host := ap.specDiff.Modified[path].(*api.ChiHost)
hostFunc(host)
}
}
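The three Walk* helpers repeat the same six-arm type switch over value and pointer forms of Cluster, ChiShard and ChiHost, which is what the TODO about map-based handling is pointing at. The shape of the dispatch on its own, with stand-in types rather than the operator's API:

package main

import "fmt"

// Stand-in types; the real code switches over api.Cluster, api.ChiShard and
// api.ChiHost plus their pointer forms.
type Cluster struct{ Name string }
type Shard struct{ Name string }
type Host struct{ Name string }

// dispatch normalizes value and pointer variants to a single callback each,
// mirroring the type switch used by WalkAdded/WalkRemoved/WalkModified.
func dispatch(item interface{}, onCluster func(*Cluster), onShard func(*Shard), onHost func(*Host)) {
	switch v := item.(type) {
	case Cluster:
		onCluster(&v)
	case *Cluster:
		onCluster(v)
	case Shard:
		onShard(&v)
	case *Shard:
		onShard(v)
	case Host:
		onHost(&v)
	case *Host:
		onHost(v)
	}
}

func main() {
	items := []interface{}{Cluster{"c0"}, &Shard{"s1"}, Host{"h2"}}
	for _, it := range items {
		dispatch(it,
			func(c *Cluster) { fmt.Println("cluster:", c.Name) },
			func(s *Shard) { fmt.Println("shard:", s.Name) },
			func(h *Host) { fmt.Println("host:", h.Name) },
		)
	}
}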
diff --git a/pkg/model/chi/affinity.go b/pkg/model/chi/affinity.go
index a0f95e537..45b07eff9 100644
--- a/pkg/model/chi/affinity.go
+++ b/pkg/model/chi/affinity.go
@@ -17,7 +17,7 @@ package chi
import (
"gopkg.in/d4l3k/messagediff.v1"
- "k8s.io/api/core/v1"
+ core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
@@ -26,7 +26,7 @@ import (
)
// NewAffinity creates new Affinity struct
-func NewAffinity(template *api.ChiPodTemplate) *v1.Affinity {
+func NewAffinity(template *api.ChiPodTemplate) *core.Affinity {
// Pod node affinity scheduling rules.
nodeAffinity := newNodeAffinity(template)
// Pod affinity scheduling rules. Ex.: co-locate this pod in the same node, zone, etc
@@ -40,7 +40,7 @@ func NewAffinity(template *api.ChiPodTemplate) *v1.Affinity {
return nil
}
- return &v1.Affinity{
+ return &core.Affinity{
NodeAffinity: nodeAffinity,
PodAffinity: podAffinity,
PodAntiAffinity: podAntiAffinity,
@@ -48,7 +48,7 @@ func NewAffinity(template *api.ChiPodTemplate) *v1.Affinity {
}
// MergeAffinity merges from src into dst and returns dst
-func MergeAffinity(dst *v1.Affinity, src *v1.Affinity) *v1.Affinity {
+func MergeAffinity(dst *core.Affinity, src *core.Affinity) *core.Affinity {
if src == nil {
// Nothing to merge from
return dst
@@ -57,7 +57,7 @@ func MergeAffinity(dst *v1.Affinity, src *v1.Affinity) *v1.Affinity {
created := false
if dst == nil {
// No receiver specified, allocate a new one
- dst = &v1.Affinity{}
+ dst = &core.Affinity{}
created = true
}
@@ -75,36 +75,36 @@ func MergeAffinity(dst *v1.Affinity, src *v1.Affinity) *v1.Affinity {
}
// newNodeAffinity
-func newNodeAffinity(template *api.ChiPodTemplate) *v1.NodeAffinity {
+func newNodeAffinity(template *api.ChiPodTemplate) *core.NodeAffinity {
if template.Zone.Key == "" {
return nil
}
- return &v1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
- NodeSelectorTerms: []v1.NodeSelectorTerm{
+ return &core.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
+ NodeSelectorTerms: []core.NodeSelectorTerm{
{
// A list of node selector requirements by node's labels.
- MatchExpressions: []v1.NodeSelectorRequirement{
+ MatchExpressions: []core.NodeSelectorRequirement{
{
Key: template.Zone.Key,
- Operator: v1.NodeSelectorOpIn,
+ Operator: core.NodeSelectorOpIn,
Values: template.Zone.Values,
},
},
// A list of node selector requirements by node's fields.
- //MatchFields: []v1.NodeSelectorRequirement{
- // v1.NodeSelectorRequirement{},
+ //MatchFields: []core.NodeSelectorRequirement{
+ // core.NodeSelectorRequirement{},
//},
},
},
},
- // PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{},
+ // PreferredDuringSchedulingIgnoredDuringExecution: []core.PreferredSchedulingTerm{},
}
}
-func getNodeSelectorTerms(affinity *v1.NodeAffinity) []v1.NodeSelectorTerm {
+func getNodeSelectorTerms(affinity *core.NodeAffinity) []core.NodeSelectorTerm {
if affinity == nil {
return nil
}
@@ -115,7 +115,7 @@ func getNodeSelectorTerms(affinity *v1.NodeAffinity) []v1.NodeSelectorTerm {
return affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
}
-func getNodeSelectorTerm(affinity *v1.NodeAffinity, i int) *v1.NodeSelectorTerm {
+func getNodeSelectorTerm(affinity *core.NodeAffinity, i int) *core.NodeSelectorTerm {
terms := getNodeSelectorTerms(affinity)
if terms == nil {
return nil
@@ -126,17 +126,17 @@ func getNodeSelectorTerm(affinity *v1.NodeAffinity, i int) *v1.NodeSelectorTerm
return &terms[i]
}
-func appendNodeSelectorTerm(affinity *v1.NodeAffinity, term *v1.NodeSelectorTerm) *v1.NodeAffinity {
+func appendNodeSelectorTerm(affinity *core.NodeAffinity, term *core.NodeSelectorTerm) *core.NodeAffinity {
if term == nil {
return affinity
}
// Ensure path to terms exists
if affinity == nil {
- affinity = &v1.NodeAffinity{}
+ affinity = &core.NodeAffinity{}
}
if affinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
- affinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
+ affinity.RequiredDuringSchedulingIgnoredDuringExecution = &core.NodeSelector{}
}
affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(
@@ -147,7 +147,7 @@ func appendNodeSelectorTerm(affinity *v1.NodeAffinity, term *v1.NodeSelectorTerm
return affinity
}
-func getPreferredSchedulingTerms(affinity *v1.NodeAffinity) []v1.PreferredSchedulingTerm {
+func getPreferredSchedulingTerms(affinity *core.NodeAffinity) []core.PreferredSchedulingTerm {
if affinity == nil {
return nil
}
@@ -155,7 +155,7 @@ func getPreferredSchedulingTerms(affinity *v1.NodeAffinity) []v1.PreferredSchedu
return affinity.PreferredDuringSchedulingIgnoredDuringExecution
}
-func getPreferredSchedulingTerm(affinity *v1.NodeAffinity, i int) *v1.PreferredSchedulingTerm {
+func getPreferredSchedulingTerm(affinity *core.NodeAffinity, i int) *core.PreferredSchedulingTerm {
terms := getPreferredSchedulingTerms(affinity)
if terms == nil {
return nil
@@ -166,14 +166,14 @@ func getPreferredSchedulingTerm(affinity *v1.NodeAffinity, i int) *v1.PreferredS
return &terms[i]
}
-func appendPreferredSchedulingTerm(affinity *v1.NodeAffinity, term *v1.PreferredSchedulingTerm) *v1.NodeAffinity {
+func appendPreferredSchedulingTerm(affinity *core.NodeAffinity, term *core.PreferredSchedulingTerm) *core.NodeAffinity {
if term == nil {
return affinity
}
// Ensure path to terms exists
if affinity == nil {
- affinity = &v1.NodeAffinity{}
+ affinity = &core.NodeAffinity{}
}
affinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
@@ -185,7 +185,7 @@ func appendPreferredSchedulingTerm(affinity *v1.NodeAffinity, term *v1.Preferred
}
// mergeNodeAffinity
-func mergeNodeAffinity(dst *v1.NodeAffinity, src *v1.NodeAffinity) *v1.NodeAffinity {
+func mergeNodeAffinity(dst *core.NodeAffinity, src *core.NodeAffinity) *core.NodeAffinity {
if src == nil {
// Nothing to merge from
return dst
@@ -229,10 +229,10 @@ func mergeNodeAffinity(dst *v1.NodeAffinity, src *v1.NodeAffinity) *v1.NodeAffin
}
// newPodAffinity
-func newPodAffinity(template *api.ChiPodTemplate) *v1.PodAffinity {
+func newPodAffinity(template *api.ChiPodTemplate) *core.PodAffinity {
// Return podAffinity only in case something was added into it
added := false
- podAffinity := &v1.PodAffinity{}
+ podAffinity := &core.PodAffinity{}
for i := range template.PodDistribution {
podDistribution := &template.PodDistribution[i]
@@ -330,7 +330,7 @@ func newPodAffinity(template *api.ChiPodTemplate) *v1.PodAffinity {
return nil
}
-func getPodAffinityTerms(affinity *v1.PodAffinity) []v1.PodAffinityTerm {
+func getPodAffinityTerms(affinity *core.PodAffinity) []core.PodAffinityTerm {
if affinity == nil {
return nil
}
@@ -338,7 +338,7 @@ func getPodAffinityTerms(affinity *v1.PodAffinity) []v1.PodAffinityTerm {
return affinity.RequiredDuringSchedulingIgnoredDuringExecution
}
-func getPodAffinityTerm(affinity *v1.PodAffinity, i int) *v1.PodAffinityTerm {
+func getPodAffinityTerm(affinity *core.PodAffinity, i int) *core.PodAffinityTerm {
terms := getPodAffinityTerms(affinity)
if terms == nil {
return nil
@@ -349,14 +349,14 @@ func getPodAffinityTerm(affinity *v1.PodAffinity, i int) *v1.PodAffinityTerm {
return &terms[i]
}
-func appendPodAffinityTerm(affinity *v1.PodAffinity, term *v1.PodAffinityTerm) *v1.PodAffinity {
+func appendPodAffinityTerm(affinity *core.PodAffinity, term *core.PodAffinityTerm) *core.PodAffinity {
if term == nil {
return affinity
}
// Ensure path to terms exists
if affinity == nil {
- affinity = &v1.PodAffinity{}
+ affinity = &core.PodAffinity{}
}
affinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
@@ -367,7 +367,7 @@ func appendPodAffinityTerm(affinity *v1.PodAffinity, term *v1.PodAffinityTerm) *
return affinity
}
-func getWeightedPodAffinityTerms(affinity *v1.PodAffinity) []v1.WeightedPodAffinityTerm {
+func getWeightedPodAffinityTerms(affinity *core.PodAffinity) []core.WeightedPodAffinityTerm {
if affinity == nil {
return nil
}
@@ -375,7 +375,7 @@ func getWeightedPodAffinityTerms(affinity *v1.PodAffinity) []v1.WeightedPodAffin
return affinity.PreferredDuringSchedulingIgnoredDuringExecution
}
-func getWeightedPodAffinityTerm(affinity *v1.PodAffinity, i int) *v1.WeightedPodAffinityTerm {
+func getWeightedPodAffinityTerm(affinity *core.PodAffinity, i int) *core.WeightedPodAffinityTerm {
terms := getWeightedPodAffinityTerms(affinity)
if terms == nil {
return nil
@@ -386,14 +386,14 @@ func getWeightedPodAffinityTerm(affinity *v1.PodAffinity, i int) *v1.WeightedPod
return &terms[i]
}
-func appendWeightedPodAffinityTerm(affinity *v1.PodAffinity, term *v1.WeightedPodAffinityTerm) *v1.PodAffinity {
+func appendWeightedPodAffinityTerm(affinity *core.PodAffinity, term *core.WeightedPodAffinityTerm) *core.PodAffinity {
if term == nil {
return affinity
}
// Ensure path to terms exists
if affinity == nil {
- affinity = &v1.PodAffinity{}
+ affinity = &core.PodAffinity{}
}
affinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
@@ -405,7 +405,7 @@ func appendWeightedPodAffinityTerm(affinity *v1.PodAffinity, term *v1.WeightedPo
}
// mergePodAffinity
-func mergePodAffinity(dst *v1.PodAffinity, src *v1.PodAffinity) *v1.PodAffinity {
+func mergePodAffinity(dst *core.PodAffinity, src *core.PodAffinity) *core.PodAffinity {
if src == nil {
// Nothing to merge from
return dst
@@ -493,10 +493,10 @@ func newMatchLabels(
}
// newPodAntiAffinity
-func newPodAntiAffinity(template *api.ChiPodTemplate) *v1.PodAntiAffinity {
+func newPodAntiAffinity(template *api.ChiPodTemplate) *core.PodAntiAffinity {
// Return podAntiAffinity only in case something was added into it
added := false
- podAntiAffinity := &v1.PodAntiAffinity{}
+ podAntiAffinity := &core.PodAntiAffinity{}
// PodDistribution
for i := range template.PodDistribution {
@@ -620,7 +620,7 @@ func newPodAntiAffinity(template *api.ChiPodTemplate) *v1.PodAntiAffinity {
return nil
}
-func getPodAntiAffinityTerms(affinity *v1.PodAntiAffinity) []v1.PodAffinityTerm {
+func getPodAntiAffinityTerms(affinity *core.PodAntiAffinity) []core.PodAffinityTerm {
if affinity == nil {
return nil
}
@@ -628,7 +628,7 @@ func getPodAntiAffinityTerms(affinity *v1.PodAntiAffinity) []v1.PodAffinityTerm
return affinity.RequiredDuringSchedulingIgnoredDuringExecution
}
-func getPodAntiAffinityTerm(affinity *v1.PodAntiAffinity, i int) *v1.PodAffinityTerm {
+func getPodAntiAffinityTerm(affinity *core.PodAntiAffinity, i int) *core.PodAffinityTerm {
terms := getPodAntiAffinityTerms(affinity)
if terms == nil {
return nil
@@ -639,14 +639,14 @@ func getPodAntiAffinityTerm(affinity *v1.PodAntiAffinity, i int) *v1.PodAffinity
return &terms[i]
}
-func appendPodAntiAffinityTerm(affinity *v1.PodAntiAffinity, term *v1.PodAffinityTerm) *v1.PodAntiAffinity {
+func appendPodAntiAffinityTerm(affinity *core.PodAntiAffinity, term *core.PodAffinityTerm) *core.PodAntiAffinity {
if term == nil {
return affinity
}
// Ensure path to terms exists
if affinity == nil {
- affinity = &v1.PodAntiAffinity{}
+ affinity = &core.PodAntiAffinity{}
}
affinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
@@ -657,7 +657,7 @@ func appendPodAntiAffinityTerm(affinity *v1.PodAntiAffinity, term *v1.PodAffinit
return affinity
}
-func getWeightedPodAntiAffinityTerms(affinity *v1.PodAntiAffinity) []v1.WeightedPodAffinityTerm {
+func getWeightedPodAntiAffinityTerms(affinity *core.PodAntiAffinity) []core.WeightedPodAffinityTerm {
if affinity == nil {
return nil
}
@@ -665,7 +665,7 @@ func getWeightedPodAntiAffinityTerms(affinity *v1.PodAntiAffinity) []v1.Weighted
return affinity.PreferredDuringSchedulingIgnoredDuringExecution
}
-func getWeightedPodAntiAffinityTerm(affinity *v1.PodAntiAffinity, i int) *v1.WeightedPodAffinityTerm {
+func getWeightedPodAntiAffinityTerm(affinity *core.PodAntiAffinity, i int) *core.WeightedPodAffinityTerm {
terms := getWeightedPodAntiAffinityTerms(affinity)
if terms == nil {
return nil
@@ -676,14 +676,14 @@ func getWeightedPodAntiAffinityTerm(affinity *v1.PodAntiAffinity, i int) *v1.Wei
return &terms[i]
}
-func appendWeightedPodAntiAffinityTerm(affinity *v1.PodAntiAffinity, term *v1.WeightedPodAffinityTerm) *v1.PodAntiAffinity {
+func appendWeightedPodAntiAffinityTerm(affinity *core.PodAntiAffinity, term *core.WeightedPodAffinityTerm) *core.PodAntiAffinity {
if term == nil {
return affinity
}
// Ensure path to terms exists
if affinity == nil {
- affinity = &v1.PodAntiAffinity{}
+ affinity = &core.PodAntiAffinity{}
}
affinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
@@ -695,7 +695,7 @@ func appendWeightedPodAntiAffinityTerm(affinity *v1.PodAntiAffinity, term *v1.We
}
// mergePodAntiAffinity
-func mergePodAntiAffinity(dst *v1.PodAntiAffinity, src *v1.PodAntiAffinity) *v1.PodAntiAffinity {
+func mergePodAntiAffinity(dst *core.PodAntiAffinity, src *core.PodAntiAffinity) *core.PodAntiAffinity {
if src == nil {
// Nothing to merge from
return dst
@@ -742,8 +742,8 @@ func mergePodAntiAffinity(dst *v1.PodAntiAffinity, src *v1.PodAntiAffinity) *v1.
func newPodAffinityTermWithMatchLabels(
podDistribution *api.ChiPodDistribution,
matchLabels map[string]string,
-) v1.PodAffinityTerm {
- return v1.PodAffinityTerm{
+) core.PodAffinityTerm {
+ return core.PodAffinityTerm{
LabelSelector: &meta.LabelSelector{
// A list of node selector requirements by node's labels.
//MatchLabels: map[string]string{
@@ -769,8 +769,8 @@ func newPodAffinityTermWithMatchLabels(
func newPodAffinityTermWithMatchExpressions(
podDistribution *api.ChiPodDistribution,
matchExpressions []meta.LabelSelectorRequirement,
-) v1.PodAffinityTerm {
- return v1.PodAffinityTerm{
+) core.PodAffinityTerm {
+ return core.PodAffinityTerm{
LabelSelector: &meta.LabelSelector{
// A list of node selector requirements by node's labels.
//MatchLabels: map[string]string{
@@ -796,10 +796,10 @@ func newWeightedPodAffinityTermWithMatchLabels(
weight int32,
podDistribution *api.ChiPodDistribution,
matchLabels map[string]string,
-) v1.WeightedPodAffinityTerm {
- return v1.WeightedPodAffinityTerm{
+) core.WeightedPodAffinityTerm {
+ return core.WeightedPodAffinityTerm{
Weight: weight,
- PodAffinityTerm: v1.PodAffinityTerm{
+ PodAffinityTerm: core.PodAffinityTerm{
LabelSelector: &meta.LabelSelector{
// A list of node selector requirements by node's labels.
//MatchLabels: map[string]string{
@@ -822,8 +822,8 @@ func newWeightedPodAffinityTermWithMatchLabels(
}
}
-// prepareAffinity
-func prepareAffinity(podTemplate *api.ChiPodTemplate, host *api.ChiHost) {
+// PrepareAffinity
+func PrepareAffinity(podTemplate *api.ChiPodTemplate, host *api.ChiHost) {
switch {
case podTemplate == nil:
return
@@ -850,7 +850,7 @@ func prepareAffinity(podTemplate *api.ChiPodTemplate, host *api.ChiHost) {
}
// processNodeSelector
-func processNodeSelector(nodeSelector *v1.NodeSelector, host *api.ChiHost) {
+func processNodeSelector(nodeSelector *core.NodeSelector, host *api.ChiHost) {
if nodeSelector == nil {
return
}
@@ -861,7 +861,7 @@ func processNodeSelector(nodeSelector *v1.NodeSelector, host *api.ChiHost) {
}
// processPreferredSchedulingTerms
-func processPreferredSchedulingTerms(preferredSchedulingTerms []v1.PreferredSchedulingTerm, host *api.ChiHost) {
+func processPreferredSchedulingTerms(preferredSchedulingTerms []core.PreferredSchedulingTerm, host *api.ChiHost) {
for i := range preferredSchedulingTerms {
nodeSelectorTerm := &preferredSchedulingTerms[i].Preference
processNodeSelectorTerm(nodeSelectorTerm, host)
@@ -869,7 +869,7 @@ func processPreferredSchedulingTerms(preferredSchedulingTerms []v1.PreferredSche
}
// processNodeSelectorTerm
-func processNodeSelectorTerm(nodeSelectorTerm *v1.NodeSelectorTerm, host *api.ChiHost) {
+func processNodeSelectorTerm(nodeSelectorTerm *core.NodeSelectorTerm, host *api.ChiHost) {
for i := range nodeSelectorTerm.MatchExpressions {
nodeSelectorRequirement := &nodeSelectorTerm.MatchExpressions[i]
processNodeSelectorRequirement(nodeSelectorRequirement, host)
@@ -882,19 +882,19 @@ func processNodeSelectorTerm(nodeSelectorTerm *v1.NodeSelectorTerm, host *api.Ch
}
// processNodeSelectorRequirement
-func processNodeSelectorRequirement(nodeSelectorRequirement *v1.NodeSelectorRequirement, host *api.ChiHost) {
+func processNodeSelectorRequirement(nodeSelectorRequirement *core.NodeSelectorRequirement, host *api.ChiHost) {
if nodeSelectorRequirement == nil {
return
}
- nodeSelectorRequirement.Key = macro(host).Line(nodeSelectorRequirement.Key)
+ nodeSelectorRequirement.Key = Macro(host).Line(nodeSelectorRequirement.Key)
// Update values only, keys are not macros-ed
for i := range nodeSelectorRequirement.Values {
- nodeSelectorRequirement.Values[i] = macro(host).Line(nodeSelectorRequirement.Values[i])
+ nodeSelectorRequirement.Values[i] = Macro(host).Line(nodeSelectorRequirement.Values[i])
}
}
// processPodAffinityTerms
-func processPodAffinityTerms(podAffinityTerms []v1.PodAffinityTerm, host *api.ChiHost) {
+func processPodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, host *api.ChiHost) {
for i := range podAffinityTerms {
podAffinityTerm := &podAffinityTerms[i]
processPodAffinityTerm(podAffinityTerm, host)
@@ -902,7 +902,7 @@ func processPodAffinityTerms(podAffinityTerms []v1.PodAffinityTerm, host *api.Ch
}
// processWeightedPodAffinityTerms
-func processWeightedPodAffinityTerms(weightedPodAffinityTerms []v1.WeightedPodAffinityTerm, host *api.ChiHost) {
+func processWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, host *api.ChiHost) {
for i := range weightedPodAffinityTerms {
podAffinityTerm := &weightedPodAffinityTerms[i].PodAffinityTerm
processPodAffinityTerm(podAffinityTerm, host)
@@ -910,12 +910,12 @@ func processWeightedPodAffinityTerms(weightedPodAffinityTerms []v1.WeightedPodAf
}
// processPodAffinityTerm
-func processPodAffinityTerm(podAffinityTerm *v1.PodAffinityTerm, host *api.ChiHost) {
+func processPodAffinityTerm(podAffinityTerm *core.PodAffinityTerm, host *api.ChiHost) {
if podAffinityTerm == nil {
return
}
processLabelSelector(podAffinityTerm.LabelSelector, host)
- podAffinityTerm.TopologyKey = macro(host).Line(podAffinityTerm.TopologyKey)
+ podAffinityTerm.TopologyKey = Macro(host).Line(podAffinityTerm.TopologyKey)
}
// processLabelSelector
@@ -925,7 +925,7 @@ func processLabelSelector(labelSelector *meta.LabelSelector, host *api.ChiHost)
}
for k := range labelSelector.MatchLabels {
- labelSelector.MatchLabels[k] = macro(host).Line(labelSelector.MatchLabels[k])
+ labelSelector.MatchLabels[k] = Macro(host).Line(labelSelector.MatchLabels[k])
}
for j := range labelSelector.MatchExpressions {
labelSelectorRequirement := &labelSelector.MatchExpressions[j]
@@ -938,9 +938,9 @@ func processLabelSelectorRequirement(labelSelectorRequirement *meta.LabelSelecto
if labelSelectorRequirement == nil {
return
}
- labelSelectorRequirement.Key = macro(host).Line(labelSelectorRequirement.Key)
+ labelSelectorRequirement.Key = Macro(host).Line(labelSelectorRequirement.Key)
// Update values only, keys are not macros-ed
for i := range labelSelectorRequirement.Values {
- labelSelectorRequirement.Values[i] = macro(host).Line(labelSelectorRequirement.Values[i])
+ labelSelectorRequirement.Values[i] = Macro(host).Line(labelSelectorRequirement.Values[i])
}
}
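
With prepareAffinity and macro exported as PrepareAffinity and Macro, affinity personalization can be driven from outside this package. A minimal sketch of a caller, assuming the package is imported as model; the helper and the macro string are illustrative, not operator code:

```go
package example

import (
	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	model "github.com/altinity/clickhouse-operator/pkg/model/chi"
)

// buildPodTemplateFor copies the host's pod template, expands affinity rules
// for this particular host and resolves a macro-bearing string.
func buildPodTemplateFor(host *api.ChiHost) (*api.ChiPodTemplate, bool) {
	podTemplate, ok := host.GetPodTemplate()
	if !ok {
		return nil, false
	}
	// Work on a copy so the shared template is not mutated.
	podTemplate = podTemplate.DeepCopy()
	model.PrepareAffinity(podTemplate, host)
	// Macro(host).Line substitutes host-scoped macros in a single line of text.
	_ = model.Macro(host).Line("{namespace}/{chi}")
	return podTemplate, true
}
```
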
diff --git a/pkg/model/chi/annotator.go b/pkg/model/chi/annotator.go
index 4b237b650..651555385 100644
--- a/pkg/model/chi/annotator.go
+++ b/pkg/model/chi/annotator.go
@@ -34,58 +34,58 @@ func NewAnnotator(chi *api.ClickHouseInstallation) *Annotator {
}
}
-// getConfigMapCHICommon
-func (a *Annotator) getConfigMapCHICommon() map[string]string {
+// GetConfigMapCHICommon
+func (a *Annotator) GetConfigMapCHICommon() map[string]string {
return util.MergeStringMapsOverwrite(
a.getCHIScope(),
nil,
)
}
-// getConfigMapCHICommonUsers
-func (a *Annotator) getConfigMapCHICommonUsers() map[string]string {
+// GetConfigMapCHICommonUsers
+func (a *Annotator) GetConfigMapCHICommonUsers() map[string]string {
return util.MergeStringMapsOverwrite(
a.getCHIScope(),
nil,
)
}
-// getConfigMapHost
-func (a *Annotator) getConfigMapHost(host *api.ChiHost) map[string]string {
+// GetConfigMapHost
+func (a *Annotator) GetConfigMapHost(host *api.ChiHost) map[string]string {
return util.MergeStringMapsOverwrite(
- a.getHostScope(host),
+ a.GetHostScope(host),
nil,
)
}
-// getServiceCHI
-func (a *Annotator) getServiceCHI(chi *api.ClickHouseInstallation) map[string]string {
+// GetServiceCHI
+func (a *Annotator) GetServiceCHI(chi *api.ClickHouseInstallation) map[string]string {
return util.MergeStringMapsOverwrite(
a.getCHIScope(),
nil,
)
}
-// getServiceCluster
-func (a *Annotator) getServiceCluster(cluster *api.Cluster) map[string]string {
+// GetServiceCluster
+func (a *Annotator) GetServiceCluster(cluster *api.Cluster) map[string]string {
return util.MergeStringMapsOverwrite(
- a.getClusterScope(cluster),
+ a.GetClusterScope(cluster),
nil,
)
}
-// getServiceShard
-func (a *Annotator) getServiceShard(shard *api.ChiShard) map[string]string {
+// GetServiceShard
+func (a *Annotator) GetServiceShard(shard *api.ChiShard) map[string]string {
return util.MergeStringMapsOverwrite(
a.getShardScope(shard),
nil,
)
}
-// getServiceHost
-func (a *Annotator) getServiceHost(host *api.ChiHost) map[string]string {
+// GetServiceHost
+func (a *Annotator) GetServiceHost(host *api.ChiHost) map[string]string {
return util.MergeStringMapsOverwrite(
- a.getHostScope(host),
+ a.GetHostScope(host),
nil,
)
}
@@ -96,8 +96,8 @@ func (a *Annotator) getCHIScope() map[string]string {
return a.filterOutPredefined(a.appendCHIProvidedTo(nil))
}
-// getClusterScope gets annotations for Cluster-scoped object
-func (a *Annotator) getClusterScope(cluster *api.Cluster) map[string]string {
+// GetClusterScope gets annotations for Cluster-scoped object
+func (a *Annotator) GetClusterScope(cluster *api.Cluster) map[string]string {
// Combine generated annotations and CHI-provided annotations
return a.filterOutPredefined(a.appendCHIProvidedTo(nil))
}
@@ -108,8 +108,8 @@ func (a *Annotator) getShardScope(shard *api.ChiShard) map[string]string {
return a.filterOutPredefined(a.appendCHIProvidedTo(nil))
}
-// getHostScope gets annotations for Host-scoped object
-func (a *Annotator) getHostScope(host *api.ChiHost) map[string]string {
+// GetHostScope gets annotations for Host-scoped object
+func (a *Annotator) GetHostScope(host *api.ChiHost) map[string]string {
return a.filterOutPredefined(a.appendCHIProvidedTo(nil))
}
@@ -124,17 +124,17 @@ func (a *Annotator) appendCHIProvidedTo(dst map[string]string) map[string]string
return util.MergeStringMapsOverwrite(dst, source)
}
-// getPV
-func (a *Annotator) getPV(pv *core.PersistentVolume, host *api.ChiHost) map[string]string {
- return util.MergeStringMapsOverwrite(pv.Annotations, a.getHostScope(host))
+// GetPV
+func (a *Annotator) GetPV(pv *core.PersistentVolume, host *api.ChiHost) map[string]string {
+ return util.MergeStringMapsOverwrite(pv.Annotations, a.GetHostScope(host))
}
-// getPVC
-func (a *Annotator) getPVC(
+// GetPVC
+func (a *Annotator) GetPVC(
pvc *core.PersistentVolumeClaim,
host *api.ChiHost,
template *api.ChiVolumeClaimTemplate,
) map[string]string {
annotations := util.MergeStringMapsOverwrite(pvc.Annotations, template.ObjectMeta.Annotations)
- return util.MergeStringMapsOverwrite(annotations, a.getHostScope(host))
+ return util.MergeStringMapsOverwrite(annotations, a.GetHostScope(host))
}
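
Since the Annotator getters are exported, scope-specific annotations become reachable from other packages. A hedged sketch; the helper below is an assumption, not part of the operator:

```go
package example

import (
	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	model "github.com/altinity/clickhouse-operator/pkg/model/chi"
)

// annotationsForHost gathers host-scoped annotations for the host's ConfigMap
// and Service via the exported Annotator methods.
func annotationsForHost(chi *api.ClickHouseInstallation, host *api.ChiHost) (cm, svc map[string]string) {
	a := model.NewAnnotator(chi)
	cm = a.GetConfigMapHost(host)
	svc = a.GetServiceHost(host)
	return cm, svc
}
```
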
diff --git a/pkg/model/chi/ch_config_const.go b/pkg/model/chi/ch_config_const.go
index 661f467d9..53dc3cd30 100644
--- a/pkg/model/chi/ch_config_const.go
+++ b/pkg/model/chi/ch_config_const.go
@@ -32,78 +32,79 @@ const (
)
const (
- // dirPathCommonConfig specifies full path to folder, where generated common XML files for ClickHouse would be placed
+ // DirPathCommonConfig specifies full path to folder, where generated common XML files for ClickHouse would be placed
// for the following sections:
// 1. remote servers
// 2. operator-provided additional config files
- dirPathCommonConfig = "/etc/clickhouse-server/" + api.CommonConfigDir + "/"
+ DirPathCommonConfig = "/etc/clickhouse-server/" + api.CommonConfigDir + "/"
- // dirPathUsersConfig specifies full path to folder, where generated users XML files for ClickHouse would be placed
+ // DirPathUsersConfig specifies full path to folder, where generated users XML files for ClickHouse would be placed
// for the following sections:
// 1. users
// 2. quotas
// 3. profiles
// 4. operator-provided additional config files
- dirPathUsersConfig = "/etc/clickhouse-server/" + api.UsersConfigDir + "/"
+ DirPathUsersConfig = "/etc/clickhouse-server/" + api.UsersConfigDir + "/"
- // dirPathHostConfig specifies full path to folder, where generated host XML files for ClickHouse would be placed
+ // DirPathHostConfig specifies full path to folder, where generated host XML files for ClickHouse would be placed
// for the following sections:
// 1. macros
// 2. zookeeper
// 3. settings
// 4. files
// 5. operator-provided additional config files
- dirPathHostConfig = "/etc/clickhouse-server/" + api.HostConfigDir + "/"
+ DirPathHostConfig = "/etc/clickhouse-server/" + api.HostConfigDir + "/"
- dirPathSecretFilesConfig = "/etc/clickhouse-server/secrets.d/"
+ // DirPathSecretFilesConfig specifies full path to folder, where secrets are mounted
+ DirPathSecretFilesConfig = "/etc/clickhouse-server/secrets.d/"
- // dirPathClickHouseData specifies full path of data folder where ClickHouse would place its data storage
- dirPathClickHouseData = "/var/lib/clickhouse"
+ // DirPathClickHouseData specifies full path of data folder where ClickHouse would place its data storage
+ DirPathClickHouseData = "/var/lib/clickhouse"
- // dirPathClickHouseLog specifies full path of data folder where ClickHouse would place its log files
- dirPathClickHouseLog = "/var/log/clickhouse-server"
+ // DirPathClickHouseLog specifies full path of data folder where ClickHouse would place its log files
+ DirPathClickHouseLog = "/var/log/clickhouse-server"
- // dirPathDockerEntrypointInit specified full path of docker-entrypoint-initdb.d
+ // DirPathDockerEntrypointInit specifies full path of docker-entrypoint-initdb.d
// For more details please check: https://github.com/ClickHouse/ClickHouse/issues/3319
- dirPathDockerEntrypointInit = "/docker-entrypoint-initdb.d"
+ DirPathDockerEntrypointInit = "/docker-entrypoint-initdb.d"
)
const (
- // defaultClickHouseDockerImage specifies default ClickHouse docker image to be used
- defaultClickHouseDockerImage = "clickhouse/clickhouse-server:latest"
+ // DefaultClickHouseDockerImage specifies default ClickHouse docker image to be used
+ DefaultClickHouseDockerImage = "clickhouse/clickhouse-server:latest"
- // defaultBusyBoxDockerImage specifies default BusyBox docker image to be used
- defaultBusyBoxDockerImage = "busybox"
+ // DefaultBusyBoxDockerImage specifies default BusyBox docker image to be used
+ DefaultBusyBoxDockerImage = "busybox"
- // defaultUbiDockerImage specifies default ubi docker image to be used
- defaultUbiDockerImage = "registry.access.redhat.com/ubi8/ubi-minimal:latest"
+ // DefaultUbiDockerImage specifies default ubi docker image to be used
+ DefaultUbiDockerImage = "registry.access.redhat.com/ubi8/ubi-minimal:latest"
// Name of container within Pod with ClickHouse instance.
// Pod may have other containers included, such as monitoring, logging
- // clickHouseContainerName specifies name of the clickhouse container in the pod
- clickHouseContainerName = "clickhouse"
- // clickHouseLogContainerName specifies name of the logger container in the pod
- clickHouseLogContainerName = "clickhouse-log"
+ // ClickHouseContainerName specifies name of the clickhouse container in the pod
+ ClickHouseContainerName = "clickhouse"
+ // ClickHouseLogContainerName specifies name of the logger container in the pod
+ ClickHouseLogContainerName = "clickhouse-log"
)
const (
// ClickHouse open ports names and values
- chDefaultTCPPortName = "tcp"
- chDefaultTCPPortNumber = int32(9000)
- chDefaultTLSPortName = "secureclient"
- chDefaultTLSPortNumber = int32(9440)
- chDefaultHTTPPortName = "http"
- chDefaultHTTPPortNumber = int32(8123)
- chDefaultHTTPSPortName = "https"
- chDefaultHTTPSPortNumber = int32(8443)
- chDefaultInterserverHTTPPortName = "interserver"
- chDefaultInterserverHTTPPortNumber = int32(9009)
+ ChDefaultTCPPortName = "tcp"
+ ChDefaultTCPPortNumber = int32(9000)
+ ChDefaultTLSPortName = "secureclient"
+ ChDefaultTLSPortNumber = int32(9440)
+ ChDefaultHTTPPortName = "http"
+ ChDefaultHTTPPortNumber = int32(8123)
+ ChDefaultHTTPSPortName = "https"
+ ChDefaultHTTPSPortNumber = int32(8443)
+ ChDefaultInterserverHTTPPortName = "interserver"
+ ChDefaultInterserverHTTPPortNumber = int32(9009)
)
const (
- // zkDefaultPort specifies Zookeeper default port
- zkDefaultPort = 2181
- // zkDefaultRootTemplate specifies default ZK root - /clickhouse/{namespace}/{chi name}
- zkDefaultRootTemplate = "/clickhouse/%s/%s"
+ // ZkDefaultPort specifies Zookeeper default port
+ ZkDefaultPort = 2181
+ // ZkDefaultRootTemplate specifies default ZK root - /clickhouse/{namespace}/{chi name}
+ ZkDefaultRootTemplate = "/clickhouse/%s/%s"
)
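
With the directory, image and port constants exported, other packages can reference the canonical values instead of re-declaring literals. An illustrative helper, assuming the package is imported as model:

```go
package example

import (
	core "k8s.io/api/core/v1"

	model "github.com/altinity/clickhouse-operator/pkg/model/chi"
)

// defaultClickHousePorts lists the well-known ClickHouse ports by their
// exported names and numbers.
func defaultClickHousePorts() []core.ContainerPort {
	return []core.ContainerPort{
		{Name: model.ChDefaultTCPPortName, ContainerPort: model.ChDefaultTCPPortNumber},
		{Name: model.ChDefaultHTTPPortName, ContainerPort: model.ChDefaultHTTPPortNumber},
		{Name: model.ChDefaultInterserverHTTPPortName, ContainerPort: model.ChDefaultInterserverHTTPPortNumber},
	}
}
```
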
diff --git a/pkg/model/chi/ch_config_generator.go b/pkg/model/chi/ch_config_generator.go
index c7238626f..e9b1340f0 100644
--- a/pkg/model/chi/ch_config_generator.go
+++ b/pkg/model/chi/ch_config_generator.go
@@ -352,7 +352,7 @@ func (c *ClickHouseConfigGenerator) GetRemoteServers(options *RemoteServersGener
util.Iline(b, 12, "<secret>%s</secret>", cluster.Secret.Value)
case api.ClusterSecretSourceSecretRef, api.ClusterSecretSourceAuto:
// Use secret via ENV var from secret
- util.Iline(b, 12, `<secret from_env="%s" />`, internodeClusterSecretEnvName)
+ util.Iline(b, 12, `<secret from_env="%s" />`, InternodeClusterSecretEnvName)
}
// Build each shard XML
@@ -458,7 +458,7 @@ func (c *ClickHouseConfigGenerator) GetHostMacros(host *api.ChiHost) string {
util.Iline(b, 0, "    <macros>")
// <installation>CHI-name-macros-value</installation>
- util.Iline(b, 8, "<installation>%s</installation>", host.Address.CHIName)
+ util.Iline(b, 8, "<installation>%s</installation>", host.Runtime.Address.CHIName)
// cluster-name-macros-value
// util.Iline(b, 8, "<%s>%[2]s</%[1]s>", replica.Address.ClusterName, c.getMacrosCluster(replica.Address.ClusterName))
@@ -467,13 +467,13 @@ func (c *ClickHouseConfigGenerator) GetHostMacros(host *api.ChiHost) string {
// All Shards One Replica ChkCluster
// 0-based shard index within all-shards-one-replica-cluster
- util.Iline(b, 8, "<%s-shard>%d</%[1]s-shard>", AllShardsOneReplicaClusterName, host.Address.CHIScopeIndex)
+ util.Iline(b, 8, "<%s-shard>%d</%[1]s-shard>", AllShardsOneReplicaClusterName, host.Runtime.Address.CHIScopeIndex)
// <cluster> and <shard> macros are applicable to main cluster only. All aux clusters do not have ambiguous macros
// <cluster> macro
- util.Iline(b, 8, "<cluster>%s</cluster>", host.Address.ClusterName)
+ util.Iline(b, 8, "<cluster>%s</cluster>", host.Runtime.Address.ClusterName)
// <shard> macro
- util.Iline(b, 8, "<shard>%s</shard>", host.Address.ShardName)
+ util.Iline(b, 8, "<shard>%s</shard>", host.Runtime.Address.ShardName)
// replica id = full deployment id
// full deployment id is unique to identify replica within the cluster
util.Iline(b, 8, "<replica>%s</replica>", CreatePodHostname(host))
@@ -494,22 +494,22 @@ func (c *ClickHouseConfigGenerator) GetHostHostnameAndPorts(host *api.ChiHost) s
// <yandex>
util.Iline(b, 0, "<"+xmlTagYandex+">")
- if host.TCPPort != chDefaultTCPPortNumber {
+ if host.TCPPort != ChDefaultTCPPortNumber {
util.Iline(b, 4, "<tcp_port>%d</tcp_port>", host.TCPPort)
}
- if host.TLSPort != chDefaultTLSPortNumber {
+ if host.TLSPort != ChDefaultTLSPortNumber {
util.Iline(b, 4, "<tcp_port_secure>%d</tcp_port_secure>", host.TLSPort)
}
- if host.HTTPPort != chDefaultHTTPPortNumber {
+ if host.HTTPPort != ChDefaultHTTPPortNumber {
util.Iline(b, 4, "<http_port>%d</http_port>", host.HTTPPort)
}
- if host.HTTPSPort != chDefaultHTTPSPortNumber {
+ if host.HTTPSPort != ChDefaultHTTPSPortNumber {
util.Iline(b, 4, "<https_port>%d</https_port>", host.HTTPSPort)
}
// Interserver host and port
util.Iline(b, 4, "<interserver_http_host>%s</interserver_http_host>", c.getRemoteServersReplicaHostname(host))
- if host.InterserverHTTPPort != chDefaultInterserverHTTPPortNumber {
+ if host.InterserverHTTPPort != ChDefaultInterserverHTTPPortNumber {
util.Iline(b, 4, "<interserver_http_port>%d</interserver_http_port>", host.InterserverHTTPPort)
}
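
GetHostHostnameAndPorts writes a port override only when the host's port differs from the exported default, so a stock installation emits no overrides at all. A standalone sketch of that pattern; the helper is an assumption, not operator code:

```go
package example

import (
	"bytes"
	"fmt"
)

// appendPortOverride emits an XML override only for non-default ports.
func appendPortOverride(b *bytes.Buffer, tag string, port, defaultPort int32) {
	if port == defaultPort {
		return // keep the server's built-in default
	}
	fmt.Fprintf(b, "    <%s>%d</%s>\n", tag, port, tag)
}
```

A caller would invoke it once per port, e.g. appendPortOverride(b, "tcp_port", host.TCPPort, ChDefaultTCPPortNumber).
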
diff --git a/pkg/model/chi/chop_config.go b/pkg/model/chi/chop_config.go
index 6624643a4..b498ff7ad 100644
--- a/pkg/model/chi/chop_config.go
+++ b/pkg/model/chi/chop_config.go
@@ -45,7 +45,7 @@ func hostVersionMatches(host *api.ChiHost, versionConstraint string) bool {
// Default version will also be used in case ClickHouse version is unknown.
// ClickHouse version may be unknown due to host being down - for example, because of incorrect "settings" section.
// ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- return (versionConstraint == "*") || host.Version.Matches(versionConstraint)
+ return (versionConstraint == "*") || host.Runtime.Version.Matches(versionConstraint)
}
// ruleMatches checks whether provided rule (rule set) matches specified `path`
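
The rule above reads: a "*" constraint always applies, which deliberately covers hosts whose ClickHouse version is unknown (for example, hosts that are down because of a bad settings section); any other constraint is evaluated against the detected version. A hedged restatement as a standalone helper (the wrapper itself is illustrative):

```go
package example

import (
	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

// shouldApplyRule mirrors hostVersionMatches: "*" is a catch-all, anything else
// is matched against the host's detected ClickHouse version.
func shouldApplyRule(host *api.ChiHost, versionConstraint string) bool {
	if versionConstraint == "*" {
		return true
	}
	return host.Runtime.Version.Matches(versionConstraint)
}
```
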
diff --git a/pkg/model/chi/const.go b/pkg/model/chi/const.go
index ce8facf5a..3fd25a8aa 100644
--- a/pkg/model/chi/const.go
+++ b/pkg/model/chi/const.go
@@ -16,10 +16,18 @@ package chi
const (
// Default value for ClusterIP service
- templateDefaultsServiceClusterIP = "None"
+ TemplateDefaultsServiceClusterIP = "None"
)
const (
- // .spec.useTemplate.useType
- useTypeMerge = "merge"
+ InternodeClusterSecretEnvName = "CLICKHOUSE_INTERNODE_CLUSTER_SECRET"
+)
+
+// Values for Schema Policy
+const (
+ SchemaPolicyReplicaNone = "None"
+ SchemaPolicyReplicaAll = "All"
+ SchemaPolicyShardNone = "None"
+ SchemaPolicyShardAll = "All"
+ SchemaPolicyShardDistributedTablesOnly = "DistributedTablesOnly"
)
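
The schema-policy values are exported constants as well. A hypothetical consumer could branch on them when deciding what schema to propagate to a new shard; the helper and its descriptions are assumptions, not operator behavior:

```go
package example

import (
	model "github.com/altinity/clickhouse-operator/pkg/model/chi"
)

// describeShardSchemaPolicy maps a shard-level schema policy to a short description.
func describeShardSchemaPolicy(policy string) string {
	switch policy {
	case model.SchemaPolicyShardAll:
		return "replicate schema of all tables to the new shard"
	case model.SchemaPolicyShardDistributedTablesOnly:
		return "replicate schema of distributed tables only"
	case model.SchemaPolicyShardNone:
		return "do not replicate any schema"
	default:
		return "unknown schema policy: " + policy
	}
}
```
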
diff --git a/pkg/model/chi/creator.go b/pkg/model/chi/creator.go
deleted file mode 100644
index dafb9fc9a..000000000
--- a/pkg/model/chi/creator.go
+++ /dev/null
@@ -1,1429 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "fmt"
-
- "github.com/gosimple/slug"
- apps "k8s.io/api/apps/v1"
- core "k8s.io/api/core/v1"
- policy "k8s.io/api/policy/v1"
- meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/intstr"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
- "github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-// Creator specifies creator object
-type Creator struct {
- chi *api.ClickHouseInstallation
- chConfigFilesGenerator *ClickHouseConfigFilesGenerator
- labels *Labeler
- annotations *Annotator
- a log.Announcer
-}
-
-// NewCreator creates new Creator object
-func NewCreator(chi *api.ClickHouseInstallation) *Creator {
- return &Creator{
- chi: chi,
- chConfigFilesGenerator: NewClickHouseConfigFilesGenerator(NewClickHouseConfigGenerator(chi), chop.Config()),
- labels: NewLabeler(chi),
- annotations: NewAnnotator(chi),
- a: log.M(chi),
- }
-}
-
-// CreateServiceCHI creates new core.Service for specified CHI
-func (c *Creator) CreateServiceCHI() *core.Service {
- serviceName := CreateCHIServiceName(c.chi)
- ownerReferences := getOwnerReferences(c.chi)
-
- c.a.V(1).F().Info("%s/%s", c.chi.Namespace, serviceName)
- if template, ok := c.chi.GetCHIServiceTemplate(); ok {
- // .templates.ServiceTemplate specified
- return c.createServiceFromTemplate(
- template,
- c.chi.Namespace,
- serviceName,
- c.labels.getServiceCHI(c.chi),
- c.annotations.getServiceCHI(c.chi),
- c.labels.getSelectorCHIScopeReady(),
- ownerReferences,
- macro(c.chi),
- )
- }
-
- // Create default Service
- // We do not have .templates.ServiceTemplate specified or it is incorrect
- svc := &core.Service{
- ObjectMeta: meta.ObjectMeta{
- Name: serviceName,
- Namespace: c.chi.Namespace,
- Labels: macro(c.chi).Map(c.labels.getServiceCHI(c.chi)),
- Annotations: macro(c.chi).Map(c.annotations.getServiceCHI(c.chi)),
- OwnerReferences: ownerReferences,
- },
- Spec: core.ServiceSpec{
- // ClusterIP: templateDefaultsServiceClusterIP,
- Ports: []core.ServicePort{
- {
- Name: chDefaultHTTPPortName,
- Protocol: core.ProtocolTCP,
- Port: chDefaultHTTPPortNumber,
- TargetPort: intstr.FromString(chDefaultHTTPPortName),
- },
- {
- Name: chDefaultTCPPortName,
- Protocol: core.ProtocolTCP,
- Port: chDefaultTCPPortNumber,
- TargetPort: intstr.FromString(chDefaultTCPPortName),
- },
- },
- Selector: c.labels.getSelectorCHIScopeReady(),
- Type: core.ServiceTypeLoadBalancer,
- ExternalTrafficPolicy: core.ServiceExternalTrafficPolicyTypeLocal,
- },
- }
- MakeObjectVersion(&svc.ObjectMeta, svc)
- return svc
-}
-
-// CreateServiceCluster creates new core.Service for specified Cluster
-func (c *Creator) CreateServiceCluster(cluster *api.Cluster) *core.Service {
- serviceName := CreateClusterServiceName(cluster)
- ownerReferences := getOwnerReferences(c.chi)
-
- c.a.V(1).F().Info("%s/%s", cluster.Address.Namespace, serviceName)
- if template, ok := cluster.GetServiceTemplate(); ok {
- // .templates.ServiceTemplate specified
- return c.createServiceFromTemplate(
- template,
- cluster.Address.Namespace,
- serviceName,
- c.labels.getServiceCluster(cluster),
- c.annotations.getServiceCluster(cluster),
- getSelectorClusterScopeReady(cluster),
- ownerReferences,
- macro(cluster),
- )
- }
- // No template specified, no need to create service
- return nil
-}
-
-// CreateServiceShard creates new core.Service for specified Shard
-func (c *Creator) CreateServiceShard(shard *api.ChiShard) *core.Service {
- serviceName := CreateShardServiceName(shard)
- ownerReferences := getOwnerReferences(c.chi)
-
- c.a.V(1).F().Info("%s/%s", shard.Address.Namespace, serviceName)
- if template, ok := shard.GetServiceTemplate(); ok {
- // .templates.ServiceTemplate specified
- return c.createServiceFromTemplate(
- template,
- shard.Address.Namespace,
- serviceName,
- c.labels.getServiceShard(shard),
- c.annotations.getServiceShard(shard),
- getSelectorShardScopeReady(shard),
- ownerReferences,
- macro(shard),
- )
- }
- // No template specified, no need to create service
- return nil
-}
-
-// CreateServiceHost creates new core.Service for specified host
-func (c *Creator) CreateServiceHost(host *api.ChiHost) *core.Service {
- serviceName := CreateStatefulSetServiceName(host)
- statefulSetName := CreateStatefulSetName(host)
- ownerReferences := getOwnerReferences(c.chi)
-
- c.a.V(1).F().Info("%s/%s for Set %s", host.Address.Namespace, serviceName, statefulSetName)
- if template, ok := host.GetServiceTemplate(); ok {
- // .templates.ServiceTemplate specified
- return c.createServiceFromTemplate(
- template,
- host.Address.Namespace,
- serviceName,
- c.labels.getServiceHost(host),
- c.annotations.getServiceHost(host),
- GetSelectorHostScope(host),
- ownerReferences,
- macro(host),
- )
- }
-
- // Create default Service
- // We do not have .templates.ServiceTemplate specified or it is incorrect
- svc := &core.Service{
- ObjectMeta: meta.ObjectMeta{
- Name: serviceName,
- Namespace: host.Address.Namespace,
- Labels: macro(host).Map(c.labels.getServiceHost(host)),
- Annotations: macro(host).Map(c.annotations.getServiceHost(host)),
- OwnerReferences: ownerReferences,
- },
- Spec: core.ServiceSpec{
- Selector: GetSelectorHostScope(host),
- ClusterIP: templateDefaultsServiceClusterIP,
- Type: "ClusterIP",
- PublishNotReadyAddresses: true,
- },
- }
- appendServicePorts(svc, host)
- MakeObjectVersion(&svc.ObjectMeta, svc)
- return svc
-}
-
-func appendServicePorts(service *core.Service, host *api.ChiHost) {
- if api.IsPortAssigned(host.TCPPort) {
- service.Spec.Ports = append(service.Spec.Ports,
- core.ServicePort{
- Name: chDefaultTCPPortName,
- Protocol: core.ProtocolTCP,
- Port: host.TCPPort,
- TargetPort: intstr.FromInt(int(host.TCPPort)),
- },
- )
- }
- if api.IsPortAssigned(host.TLSPort) {
- service.Spec.Ports = append(service.Spec.Ports,
- core.ServicePort{
- Name: chDefaultTLSPortName,
- Protocol: core.ProtocolTCP,
- Port: host.TLSPort,
- TargetPort: intstr.FromInt(int(host.TLSPort)),
- },
- )
- }
- if api.IsPortAssigned(host.HTTPPort) {
- service.Spec.Ports = append(service.Spec.Ports,
- core.ServicePort{
- Name: chDefaultHTTPPortName,
- Protocol: core.ProtocolTCP,
- Port: host.HTTPPort,
- TargetPort: intstr.FromInt(int(host.HTTPPort)),
- },
- )
- }
- if api.IsPortAssigned(host.HTTPSPort) {
- service.Spec.Ports = append(service.Spec.Ports,
- core.ServicePort{
- Name: chDefaultHTTPSPortName,
- Protocol: core.ProtocolTCP,
- Port: host.HTTPSPort,
- TargetPort: intstr.FromInt(int(host.HTTPSPort)),
- },
- )
- }
- if api.IsPortAssigned(host.InterserverHTTPPort) {
- service.Spec.Ports = append(service.Spec.Ports,
- core.ServicePort{
- Name: chDefaultInterserverHTTPPortName,
- Protocol: core.ProtocolTCP,
- Port: host.InterserverHTTPPort,
- TargetPort: intstr.FromInt(int(host.InterserverHTTPPort)),
- },
- )
- }
-}
-
-// verifyServiceTemplatePorts verifies ChiServiceTemplate to have reasonable ports specified
-func (c *Creator) verifyServiceTemplatePorts(template *api.ChiServiceTemplate) error {
- for i := range template.Spec.Ports {
- servicePort := &template.Spec.Ports[i]
- if api.IsPortInvalid(servicePort.Port) {
- msg := fmt.Sprintf("template:%s INCORRECT PORT:%d", template.Name, servicePort.Port)
- c.a.V(1).F().Warning(msg)
- return fmt.Errorf(msg)
- }
- }
- return nil
-}
-
-// createServiceFromTemplate create Service from ChiServiceTemplate and additional info
-func (c *Creator) createServiceFromTemplate(
- template *api.ChiServiceTemplate,
- namespace string,
- name string,
- labels map[string]string,
- annotations map[string]string,
- selector map[string]string,
- ownerReferences []meta.OwnerReference,
- macro *macrosEngine,
-) *core.Service {
-
- // Verify Ports
- if err := c.verifyServiceTemplatePorts(template); err != nil {
- return nil
- }
-
- // Create Service
- service := &core.Service{
- ObjectMeta: *template.ObjectMeta.DeepCopy(),
- Spec: *template.Spec.DeepCopy(),
- }
-
- // Overwrite .name and .namespace - they are not allowed to be specified in template
- service.Name = name
- service.Namespace = namespace
- service.OwnerReferences = ownerReferences
-
- // Combine labels and annotations
- service.Labels = macro.Map(util.MergeStringMapsOverwrite(service.Labels, labels))
- service.Annotations = macro.Map(util.MergeStringMapsOverwrite(service.Annotations, annotations))
-
- // Append provided Selector to already specified Selector in template
- service.Spec.Selector = util.MergeStringMapsOverwrite(service.Spec.Selector, selector)
-
- // And after the object is ready we can put version label
- MakeObjectVersion(&service.ObjectMeta, service)
-
- return service
-}
-
-// CreateConfigMapCHICommon creates new core.ConfigMap
-func (c *Creator) CreateConfigMapCHICommon(options *ClickHouseConfigFilesGeneratorOptions) *core.ConfigMap {
- cm := &core.ConfigMap{
- ObjectMeta: meta.ObjectMeta{
- Name: CreateConfigMapCommonName(c.chi),
- Namespace: c.chi.Namespace,
- Labels: macro(c.chi).Map(c.labels.getConfigMapCHICommon()),
- Annotations: macro(c.chi).Map(c.annotations.getConfigMapCHICommon()),
- OwnerReferences: getOwnerReferences(c.chi),
- },
- // Data contains several sections which are to be several xml chopConfig files
- Data: c.chConfigFilesGenerator.CreateConfigFilesGroupCommon(options),
- }
- // And after the object is ready we can put version label
- MakeObjectVersion(&cm.ObjectMeta, cm)
- return cm
-}
-
-// CreateConfigMapCHICommonUsers creates new core.ConfigMap
-func (c *Creator) CreateConfigMapCHICommonUsers() *core.ConfigMap {
- cm := &core.ConfigMap{
- ObjectMeta: meta.ObjectMeta{
- Name: CreateConfigMapCommonUsersName(c.chi),
- Namespace: c.chi.Namespace,
- Labels: macro(c.chi).Map(c.labels.getConfigMapCHICommonUsers()),
- Annotations: macro(c.chi).Map(c.annotations.getConfigMapCHICommonUsers()),
- OwnerReferences: getOwnerReferences(c.chi),
- },
- // Data contains several sections which are to be several xml chopConfig files
- Data: c.chConfigFilesGenerator.CreateConfigFilesGroupUsers(),
- }
- // And after the object is ready we can put version label
- MakeObjectVersion(&cm.ObjectMeta, cm)
- return cm
-}
-
-// createConfigMapHost creates new core.ConfigMap
-func (c *Creator) createConfigMapHost(host *api.ChiHost, name string, data map[string]string) *core.ConfigMap {
- cm := &core.ConfigMap{
- ObjectMeta: meta.ObjectMeta{
- Name: name,
- Namespace: host.Address.Namespace,
- Labels: macro(host).Map(c.labels.getConfigMapHost(host)),
- Annotations: macro(host).Map(c.annotations.getConfigMapHost(host)),
- OwnerReferences: getOwnerReferences(c.chi),
- },
- Data: data,
- }
- // And after the object is ready we can put version label
- MakeObjectVersion(&cm.ObjectMeta, cm)
- return cm
-}
-
-// CreateConfigMapHost creates new core.ConfigMap
-func (c *Creator) CreateConfigMapHost(host *api.ChiHost) *core.ConfigMap {
- return c.createConfigMapHost(host, CreateConfigMapHostName(host), c.chConfigFilesGenerator.CreateConfigFilesGroupHost(host))
-}
-
-// CreateConfigMapHostMigration creates new core.ConfigMap
-//func (c *Creator) CreateConfigMapHostMigration(host *api.ChiHost, data map[string]string) *core.ConfigMap {
-// return c.createConfigMapHost(host, CreateConfigMapHostMigrationName(host), data)
-//}
-
-// MakeConfigMapData makes data for a config map
-func (c *Creator) MakeConfigMapData(names, files []string) map[string]string {
- if len(names) < 1 {
- return nil
- }
- res := make(map[string]string)
- for i := range names {
- name := fmt.Sprintf("%08d_%s.sql", i+1, slug.Make(names[i]))
- file := files[i]
- res[name] = file
- }
- return res
-}
-
-// CreateStatefulSet creates new apps.StatefulSet
-func (c *Creator) CreateStatefulSet(host *api.ChiHost, shutdown bool) *apps.StatefulSet {
- statefulSet := &apps.StatefulSet{
- ObjectMeta: meta.ObjectMeta{
- Name: CreateStatefulSetName(host),
- Namespace: host.Address.Namespace,
- Labels: macro(host).Map(c.labels.getHostScope(host, true)),
- Annotations: macro(host).Map(c.annotations.getHostScope(host)),
- OwnerReferences: getOwnerReferences(c.chi),
- },
- Spec: apps.StatefulSetSpec{
- Replicas: host.GetStatefulSetReplicasNum(shutdown),
- ServiceName: CreateStatefulSetServiceName(host),
- Selector: &meta.LabelSelector{
- MatchLabels: GetSelectorHostScope(host),
- },
-
- // IMPORTANT
- // Template is to be setup later
- Template: core.PodTemplateSpec{},
-
- // IMPORTANT
- // VolumeClaimTemplates are to be setup later
- VolumeClaimTemplates: nil,
-
- PodManagementPolicy: apps.OrderedReadyPodManagement,
- UpdateStrategy: apps.StatefulSetUpdateStrategy{
- Type: apps.RollingUpdateStatefulSetStrategyType,
- },
- RevisionHistoryLimit: chop.Config().GetRevisionHistoryLimit(),
- },
- }
-
- c.setupStatefulSetPodTemplate(statefulSet, host)
- c.setupStatefulSetVolumeClaimTemplates(statefulSet, host)
- MakeObjectVersion(&statefulSet.ObjectMeta, statefulSet)
-
- return statefulSet
-}
-
-// PreparePersistentVolume prepares PV labels
-func (c *Creator) PreparePersistentVolume(pv *core.PersistentVolume, host *api.ChiHost) *core.PersistentVolume {
- pv.Labels = macro(host).Map(c.labels.getPV(pv, host))
- pv.Annotations = macro(host).Map(c.annotations.getPV(pv, host))
- // And after the object is ready we can put version label
- MakeObjectVersion(&pv.ObjectMeta, pv)
- return pv
-}
-
-// PreparePersistentVolumeClaim prepares PVC - labels and annotations
-func (c *Creator) PreparePersistentVolumeClaim(
- pvc *core.PersistentVolumeClaim,
- host *api.ChiHost,
- template *api.ChiVolumeClaimTemplate,
-) *core.PersistentVolumeClaim {
- pvc.Labels = macro(host).Map(c.labels.getPVC(pvc, host, template))
- pvc.Annotations = macro(host).Map(c.annotations.getPVC(pvc, host, template))
- // And after the object is ready we can put version label
- MakeObjectVersion(&pvc.ObjectMeta, pvc)
- return pvc
-}
-
-// setupStatefulSetPodTemplate performs PodTemplate setup of StatefulSet
-func (c *Creator) setupStatefulSetPodTemplate(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- // Process Pod Template
- podTemplate := c.getPodTemplate(host)
- c.statefulSetApplyPodTemplate(statefulSet, podTemplate, host)
-
- // Post-process StatefulSet
- ensureStatefulSetTemplateIntegrity(statefulSet, host)
- setupEnvVars(statefulSet, host)
- c.personalizeStatefulSetTemplate(statefulSet, host)
-}
-
-// ensureStatefulSetTemplateIntegrity
-func ensureStatefulSetTemplateIntegrity(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- ensureClickHouseContainerSpecified(statefulSet, host)
- ensureProbesSpecified(statefulSet, host)
- ensureNamedPortsSpecified(statefulSet, host)
-}
-
-// setupEnvVars setup ENV vars for clickhouse container
-func setupEnvVars(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- container, ok := getClickHouseContainer(statefulSet)
- if !ok {
- return
- }
-
- container.Env = append(container.Env, host.GetCHI().Attributes.AdditionalEnvVars...)
-}
-
-// ensureClickHouseContainerSpecified
-func ensureClickHouseContainerSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- _, ok := getClickHouseContainer(statefulSet)
- if ok {
- return
- }
-
- // No ClickHouse container available, let's add one
- addContainer(
- &statefulSet.Spec.Template.Spec,
- newDefaultClickHouseContainer(host),
- )
-}
-
-// ensureClickHouseLogContainerSpecified
-func ensureClickHouseLogContainerSpecified(statefulSet *apps.StatefulSet) {
- _, ok := getClickHouseLogContainer(statefulSet)
- if ok {
- return
- }
-
- // No ClickHouse Log container available, let's add one
-
- addContainer(
- &statefulSet.Spec.Template.Spec,
- newDefaultLogContainer(),
- )
-}
-
-// ensureProbesSpecified
-func ensureProbesSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- container, ok := getClickHouseContainer(statefulSet)
- if !ok {
- return
- }
- if container.LivenessProbe == nil {
- container.LivenessProbe = newDefaultLivenessProbe(host)
- }
- if container.ReadinessProbe == nil {
- container.ReadinessProbe = newDefaultReadinessProbe(host)
- }
-}
-
-// personalizeStatefulSetTemplate
-func (c *Creator) personalizeStatefulSetTemplate(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- // Ensure pod created by this StatefulSet has alias 127.0.0.1
- statefulSet.Spec.Template.Spec.HostAliases = []core.HostAlias{
- {
- IP: "127.0.0.1",
- Hostnames: []string{CreatePodHostname(host)},
- },
- }
-
- // Setup volumes
- c.statefulSetSetupVolumes(statefulSet, host)
- // Setup statefulSet according to troubleshoot mode (if any)
- c.setupTroubleshoot(statefulSet)
- // Setup dedicated log container
- c.setupLogContainer(statefulSet, host)
-}
-
-// setupTroubleshoot
-func (c *Creator) setupTroubleshoot(statefulSet *apps.StatefulSet) {
- if !c.chi.IsTroubleshoot() {
- // We are not troubleshooting
- return
- }
-
- container, ok := getClickHouseContainer(statefulSet)
- if !ok {
- // Unable to locate ClickHouse container
- return
- }
-
- // Let's setup troubleshooting in ClickHouse container
-
- sleep := " || sleep 1800"
- if len(container.Command) > 0 {
- // In case we have user-specified command, let's
- // append troubleshooting-capable tail and hope for the best
- container.Command[len(container.Command)-1] += sleep
- } else {
- // Assume standard ClickHouse container is used
- // Substitute entrypoint with troubleshooting-capable command
- container.Command = []string{
- "/bin/sh",
- "-c",
- "/entrypoint.sh" + sleep,
- }
- }
- // Appended `sleep` command makes Pod unable to respond to probes, and probes would cause unexpected restart.
- // Thus we need to disable all probes in troubleshooting mode.
- container.LivenessProbe = nil
- container.ReadinessProbe = nil
-}
-
-// setupLogContainer
-func (c *Creator) setupLogContainer(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- statefulSetName := CreateStatefulSetName(host)
- // In case we have default LogVolumeClaimTemplate specified - need to append log container to Pod Template
- if host.Templates.HasLogVolumeClaimTemplate() {
- ensureClickHouseLogContainerSpecified(statefulSet)
-
- c.a.V(1).F().Info("add log container for statefulSet %s", statefulSetName)
- }
-}
-
-// getPodTemplate gets Pod Template to be used to create StatefulSet
-func (c *Creator) getPodTemplate(host *api.ChiHost) *api.ChiPodTemplate {
- statefulSetName := CreateStatefulSetName(host)
-
- // Which pod template would be used - either explicitly defined in or a default one
- podTemplate, ok := host.GetPodTemplate()
- if ok {
- // Host references known PodTemplate
- // Make local copy of this PodTemplate, in order not to spoil the original common-used template
- podTemplate = podTemplate.DeepCopy()
- c.a.V(3).F().Info("statefulSet %s use custom template: %s", statefulSetName, podTemplate.Name)
- } else {
- // Host references UNKNOWN PodTemplate, will use default one
- podTemplate = newDefaultPodTemplate(statefulSetName, host)
- c.a.V(3).F().Info("statefulSet %s use default generated template", statefulSetName)
- }
-
- // Here we have local copy of Pod Template, to be used to create StatefulSet
- // Now we can customize this Pod Template for particular host
-
- prepareAffinity(podTemplate, host)
-
- return podTemplate
-}
-
-// statefulSetSetupVolumes setup all volumes
-func (c *Creator) statefulSetSetupVolumes(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- c.statefulSetSetupVolumesForConfigMaps(statefulSet, host)
- c.statefulSetSetupVolumesForSecrets(statefulSet, host)
-}
-
-// statefulSetSetupVolumesForConfigMaps adds to each container in the Pod VolumeMount objects
-func (c *Creator) statefulSetSetupVolumesForConfigMaps(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- configMapHostName := CreateConfigMapHostName(host)
- configMapCommonName := CreateConfigMapCommonName(c.chi)
- configMapCommonUsersName := CreateConfigMapCommonUsersName(c.chi)
-
- // Add all ConfigMap objects as Volume objects of type ConfigMap
- c.statefulSetAppendVolumes(
- statefulSet,
- newVolumeForConfigMap(configMapCommonName),
- newVolumeForConfigMap(configMapCommonUsersName),
- newVolumeForConfigMap(configMapHostName),
- //newVolumeForConfigMap(configMapHostMigrationName),
- )
-
- // And reference these Volumes in each Container via VolumeMount
- // So Pod will have ConfigMaps mounted as Volumes
- for i := range statefulSet.Spec.Template.Spec.Containers {
- // Convenience wrapper
- container := &statefulSet.Spec.Template.Spec.Containers[i]
- c.containerAppendVolumeMounts(
- container,
- newVolumeMount(configMapCommonName, dirPathCommonConfig),
- newVolumeMount(configMapCommonUsersName, dirPathUsersConfig),
- newVolumeMount(configMapHostName, dirPathHostConfig),
- )
- }
-}
-
-// statefulSetSetupVolumesForSecrets adds to each container in the Pod VolumeMount objects
-func (c *Creator) statefulSetSetupVolumesForSecrets(statefulSet *apps.StatefulSet, host *api.ChiHost) {
-
- // Add all ConfigMap objects as Volume objects of type ConfigMap
- c.statefulSetAppendVolumes(
- statefulSet,
- host.GetCHI().Attributes.AdditionalVolumes...,
- )
-
- // And reference these Volumes in each Container via VolumeMount
- // So Pod will have Secrets mounted as Volumes
- for i := range statefulSet.Spec.Template.Spec.Containers {
- // Convenience wrapper
- container := &statefulSet.Spec.Template.Spec.Containers[i]
- c.containerAppendVolumeMounts(
- container,
- host.GetCHI().Attributes.AdditionalVolumeMounts...,
- )
- }
-}
-
-// statefulSetAppendUsedPVCTemplates appends all PVC templates which are used (referenced by name) by containers
-// to the StatefulSet.Spec.VolumeClaimTemplates list
-func (c *Creator) statefulSetAppendUsedPVCTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- // VolumeClaimTemplates, that are directly referenced in containers' VolumeMount object(s)
- // are appended to StatefulSet's Spec.VolumeClaimTemplates slice
- //
- // Deal with `volumeMounts` of a `container`, located by the path:
- // .spec.templates.podTemplates.*.spec.containers.volumeMounts.*
- for i := range statefulSet.Spec.Template.Spec.Containers {
- // Convenience wrapper
- container := &statefulSet.Spec.Template.Spec.Containers[i]
- for j := range container.VolumeMounts {
- if volumeClaimTemplate, ok := c.getVolumeClaimTemplate(&container.VolumeMounts[j]); ok {
- c.statefulSetAppendPVCTemplate(statefulSet, host, volumeClaimTemplate)
- }
- }
- }
-}
-
-// statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates
-// appends VolumeMounts for Data and Log VolumeClaimTemplates on all containers.
-// Creates VolumeMounts for Data and Log volumes in case these volume templates are specified in `templates`.
-func (c *Creator) statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- // Mount all named (data and log so far) VolumeClaimTemplates into all containers
- for i := range statefulSet.Spec.Template.Spec.Containers {
- // Convenience wrapper
- container := &statefulSet.Spec.Template.Spec.Containers[i]
- c.containerAppendVolumeMounts(
- container,
- newVolumeMount(host.Templates.GetDataVolumeClaimTemplate(), dirPathClickHouseData),
- )
- c.containerAppendVolumeMounts(
- container,
- newVolumeMount(host.Templates.GetLogVolumeClaimTemplate(), dirPathClickHouseLog),
- )
- }
-}
-
-// setupStatefulSetVolumeClaimTemplates performs VolumeClaimTemplate setup for Containers in PodTemplate of a StatefulSet
-func (c *Creator) setupStatefulSetVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- c.statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates(statefulSet, host)
- c.statefulSetAppendUsedPVCTemplates(statefulSet, host)
-}
-
-// statefulSetApplyPodTemplate fills StatefulSet.Spec.Template with data from provided ChiPodTemplate
-func (c *Creator) statefulSetApplyPodTemplate(
- statefulSet *apps.StatefulSet,
- template *api.ChiPodTemplate,
- host *api.ChiHost,
-) {
- // StatefulSet's pod template is not directly compatible with ChiPodTemplate,
- // we need to extract some fields from ChiPodTemplate and apply on StatefulSet
- statefulSet.Spec.Template = core.PodTemplateSpec{
- ObjectMeta: meta.ObjectMeta{
- Name: template.Name,
- Labels: macro(host).Map(util.MergeStringMapsOverwrite(
- c.labels.getHostScopeReady(host, true),
- template.ObjectMeta.Labels,
- )),
- Annotations: macro(host).Map(util.MergeStringMapsOverwrite(
- c.annotations.getHostScope(host),
- template.ObjectMeta.Annotations,
- )),
- },
- Spec: *template.Spec.DeepCopy(),
- }
-
- if statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds == nil {
- statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds = chop.Config().GetTerminationGracePeriod()
- }
-}
-
-// getContainer gets container from the StatefulSet either by name or by index
-func getContainer(statefulSet *apps.StatefulSet, name string, index int) (*core.Container, bool) {
- if len(name) > 0 {
- // Find by name
- for i := range statefulSet.Spec.Template.Spec.Containers {
- container := &statefulSet.Spec.Template.Spec.Containers[i]
- if container.Name == name {
- return container, true
- }
- }
- }
-
- if index >= 0 {
- // Find by index
- if len(statefulSet.Spec.Template.Spec.Containers) > index {
- return &statefulSet.Spec.Template.Spec.Containers[index], true
- }
- }
-
- return nil, false
-}
-
-// getClickHouseContainer
-func getClickHouseContainer(statefulSet *apps.StatefulSet) (*core.Container, bool) {
- return getContainer(statefulSet, clickHouseContainerName, 0)
-}
-
-// getClickHouseLogContainer
-func getClickHouseLogContainer(statefulSet *apps.StatefulSet) (*core.Container, bool) {
- return getContainer(statefulSet, clickHouseLogContainerName, -1)
-}
-
-// IsStatefulSetGeneration returns whether StatefulSet has requested generation or not
-func IsStatefulSetGeneration(statefulSet *apps.StatefulSet, generation int64) bool {
- if statefulSet == nil {
- return false
- }
-
- // StatefulSet has .spec generation we are looking for
- return (statefulSet.Generation == generation) &&
- // and this .spec generation is being applied to replicas - it is observed right now
- (statefulSet.Status.ObservedGeneration == statefulSet.Generation) &&
- // and all replicas are of expected generation
- (statefulSet.Status.CurrentReplicas == *statefulSet.Spec.Replicas) &&
- // and all replicas are updated - meaning rolling update completed over all replicas
- (statefulSet.Status.UpdatedReplicas == *statefulSet.Spec.Replicas) &&
- // and current revision is an updated one - meaning rolling update completed over all replicas
- (statefulSet.Status.CurrentRevision == statefulSet.Status.UpdateRevision)
-}
-
-// IsStatefulSetReady returns whether StatefulSet is ready
-func IsStatefulSetReady(statefulSet *apps.StatefulSet) bool {
- if statefulSet == nil {
- return false
- }
-
- if statefulSet.Spec.Replicas == nil {
- return false
- }
- // All replicas are in "Ready" status - meaning ready to be used - no failure inside
- return statefulSet.Status.ReadyReplicas == *statefulSet.Spec.Replicas
-}
-
-// IsStatefulSetNotReady returns whether StatefulSet is not ready
-func IsStatefulSetNotReady(statefulSet *apps.StatefulSet) bool {
- if statefulSet == nil {
- return false
- }
-
- return !IsStatefulSetReady(statefulSet)
-}
-
-// StrStatefulSetStatus returns human-friendly string representation of StatefulSet status
-func StrStatefulSetStatus(status *apps.StatefulSetStatus) string {
- return fmt.Sprintf(
- "ObservedGeneration:%d Replicas:%d ReadyReplicas:%d CurrentReplicas:%d UpdatedReplicas:%d CurrentRevision:%s UpdateRevision:%s",
- status.ObservedGeneration,
- status.Replicas,
- status.ReadyReplicas,
- status.CurrentReplicas,
- status.UpdatedReplicas,
- status.CurrentRevision,
- status.UpdateRevision,
- )
-}
-
-// ensureNamedPortsSpecified
-func ensureNamedPortsSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
- // Ensure ClickHouse container has all named ports specified
- container, ok := getClickHouseContainer(statefulSet)
- if !ok {
- return
- }
- ensurePortByName(container, chDefaultTCPPortName, host.TCPPort)
- ensurePortByName(container, chDefaultTLSPortName, host.TLSPort)
- ensurePortByName(container, chDefaultHTTPPortName, host.HTTPPort)
- ensurePortByName(container, chDefaultHTTPSPortName, host.HTTPSPort)
- ensurePortByName(container, chDefaultInterserverHTTPPortName, host.InterserverHTTPPort)
-}
-
-// ensurePortByName
-func ensurePortByName(container *core.Container, name string, port int32) {
- if api.IsPortUnassigned(port) {
- return
- }
-
- // Find port with specified name
- for i := range container.Ports {
- containerPort := &container.Ports[i]
- if containerPort.Name == name {
- // Assign value to existing port
- containerPort.HostPort = 0
- containerPort.ContainerPort = port
- return
- }
- }
-
- // Port with specified name not found. Need to append
- container.Ports = append(container.Ports, core.ContainerPort{
- Name: name,
- ContainerPort: port,
- })
-}
-
-// NewPodDisruptionBudget creates new PodDisruptionBudget
-func (c *Creator) NewPodDisruptionBudget(cluster *api.Cluster) *policy.PodDisruptionBudget {
- ownerReferences := getOwnerReferences(c.chi)
- return &policy.PodDisruptionBudget{
- ObjectMeta: meta.ObjectMeta{
- Name: fmt.Sprintf("%s-%s", cluster.Address.CHIName, cluster.Address.ClusterName),
- Namespace: c.chi.Namespace,
- Labels: macro(c.chi).Map(c.labels.getClusterScope(cluster)),
- Annotations: macro(c.chi).Map(c.annotations.getClusterScope(cluster)),
- OwnerReferences: ownerReferences,
- },
- Spec: policy.PodDisruptionBudgetSpec{
- Selector: &meta.LabelSelector{
- MatchLabels: getSelectorClusterScope(cluster),
- },
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- }
-}
-
-// setupStatefulSetApplyVolumeMount applies .templates.volumeClaimTemplates.* to a StatefulSet
-func (c *Creator) setupStatefulSetApplyVolumeMount(
- host *api.ChiHost,
- statefulSet *apps.StatefulSet,
- containerName string,
- volumeMount core.VolumeMount,
-) error {
- //
- // Sanity checks
- //
-
- // Specified (referenced from volumeMount) VolumeClaimTemplate has to be available as well
- if _, ok := c.getVolumeClaimTemplate(&volumeMount); !ok {
- // Incorrect/unknown .templates.VolumeClaimTemplate specified
- c.a.V(1).F().Warning("Can not find VolumeClaimTemplate for VolumeMount: %s. Volume claim can not be mounted", volumeMount.Name)
- return nil
- }
-
- // Specified container has to be available
- container := getContainerByName(statefulSet, containerName)
- if container == nil {
- c.a.V(1).F().Warning("Can not find container: %s. Volume claim can not be mounted", containerName)
- return nil
- }
-
- // Looks like all components are in place
-
- // Mount specified (by volumeMount.Name) VolumeClaimTemplate into volumeMount.Path (say into '/var/lib/clickhouse')
- //
- // A container wants to have this VolumeClaimTemplate mounted into `mountPath` in case:
- // 1. This VolumeClaimTemplate is NOT already mounted in the container with any VolumeMount (to avoid double-mount of a VolumeClaimTemplate)
- // 2. And specified `mountPath` (say '/var/lib/clickhouse') is NOT already mounted with any VolumeMount (to avoid double-mount/rewrite into single `mountPath`)
-
- for i := range container.VolumeMounts {
- // Convenience wrapper
- existingVolumeMount := &container.VolumeMounts[i]
-
- // 1. Check whether this VolumeClaimTemplate is already listed in VolumeMount of this container
- if volumeMount.Name == existingVolumeMount.Name {
- // This .templates.VolumeClaimTemplate is already used in VolumeMount
- c.a.V(1).F().Warning(
- "StatefulSet:%s container:%s volumeClaimTemplateName:%s already used. Skip it and all the rest.",
- statefulSet.Name,
- container.Name,
- volumeMount.Name,
- )
- return nil
- }
-
- // 2. Check whether `mountPath` (say '/var/lib/clickhouse') is already mounted
- if volumeMount.MountPath == existingVolumeMount.MountPath {
- // `mountPath` (say /var/lib/clickhouse) is already mounted
- c.a.V(1).F().Warning(
- "StatefulSet:%s container:%s mountPath:%s already used. Skip it and all the rest.",
- statefulSet.Name,
- container.Name,
- volumeMount.MountPath,
- )
- return nil
- }
- }
-
- // This VolumeClaimTemplate is not used explicitly by name and `mountPath` (say /var/lib/clickhouse) is not used also.
- // Let's mount this VolumeClaimTemplate into `mountPath` (say '/var/lib/clickhouse') of a container
- if volumeClaimTemplate, ok := c.getVolumeClaimTemplate(&volumeMount); ok {
- // Add VolumeClaimTemplate to StatefulSet
- c.statefulSetAppendPVCTemplate(statefulSet, host, volumeClaimTemplate)
- // Add VolumeMount to ClickHouse container to `mountPath` point
- c.containerAppendVolumeMounts(
- container,
- volumeMount,
- )
- }
-
- c.a.V(1).F().Info(
- "StatefulSet: %s container: %s mounted VolumeMount: %s onto path: %s",
- statefulSet.Name,
- container.Name,
- volumeMount.Name,
- volumeMount.MountPath,
- )
-
- return nil
-}
-
-// statefulSetAppendVolumes appends multiple Volume(s) to the specified StatefulSet
-func (c *Creator) statefulSetAppendVolumes(statefulSet *apps.StatefulSet, volumes ...core.Volume) {
- statefulSet.Spec.Template.Spec.Volumes = append(
- statefulSet.Spec.Template.Spec.Volumes,
- volumes...,
- )
-}
-
-// containerAppendVolumeMounts appends multiple VolumeMount(s) to the specified container
-func (c *Creator) containerAppendVolumeMounts(container *core.Container, volumeMounts ...core.VolumeMount) {
- for _, volumeMount := range volumeMounts {
- c.containerAppendVolumeMount(container, volumeMount)
- }
-}
-
-// containerAppendVolumeMount appends one VolumeMount to the specified container
-func (c *Creator) containerAppendVolumeMount(container *core.Container, volumeMount core.VolumeMount) {
- //
- // Sanity checks
- //
-
- if container == nil {
- return
- }
-
- // VolumeMount has to have reasonable data - Name and MountPath
- if (volumeMount.Name == "") || (volumeMount.MountPath == "") {
- return
- }
-
- // Check that:
- // 1. Mountable item (VolumeClaimTemplate or Volume) specified in this VolumeMount is NOT already mounted
- // in this container by any other VolumeMount (to avoid double-mount of a mountable item)
- // 2. And specified `mountPath` (say '/var/lib/clickhouse') is NOT already mounted in this container
- // by any VolumeMount (to avoid double-mount/rewrite into single `mountPath`)
- for i := range container.VolumeMounts {
- // Convenience wrapper
- existingVolumeMount := &container.VolumeMounts[i]
-
- // 1. Check whether this mountable item is already listed in VolumeMount of this container
- if volumeMount.Name == existingVolumeMount.Name {
- // This .templates.VolumeClaimTemplate is already used in VolumeMount
- c.a.V(1).F().Warning(
- "container.Name:%s volumeMount.Name:%s already used",
- container.Name,
- volumeMount.Name,
- )
- return
- }
-
- // 2. Check whether `mountPath` (say '/var/lib/clickhouse') is already mounted
- if volumeMount.MountPath == existingVolumeMount.MountPath {
- // `mountPath` (say /var/lib/clickhouse) is already mounted
- c.a.V(1).F().Warning(
- "container.Name:%s volumeMount.MountPath:%s already used",
- container.Name,
- volumeMount.MountPath,
- )
- return
- }
- }
-
- // Add VolumeMount to ClickHouse container to `mountPath` point
- container.VolumeMounts = append(
- container.VolumeMounts,
- volumeMount,
- )
-
- c.a.V(3).F().Info(
- "container:%s volumeMount added: %s on %s",
- container.Name,
- volumeMount.Name,
- volumeMount.MountPath,
- )
-
- return
-}
-
-// createPVC
-func (c *Creator) createPVC(
- name string,
- namespace string,
- host *api.ChiHost,
- spec *core.PersistentVolumeClaimSpec,
-) core.PersistentVolumeClaim {
- persistentVolumeClaim := core.PersistentVolumeClaim{
- TypeMeta: meta.TypeMeta{
- Kind: "PersistentVolumeClaim",
- APIVersion: "v1",
- },
- ObjectMeta: meta.ObjectMeta{
- Name: name,
- Namespace: namespace,
- // TODO
- // this has to wait until proper disk inheritance procedure will be available
- // UPDATE
- // we are close to proper disk inheritance
- // Right now we hit the following error:
- // "Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden"
- Labels: macro(host).Map(c.labels.getHostScope(host, false)),
- Annotations: macro(host).Map(c.annotations.getHostScope(host)),
- },
- // Append copy of PersistentVolumeClaimSpec
- Spec: *spec.DeepCopy(),
- }
- // TODO introduce normalization
- // Overwrite .Spec.VolumeMode
- volumeMode := core.PersistentVolumeFilesystem
- persistentVolumeClaim.Spec.VolumeMode = &volumeMode
-
- return persistentVolumeClaim
-}
-
-// CreatePVC creates PVC
-func (c *Creator) CreatePVC(name string, host *api.ChiHost, spec *core.PersistentVolumeClaimSpec) *core.PersistentVolumeClaim {
- pvc := c.createPVC(name, host.Address.Namespace, host, spec)
- return &pvc
-}
-
-// statefulSetAppendPVCTemplate appends to StatefulSet.Spec.VolumeClaimTemplates new entry with data from provided 'src' ChiVolumeClaimTemplate
-func (c *Creator) statefulSetAppendPVCTemplate(
- statefulSet *apps.StatefulSet,
- host *api.ChiHost,
- volumeClaimTemplate *api.ChiVolumeClaimTemplate,
-) {
- // Since we have the same names for PVs produced from both VolumeClaimTemplates and Volumes,
- // we need to check naming for all of them
-
- // Check whether provided VolumeClaimTemplate is already listed in statefulSet.Spec.VolumeClaimTemplates
- for i := range statefulSet.Spec.VolumeClaimTemplates {
- // Convenience wrapper
- _volumeClaimTemplate := &statefulSet.Spec.VolumeClaimTemplates[i]
- if _volumeClaimTemplate.Name == volumeClaimTemplate.Name {
- // This VolumeClaimTemplate is already listed in statefulSet.Spec.VolumeClaimTemplates
- // No need to add it second time
- return
- }
- }
-
- // Check whether provided VolumeClaimTemplate is already listed in statefulSet.Spec.Template.Spec.Volumes
- for i := range statefulSet.Spec.Template.Spec.Volumes {
- // Convenience wrapper
- _volume := &statefulSet.Spec.Template.Spec.Volumes[i]
- if _volume.Name == volumeClaimTemplate.Name {
- // This VolumeClaimTemplate is already listed in statefulSet.Spec.Template.Spec.Volumes
- // No need to add it second time
- return
- }
- }
-
- // Provided VolumeClaimTemplate is not listed neither in
- // statefulSet.Spec.Template.Spec.Volumes
- // nor in
- // statefulSet.Spec.VolumeClaimTemplates
- // so, let's add it
-
- if c.OperatorShouldCreatePVC(host, volumeClaimTemplate) {
- claimName := CreatePVCNameByVolumeClaimTemplate(host, volumeClaimTemplate)
- statefulSet.Spec.Template.Spec.Volumes = append(
- statefulSet.Spec.Template.Spec.Volumes,
- newVolumeForPVC(volumeClaimTemplate.Name, claimName),
- )
- } else {
- statefulSet.Spec.VolumeClaimTemplates = append(
- statefulSet.Spec.VolumeClaimTemplates,
- // For templates we should not specify namespace where PVC would be located
- c.createPVC(volumeClaimTemplate.Name, "", host, &volumeClaimTemplate.Spec),
- )
- }
-}
-
-// OperatorShouldCreatePVC checks whether operator should create PVC for specified volumeCLimaTemplate
-func (c *Creator) OperatorShouldCreatePVC(host *api.ChiHost, volumeClaimTemplate *api.ChiVolumeClaimTemplate) bool {
- return getPVCProvisioner(host, volumeClaimTemplate) == api.PVCProvisionerOperator
-}
-
-// CreateClusterSecret creates cluster secret
-func (c *Creator) CreateClusterSecret(name string) *core.Secret {
- return &core.Secret{
- ObjectMeta: meta.ObjectMeta{
- Namespace: c.chi.Namespace,
- Name: name,
- },
- StringData: map[string]string{
- "secret": util.RandStringRange(10, 20),
- },
- Type: core.SecretTypeOpaque,
- }
-}
-
-// newDefaultHostTemplate returns default Host Template to be used with StatefulSet
-func newDefaultHostTemplate(name string) *api.ChiHostTemplate {
- return &api.ChiHostTemplate{
- Name: name,
- PortDistribution: []api.ChiPortDistribution{
- {
- Type: deployment.PortDistributionUnspecified,
- },
- },
- Spec: api.ChiHost{
- Name: "",
- TCPPort: api.PortUnassigned(),
- TLSPort: api.PortUnassigned(),
- HTTPPort: api.PortUnassigned(),
- HTTPSPort: api.PortUnassigned(),
- InterserverHTTPPort: api.PortUnassigned(),
- Templates: nil,
- },
- }
-}
-
-// newDefaultHostTemplateForHostNetwork
-func newDefaultHostTemplateForHostNetwork(name string) *api.ChiHostTemplate {
- return &api.ChiHostTemplate{
- Name: name,
- PortDistribution: []api.ChiPortDistribution{
- {
- Type: deployment.PortDistributionClusterScopeIndex,
- },
- },
- Spec: api.ChiHost{
- Name: "",
- TCPPort: api.PortUnassigned(),
- TLSPort: api.PortUnassigned(),
- HTTPPort: api.PortUnassigned(),
- HTTPSPort: api.PortUnassigned(),
- InterserverHTTPPort: api.PortUnassigned(),
- Templates: nil,
- },
- }
-}
-
-// newDefaultPodTemplate returns default Pod Template to be used with StatefulSet
-func newDefaultPodTemplate(name string, host *api.ChiHost) *api.ChiPodTemplate {
- podTemplate := &api.ChiPodTemplate{
- Name: name,
- Spec: core.PodSpec{
- Containers: []core.Container{},
- Volumes: []core.Volume{},
- },
- }
-
- addContainer(&podTemplate.Spec, newDefaultClickHouseContainer(host))
-
- return podTemplate
-}
-
-// newDefaultLivenessProbe returns default liveness probe
-func newDefaultLivenessProbe(host *api.ChiHost) *core.Probe {
- // Introduce http probe in case http port is specified
- if api.IsPortAssigned(host.HTTPPort) {
- return &core.Probe{
- ProbeHandler: core.ProbeHandler{
- HTTPGet: &core.HTTPGetAction{
- Path: "/ping",
- Port: intstr.Parse(chDefaultHTTPPortName), // What if it is not a default?
- },
- },
- InitialDelaySeconds: 60,
- PeriodSeconds: 3,
- FailureThreshold: 10,
- }
- }
-
- // Introduce https probe in case https port is specified
- if api.IsPortAssigned(host.HTTPSPort) {
- return &core.Probe{
- ProbeHandler: core.ProbeHandler{
- HTTPGet: &core.HTTPGetAction{
- Path: "/ping",
- Port: intstr.Parse(chDefaultHTTPSPortName), // What if it is not a default?
- Scheme: core.URISchemeHTTPS,
- },
- },
- InitialDelaySeconds: 60,
- PeriodSeconds: 3,
- FailureThreshold: 10,
- }
- }
-
- // Probe is not available
- return nil
-}
-
-// newDefaultReadinessProbe returns default readiness probe
-func newDefaultReadinessProbe(host *api.ChiHost) *core.Probe {
- // Introduce http probe in case http port is specified
- if api.IsPortAssigned(host.HTTPPort) {
- return &core.Probe{
- ProbeHandler: core.ProbeHandler{
- HTTPGet: &core.HTTPGetAction{
- Path: "/ping",
- Port: intstr.Parse(chDefaultHTTPPortName), // What if port name is not a default?
- },
- },
- InitialDelaySeconds: 10,
- PeriodSeconds: 3,
- }
- }
-
- // Introduce https probe in case https port is specified
- if api.IsPortAssigned(host.HTTPSPort) {
- return &core.Probe{
- ProbeHandler: core.ProbeHandler{
- HTTPGet: &core.HTTPGetAction{
- Path: "/ping",
- Port: intstr.Parse(chDefaultHTTPSPortName), // What if port name is not a default?
- Scheme: core.URISchemeHTTPS,
- },
- },
- InitialDelaySeconds: 10,
- PeriodSeconds: 3,
- }
- }
-
- // Probe is not available
- return nil
-}
-
-func appendContainerPorts(container *core.Container, host *api.ChiHost) {
- if api.IsPortAssigned(host.TCPPort) {
- container.Ports = append(container.Ports,
- core.ContainerPort{
- Name: chDefaultTCPPortName,
- ContainerPort: host.TCPPort,
- Protocol: core.ProtocolTCP,
- },
- )
- }
- if api.IsPortAssigned(host.TLSPort) {
- container.Ports = append(container.Ports,
- core.ContainerPort{
- Name: chDefaultTLSPortName,
- ContainerPort: host.TLSPort,
- Protocol: core.ProtocolTCP,
- },
- )
- }
- if api.IsPortAssigned(host.HTTPPort) {
- container.Ports = append(container.Ports,
- core.ContainerPort{
- Name: chDefaultHTTPPortName,
- ContainerPort: host.HTTPPort,
- Protocol: core.ProtocolTCP,
- },
- )
- }
- if api.IsPortAssigned(host.HTTPSPort) {
- container.Ports = append(container.Ports,
- core.ContainerPort{
- Name: chDefaultHTTPSPortName,
- ContainerPort: host.HTTPSPort,
- Protocol: core.ProtocolTCP,
- },
- )
- }
- if api.IsPortAssigned(host.InterserverHTTPPort) {
- container.Ports = append(container.Ports,
- core.ContainerPort{
- Name: chDefaultInterserverHTTPPortName,
- ContainerPort: host.InterserverHTTPPort,
- Protocol: core.ProtocolTCP,
- },
- )
- }
-}
-
-// newDefaultClickHouseContainer returns default ClickHouse Container
-func newDefaultClickHouseContainer(host *api.ChiHost) core.Container {
- container := core.Container{
- Name: clickHouseContainerName,
- Image: defaultClickHouseDockerImage,
- LivenessProbe: newDefaultLivenessProbe(host),
- ReadinessProbe: newDefaultReadinessProbe(host),
- }
- appendContainerPorts(&container, host)
- return container
-}
-
-// newDefaultLogContainer returns default Log Container
-func newDefaultLogContainer() core.Container {
- return core.Container{
- Name: clickHouseLogContainerName,
- Image: defaultUbiDockerImage,
- Command: []string{
- "/bin/sh", "-c", "--",
- },
- Args: []string{
- "while true; do sleep 30; done;",
- },
- }
-}
-
-// addContainer adds container to ChiPodTemplate
-func addContainer(podSpec *core.PodSpec, container core.Container) {
- podSpec.Containers = append(podSpec.Containers, container)
-}
-
-// newVolumeForPVC returns core.Volume object with defined name
-func newVolumeForPVC(name, claimName string) core.Volume {
- return core.Volume{
- Name: name,
- VolumeSource: core.VolumeSource{
- PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
- ClaimName: claimName,
- ReadOnly: false,
- },
- },
- }
-}
-
-// newVolumeForConfigMap returns core.Volume object with defined name
-func newVolumeForConfigMap(name string) core.Volume {
- var defaultMode int32 = 0644
- return core.Volume{
- Name: name,
- VolumeSource: core.VolumeSource{
- ConfigMap: &core.ConfigMapVolumeSource{
- LocalObjectReference: core.LocalObjectReference{
- Name: name,
- },
- DefaultMode: &defaultMode,
- },
- },
- }
-}
-
-// newVolumeMount returns core.VolumeMount object with name and mount path
-func newVolumeMount(name, mountPath string) core.VolumeMount {
- return core.VolumeMount{
- Name: name,
- MountPath: mountPath,
- }
-}
-
-// getContainerByName finds Container with specified name among all containers of Pod Template in StatefulSet
-func getContainerByName(statefulSet *apps.StatefulSet, name string) *core.Container {
- for i := range statefulSet.Spec.Template.Spec.Containers {
- // Convenience wrapper
- container := &statefulSet.Spec.Template.Spec.Containers[i]
- if container.Name == name {
- return container
- }
- }
-
- return nil
-}
-
-func getOwnerReferences(chi *api.ClickHouseInstallation) []meta.OwnerReference {
- if chi.Attributes.SkipOwnerRef {
- return nil
- }
- controller := true
- block := true
- return []meta.OwnerReference{
- {
- APIVersion: api.SchemeGroupVersion.String(),
- Kind: api.ClickHouseInstallationCRDResourceKind,
- Name: chi.Name,
- UID: chi.UID,
- Controller: &controller,
- BlockOwnerDeletion: &block,
- },
- }
-}
diff --git a/pkg/model/chi/creator/chi.go b/pkg/model/chi/creator/chi.go
new file mode 100644
index 000000000..6b58222a0
--- /dev/null
+++ b/pkg/model/chi/creator/chi.go
@@ -0,0 +1,30 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+func NewCHI() *api.ClickHouseInstallation {
+ return &api.ClickHouseInstallation{
+ TypeMeta: meta.TypeMeta{
+ Kind: api.ClickHouseInstallationCRDResourceKind,
+ APIVersion: api.SchemeGroupVersion.String(),
+ },
+ }
+}
diff --git a/pkg/model/chi/creator/cluster.go b/pkg/model/chi/creator/cluster.go
new file mode 100644
index 000000000..549a82b00
--- /dev/null
+++ b/pkg/model/chi/creator/cluster.go
@@ -0,0 +1,24 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+
+// NewDefaultCluster returns a new default Cluster
+func NewDefaultCluster() *api.Cluster {
+ return &api.Cluster{
+ Name: "cluster",
+ }
+}
diff --git a/pkg/model/chi/creator/config_map.go b/pkg/model/chi/creator/config_map.go
new file mode 100644
index 000000000..0c5cddc2a
--- /dev/null
+++ b/pkg/model/chi/creator/config_map.go
@@ -0,0 +1,77 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+)
+
+// CreateConfigMapCHICommon creates new core.ConfigMap
+func (c *Creator) CreateConfigMapCHICommon(options *model.ClickHouseConfigFilesGeneratorOptions) *core.ConfigMap {
+ cm := &core.ConfigMap{
+ ObjectMeta: meta.ObjectMeta{
+ Name: model.CreateConfigMapCommonName(c.chi),
+ Namespace: c.chi.Namespace,
+ Labels: model.Macro(c.chi).Map(c.labels.GetConfigMapCHICommon()),
+ Annotations: model.Macro(c.chi).Map(c.annotations.GetConfigMapCHICommon()),
+ OwnerReferences: getOwnerReferences(c.chi),
+ },
+ // Data contains several sections, each rendered as a separate XML chopConfig file
+ Data: c.chConfigFilesGenerator.CreateConfigFilesGroupCommon(options),
+ }
+ // And after the object is ready we can put version label
+ model.MakeObjectVersion(&cm.ObjectMeta, cm)
+ return cm
+}
+
+// CreateConfigMapCHICommonUsers creates new core.ConfigMap
+func (c *Creator) CreateConfigMapCHICommonUsers() *core.ConfigMap {
+ cm := &core.ConfigMap{
+ ObjectMeta: meta.ObjectMeta{
+ Name: model.CreateConfigMapCommonUsersName(c.chi),
+ Namespace: c.chi.Namespace,
+ Labels: model.Macro(c.chi).Map(c.labels.GetConfigMapCHICommonUsers()),
+ Annotations: model.Macro(c.chi).Map(c.annotations.GetConfigMapCHICommonUsers()),
+ OwnerReferences: getOwnerReferences(c.chi),
+ },
+ // Data contains several sections, each rendered as a separate XML chopConfig file
+ Data: c.chConfigFilesGenerator.CreateConfigFilesGroupUsers(),
+ }
+ // And after the object is ready we can put version label
+ model.MakeObjectVersion(&cm.ObjectMeta, cm)
+ return cm
+}
+
+// CreateConfigMapHost creates new core.ConfigMap
+func (c *Creator) CreateConfigMapHost(host *api.ChiHost) *core.ConfigMap {
+ cm := &core.ConfigMap{
+ ObjectMeta: meta.ObjectMeta{
+ Name: model.CreateConfigMapHostName(host),
+ Namespace: host.Runtime.Address.Namespace,
+ Labels: model.Macro(host).Map(c.labels.GetConfigMapHost(host)),
+ Annotations: model.Macro(host).Map(c.annotations.GetConfigMapHost(host)),
+ OwnerReferences: getOwnerReferences(c.chi),
+ },
+ // Data contains several sections, each rendered as a separate XML chopConfig file
+ Data: c.chConfigFilesGenerator.CreateConfigFilesGroupHost(host),
+ }
+ // And after the object is ready we can put version label
+ model.MakeObjectVersion(&cm.ObjectMeta, cm)
+ return cm
+}
diff --git a/pkg/model/chi/creator/creator.go b/pkg/model/chi/creator/creator.go
new file mode 100644
index 000000000..1a2cc93e4
--- /dev/null
+++ b/pkg/model/chi/creator/creator.go
@@ -0,0 +1,42 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+)
+
+// Creator specifies creator object
+type Creator struct {
+ chi *api.ClickHouseInstallation
+ chConfigFilesGenerator *model.ClickHouseConfigFilesGenerator
+ labels *model.Labeler
+ annotations *model.Annotator
+ a log.Announcer
+}
+
+// NewCreator creates new Creator object
+func NewCreator(chi *api.ClickHouseInstallation) *Creator {
+ return &Creator{
+ chi: chi,
+ chConfigFilesGenerator: model.NewClickHouseConfigFilesGenerator(model.NewClickHouseConfigGenerator(chi), chop.Config()),
+ labels: model.NewLabeler(chi),
+ annotations: model.NewAnnotator(chi),
+ a: log.M(chi),
+ }
+}
diff --git a/pkg/model/chi/creator/host.go b/pkg/model/chi/creator/host.go
new file mode 100644
index 000000000..a69c727df
--- /dev/null
+++ b/pkg/model/chi/creator/host.go
@@ -0,0 +1,62 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
+)
+
+// NewDefaultHostTemplate returns default Host Template to be used with StatefulSet
+func NewDefaultHostTemplate(name string) *api.ChiHostTemplate {
+ return &api.ChiHostTemplate{
+ Name: name,
+ PortDistribution: []api.ChiPortDistribution{
+ {
+ Type: deployment.PortDistributionUnspecified,
+ },
+ },
+ Spec: api.ChiHost{
+ Name: "",
+ TCPPort: api.PortUnassigned(),
+ TLSPort: api.PortUnassigned(),
+ HTTPPort: api.PortUnassigned(),
+ HTTPSPort: api.PortUnassigned(),
+ InterserverHTTPPort: api.PortUnassigned(),
+ Templates: nil,
+ },
+ }
+}
+
+// NewDefaultHostTemplateForHostNetwork returns default Host Template for host network to be used with StatefulSet
+func NewDefaultHostTemplateForHostNetwork(name string) *api.ChiHostTemplate {
+ return &api.ChiHostTemplate{
+ Name: name,
+ PortDistribution: []api.ChiPortDistribution{
+ {
+ Type: deployment.PortDistributionClusterScopeIndex,
+ },
+ },
+ Spec: api.ChiHost{
+ Name: "",
+ TCPPort: api.PortUnassigned(),
+ TLSPort: api.PortUnassigned(),
+ HTTPPort: api.PortUnassigned(),
+ HTTPSPort: api.PortUnassigned(),
+ InterserverHTTPPort: api.PortUnassigned(),
+ Templates: nil,
+ },
+ }
+}
diff --git a/pkg/model/chi/creator/owner_reference.go b/pkg/model/chi/creator/owner_reference.go
new file mode 100644
index 000000000..bc11ad106
--- /dev/null
+++ b/pkg/model/chi/creator/owner_reference.go
@@ -0,0 +1,43 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+func getOwnerReferences(chi *api.ClickHouseInstallation) []meta.OwnerReference {
+ if chi.EnsureRuntime().EnsureAttributes().SkipOwnerRef {
+ return nil
+ }
+ return []meta.OwnerReference{
+ getOwnerReference(&chi.ObjectMeta),
+ }
+}
+
+func getOwnerReference(objectMeta *meta.ObjectMeta) meta.OwnerReference {
+ controller := true
+ block := true
+ return meta.OwnerReference{
+ APIVersion: api.SchemeGroupVersion.String(),
+ Kind: api.ClickHouseInstallationCRDResourceKind,
+ Name: objectMeta.GetName(),
+ UID: objectMeta.GetUID(),
+ Controller: &controller,
+ BlockOwnerDeletion: &block,
+ }
+}
diff --git a/pkg/model/chi/creator/pdb.go b/pkg/model/chi/creator/pdb.go
new file mode 100644
index 000000000..d9345e316
--- /dev/null
+++ b/pkg/model/chi/creator/pdb.go
@@ -0,0 +1,48 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ "fmt"
+
+ policy "k8s.io/api/policy/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+)
+
+// NewPodDisruptionBudget creates new PodDisruptionBudget
+func (c *Creator) NewPodDisruptionBudget(cluster *api.Cluster) *policy.PodDisruptionBudget {
+ return &policy.PodDisruptionBudget{
+ ObjectMeta: meta.ObjectMeta{
+ Name: fmt.Sprintf("%s-%s", cluster.Runtime.Address.CHIName, cluster.Runtime.Address.ClusterName),
+ Namespace: c.chi.Namespace,
+ Labels: model.Macro(c.chi).Map(c.labels.GetClusterScope(cluster)),
+ Annotations: model.Macro(c.chi).Map(c.annotations.GetClusterScope(cluster)),
+ OwnerReferences: getOwnerReferences(c.chi),
+ },
+ Spec: policy.PodDisruptionBudgetSpec{
+ Selector: &meta.LabelSelector{
+ MatchLabels: model.GetSelectorClusterScope(cluster),
+ },
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ }
+}
diff --git a/pkg/model/chi/creator/probe.go b/pkg/model/chi/creator/probe.go
new file mode 100644
index 000000000..f94333df3
--- /dev/null
+++ b/pkg/model/chi/creator/probe.go
@@ -0,0 +1,105 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ core "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+)
+
+// newDefaultLivenessProbe is a unification wrapper
+func newDefaultLivenessProbe(host *api.ChiHost) *core.Probe {
+ return newDefaultClickHouseLivenessProbe(host)
+}
+
+// newDefaultReadinessProbe is a unification wrapper
+func newDefaultReadinessProbe(host *api.ChiHost) *core.Probe {
+ return newDefaultClickHouseReadinessProbe(host)
+}
+
+// newDefaultClickHouseLivenessProbe returns default ClickHouse liveness probe
+func newDefaultClickHouseLivenessProbe(host *api.ChiHost) *core.Probe {
+ // Introduce http probe in case http port is specified
+ if api.IsPortAssigned(host.HTTPPort) {
+ return &core.Probe{
+ ProbeHandler: core.ProbeHandler{
+ HTTPGet: &core.HTTPGetAction{
+ Path: "/ping",
+ Port: intstr.Parse(model.ChDefaultHTTPPortName), // What if it is not a default?
+ },
+ },
+ InitialDelaySeconds: 60,
+ PeriodSeconds: 3,
+ FailureThreshold: 10,
+ }
+ }
+
+ // Introduce https probe in case https port is specified
+ if api.IsPortAssigned(host.HTTPSPort) {
+ return &core.Probe{
+ ProbeHandler: core.ProbeHandler{
+ HTTPGet: &core.HTTPGetAction{
+ Path: "/ping",
+ Port: intstr.Parse(model.ChDefaultHTTPSPortName), // What if it is not a default?
+ Scheme: core.URISchemeHTTPS,
+ },
+ },
+ InitialDelaySeconds: 60,
+ PeriodSeconds: 3,
+ FailureThreshold: 10,
+ }
+ }
+
+ // Probe is not available
+ return nil
+}
+
+// newDefaultClickHouseReadinessProbe returns default ClickHouse readiness probe
+func newDefaultClickHouseReadinessProbe(host *api.ChiHost) *core.Probe {
+ // Introduce http probe in case http port is specified
+ if api.IsPortAssigned(host.HTTPPort) {
+ return &core.Probe{
+ ProbeHandler: core.ProbeHandler{
+ HTTPGet: &core.HTTPGetAction{
+ Path: "/ping",
+ Port: intstr.Parse(model.ChDefaultHTTPPortName), // What if port name is not a default?
+ },
+ },
+ InitialDelaySeconds: 10,
+ PeriodSeconds: 3,
+ }
+ }
+
+ // Introduce https probe in case https port is specified
+ if api.IsPortAssigned(host.HTTPSPort) {
+ return &core.Probe{
+ ProbeHandler: core.ProbeHandler{
+ HTTPGet: &core.HTTPGetAction{
+ Path: "/ping",
+ Port: intstr.Parse(model.ChDefaultHTTPSPortName), // What if port name is not a default?
+ Scheme: core.URISchemeHTTPS,
+ },
+ },
+ InitialDelaySeconds: 10,
+ PeriodSeconds: 3,
+ }
+ }
+
+ // Probe is not available
+ return nil
+}
diff --git a/pkg/model/chi/creator/pv.go b/pkg/model/chi/creator/pv.go
new file mode 100644
index 000000000..7f17a78d3
--- /dev/null
+++ b/pkg/model/chi/creator/pv.go
@@ -0,0 +1,31 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ core "k8s.io/api/core/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+)
+
+// PreparePersistentVolume prepares PV labels and annotations
+func (c *Creator) PreparePersistentVolume(pv *core.PersistentVolume, host *api.ChiHost) *core.PersistentVolume {
+ pv.Labels = model.Macro(host).Map(c.labels.GetPV(pv, host))
+ pv.Annotations = model.Macro(host).Map(c.annotations.GetPV(pv, host))
+ // And after the object is ready we can put version label
+ model.MakeObjectVersion(&pv.ObjectMeta, pv)
+ return pv
+}
diff --git a/pkg/model/chi/creator/pvc.go b/pkg/model/chi/creator/pvc.go
new file mode 100644
index 000000000..ba8abb93c
--- /dev/null
+++ b/pkg/model/chi/creator/pvc.go
@@ -0,0 +1,82 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+)
+
+// PreparePersistentVolumeClaim prepares PVC labels and annotations
+func (c *Creator) PreparePersistentVolumeClaim(
+ pvc *core.PersistentVolumeClaim,
+ host *api.ChiHost,
+ template *api.ChiVolumeClaimTemplate,
+) *core.PersistentVolumeClaim {
+ pvc.Labels = model.Macro(host).Map(c.labels.GetPVC(pvc, host, template))
+ pvc.Annotations = model.Macro(host).Map(c.annotations.GetPVC(pvc, host, template))
+ // And after the object is ready we can put version label
+ model.MakeObjectVersion(&pvc.ObjectMeta, pvc)
+ return pvc
+}
+
+// createPVC creates PersistentVolumeClaim
+func (c *Creator) createPVC(
+ name string,
+ namespace string,
+ host *api.ChiHost,
+ spec *core.PersistentVolumeClaimSpec,
+) core.PersistentVolumeClaim {
+ persistentVolumeClaim := core.PersistentVolumeClaim{
+ TypeMeta: meta.TypeMeta{
+ Kind: "PersistentVolumeClaim",
+ APIVersion: "v1",
+ },
+ ObjectMeta: meta.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ // TODO
+ // this has to wait until proper disk inheritance procedure will be available
+ // UPDATE
+ // we are close to proper disk inheritance
+ // Right now we hit the following error:
+ // "Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden"
+ Labels: model.Macro(host).Map(c.labels.GetHostScope(host, false)),
+ Annotations: model.Macro(host).Map(c.annotations.GetHostScope(host)),
+ },
+ // Append copy of PersistentVolumeClaimSpec
+ Spec: *spec.DeepCopy(),
+ }
+ // TODO introduce normalization
+ // Overwrite .Spec.VolumeMode
+ volumeMode := core.PersistentVolumeFilesystem
+ persistentVolumeClaim.Spec.VolumeMode = &volumeMode
+
+ return persistentVolumeClaim
+}
+
+// CreatePVC creates PVC
+func (c *Creator) CreatePVC(name string, host *api.ChiHost, spec *core.PersistentVolumeClaimSpec) *core.PersistentVolumeClaim {
+ pvc := c.createPVC(name, host.Runtime.Address.Namespace, host, spec)
+ return &pvc
+}
+
+// OperatorShouldCreatePVC checks whether the operator should create PVC for the specified volumeClaimTemplate
+func OperatorShouldCreatePVC(host *api.ChiHost, volumeClaimTemplate *api.ChiVolumeClaimTemplate) bool {
+ return model.GetPVCProvisioner(host, volumeClaimTemplate) == api.PVCProvisionerOperator
+}
diff --git a/pkg/model/chi/creator/secret.go b/pkg/model/chi/creator/secret.go
new file mode 100644
index 000000000..b8dc85bc7
--- /dev/null
+++ b/pkg/model/chi/creator/secret.go
@@ -0,0 +1,36 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// CreateClusterSecret creates cluster secret
+func (c *Creator) CreateClusterSecret(name string) *core.Secret {
+ return &core.Secret{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: c.chi.Namespace,
+ Name: name,
+ },
+ StringData: map[string]string{
+ "secret": util.RandStringRange(10, 20),
+ },
+ Type: core.SecretTypeOpaque,
+ }
+}
diff --git a/pkg/model/chi/creator/service.go b/pkg/model/chi/creator/service.go
new file mode 100644
index 000000000..58c995dc3
--- /dev/null
+++ b/pkg/model/chi/creator/service.go
@@ -0,0 +1,221 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ "fmt"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/model/k8s"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// CreateServiceCHI creates new core.Service for specified CHI
+func (c *Creator) CreateServiceCHI() *core.Service {
+ if template, ok := c.chi.GetCHIServiceTemplate(); ok {
+ // .templates.ServiceTemplate specified
+ return c.createServiceFromTemplate(
+ template,
+ c.chi.Namespace,
+ model.CreateCHIServiceName(c.chi),
+ c.labels.GetServiceCHI(c.chi),
+ c.annotations.GetServiceCHI(c.chi),
+ c.labels.GetSelectorCHIScopeReady(),
+ getOwnerReferences(c.chi),
+ model.Macro(c.chi),
+ )
+ }
+
+ // Create default Service
+ // We do not have .templates.ServiceTemplate specified or it is incorrect
+ svc := &core.Service{
+ ObjectMeta: meta.ObjectMeta{
+ Name: model.CreateCHIServiceName(c.chi),
+ Namespace: c.chi.Namespace,
+ Labels: model.Macro(c.chi).Map(c.labels.GetServiceCHI(c.chi)),
+ Annotations: model.Macro(c.chi).Map(c.annotations.GetServiceCHI(c.chi)),
+ OwnerReferences: getOwnerReferences(c.chi),
+ },
+ Spec: core.ServiceSpec{
+ ClusterIP: model.TemplateDefaultsServiceClusterIP,
+ Ports: []core.ServicePort{
+ {
+ Name: model.ChDefaultHTTPPortName,
+ Protocol: core.ProtocolTCP,
+ Port: model.ChDefaultHTTPPortNumber,
+ TargetPort: intstr.FromString(model.ChDefaultHTTPPortName),
+ },
+ {
+ Name: model.ChDefaultTCPPortName,
+ Protocol: core.ProtocolTCP,
+ Port: model.ChDefaultTCPPortNumber,
+ TargetPort: intstr.FromString(model.ChDefaultTCPPortName),
+ },
+ },
+ Selector: c.labels.GetSelectorCHIScopeReady(),
+ Type: core.ServiceTypeClusterIP,
+ // ExternalTrafficPolicy: core.ServiceExternalTrafficPolicyTypeLocal, // For core.ServiceTypeLoadBalancer only
+ },
+ }
+ model.MakeObjectVersion(&svc.ObjectMeta, svc)
+ return svc
+}
+
+// CreateServiceCluster creates new core.Service for specified Cluster
+func (c *Creator) CreateServiceCluster(cluster *api.Cluster) *core.Service {
+ serviceName := model.CreateClusterServiceName(cluster)
+ ownerReferences := getOwnerReferences(c.chi)
+
+ c.a.V(1).F().Info("%s/%s", cluster.Runtime.Address.Namespace, serviceName)
+ if template, ok := cluster.GetServiceTemplate(); ok {
+ // .templates.ServiceTemplate specified
+ return c.createServiceFromTemplate(
+ template,
+ cluster.Runtime.Address.Namespace,
+ serviceName,
+ c.labels.GetServiceCluster(cluster),
+ c.annotations.GetServiceCluster(cluster),
+ model.GetSelectorClusterScopeReady(cluster),
+ ownerReferences,
+ model.Macro(cluster),
+ )
+ }
+ // No template specified, no need to create service
+ return nil
+}
+
+// CreateServiceShard creates new core.Service for specified Shard
+func (c *Creator) CreateServiceShard(shard *api.ChiShard) *core.Service {
+ if template, ok := shard.GetServiceTemplate(); ok {
+ // .templates.ServiceTemplate specified
+ return c.createServiceFromTemplate(
+ template,
+ shard.Runtime.Address.Namespace,
+ model.CreateShardServiceName(shard),
+ c.labels.GetServiceShard(shard),
+ c.annotations.GetServiceShard(shard),
+ model.GetSelectorShardScopeReady(shard),
+ getOwnerReferences(c.chi),
+ model.Macro(shard),
+ )
+ }
+ // No template specified, no need to create service
+ return nil
+}
+
+// CreateServiceHost creates new core.Service for specified host
+func (c *Creator) CreateServiceHost(host *api.ChiHost) *core.Service {
+ if template, ok := host.GetServiceTemplate(); ok {
+ // .templates.ServiceTemplate specified
+ return c.createServiceFromTemplate(
+ template,
+ host.Runtime.Address.Namespace,
+ model.CreateStatefulSetServiceName(host),
+ c.labels.GetServiceHost(host),
+ c.annotations.GetServiceHost(host),
+ model.GetSelectorHostScope(host),
+ getOwnerReferences(c.chi),
+ model.Macro(host),
+ )
+ }
+
+ // Create default Service
+ // We do not have .templates.ServiceTemplate specified or it is incorrect
+ svc := &core.Service{
+ ObjectMeta: meta.ObjectMeta{
+ Name: model.CreateStatefulSetServiceName(host),
+ Namespace: host.Runtime.Address.Namespace,
+ Labels: model.Macro(host).Map(c.labels.GetServiceHost(host)),
+ Annotations: model.Macro(host).Map(c.annotations.GetServiceHost(host)),
+ OwnerReferences: getOwnerReferences(c.chi),
+ },
+ Spec: core.ServiceSpec{
+ Selector: model.GetSelectorHostScope(host),
+ ClusterIP: model.TemplateDefaultsServiceClusterIP,
+ Type: core.ServiceTypeClusterIP,
+ PublishNotReadyAddresses: true,
+ },
+ }
+ appendServicePorts(svc, host)
+ model.MakeObjectVersion(&svc.ObjectMeta, svc)
+ return svc
+}
+
+func appendServicePorts(service *core.Service, host *api.ChiHost) {
+ // Walk over all assigned ports of the host and append each port to the list of service's ports
+ model.HostWalkAssignedPorts(
+ host,
+ func(name string, port *int32, protocol core.Protocol) bool {
+ // Append assigned port to the list of service's ports
+ service.Spec.Ports = append(service.Spec.Ports,
+ core.ServicePort{
+ Name: name,
+ Protocol: protocol,
+ Port: *port,
+ TargetPort: intstr.FromInt(int(*port)),
+ },
+ )
+ // Do not abort, continue iterating
+ return false
+ },
+ )
+}
+
+// createServiceFromTemplate creates Service from ChiServiceTemplate and additional info
+func (c *Creator) createServiceFromTemplate(
+ template *api.ChiServiceTemplate,
+ namespace string,
+ name string,
+ labels map[string]string,
+ annotations map[string]string,
+ selector map[string]string,
+ ownerReferences []meta.OwnerReference,
+ macro *model.MacrosEngine,
+) *core.Service {
+
+ // Verify Ports
+ if err := k8s.ServiceSpecVerifyPorts(&template.Spec); err != nil {
+ c.a.V(1).F().Warning(fmt.Sprintf("template: %s err: %s", template.Name, err))
+ return nil
+ }
+
+ // Create Service
+ service := &core.Service{
+ ObjectMeta: *template.ObjectMeta.DeepCopy(),
+ Spec: *template.Spec.DeepCopy(),
+ }
+
+ // Overwrite .name and .namespace - they are not allowed to be specified in template
+ service.Name = name
+ service.Namespace = namespace
+ service.OwnerReferences = ownerReferences
+
+ // Combine labels and annotations
+ service.Labels = macro.Map(util.MergeStringMapsOverwrite(service.Labels, labels))
+ service.Annotations = macro.Map(util.MergeStringMapsOverwrite(service.Annotations, annotations))
+
+ // Append provided Selector to already specified Selector in template
+ service.Spec.Selector = util.MergeStringMapsOverwrite(service.Spec.Selector, selector)
+
+ // And after the object is ready we can put version label
+ model.MakeObjectVersion(&service.ObjectMeta, service)
+
+ return service
+}
diff --git a/pkg/model/chi/creator/stateful_set.go b/pkg/model/chi/creator/stateful_set.go
new file mode 100644
index 000000000..e6722014c
--- /dev/null
+++ b/pkg/model/chi/creator/stateful_set.go
@@ -0,0 +1,498 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/model/k8s"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// CreateStatefulSet creates new apps.StatefulSet
+func (c *Creator) CreateStatefulSet(host *api.ChiHost, shutdown bool) *apps.StatefulSet {
+ statefulSet := &apps.StatefulSet{
+ ObjectMeta: meta.ObjectMeta{
+ Name: model.CreateStatefulSetName(host),
+ Namespace: host.Runtime.Address.Namespace,
+ Labels: model.Macro(host).Map(c.labels.GetHostScope(host, true)),
+ Annotations: model.Macro(host).Map(c.annotations.GetHostScope(host)),
+ OwnerReferences: getOwnerReferences(c.chi),
+ },
+ Spec: apps.StatefulSetSpec{
+ Replicas: host.GetStatefulSetReplicasNum(shutdown),
+ ServiceName: model.CreateStatefulSetServiceName(host),
+ Selector: &meta.LabelSelector{
+ MatchLabels: model.GetSelectorHostScope(host),
+ },
+
+ // IMPORTANT
+ // Template is to be setup later
+ // VolumeClaimTemplates are to be setup later
+ Template: core.PodTemplateSpec{},
+ VolumeClaimTemplates: nil,
+
+ PodManagementPolicy: apps.OrderedReadyPodManagement,
+ UpdateStrategy: apps.StatefulSetUpdateStrategy{
+ Type: apps.RollingUpdateStatefulSetStrategyType,
+ },
+ RevisionHistoryLimit: chop.Config().GetRevisionHistoryLimit(),
+ },
+ }
+
+ c.setupStatefulSetPodTemplate(statefulSet, host)
+ c.setupStatefulSetVolumeClaimTemplates(statefulSet, host)
+ model.MakeObjectVersion(&statefulSet.ObjectMeta, statefulSet)
+
+ return statefulSet
+}
+
+// setupStatefulSetPodTemplate performs PodTemplate setup of StatefulSet
+func (c *Creator) setupStatefulSetPodTemplate(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ // Process Pod Template
+ podTemplate := c.getPodTemplate(host)
+ c.statefulSetApplyPodTemplate(statefulSet, podTemplate, host)
+
+ // Post-process StatefulSet
+ ensureStatefulSetTemplateIntegrity(statefulSet, host)
+ setupEnvVars(statefulSet, host)
+ c.personalizeStatefulSetTemplate(statefulSet, host)
+}
+
+// ensureStatefulSetTemplateIntegrity ensures integrity of the StatefulSet's Pod template
+func ensureStatefulSetTemplateIntegrity(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ ensureMainContainerSpecified(statefulSet, host)
+ ensureProbesSpecified(statefulSet, host)
+ ensureNamedPortsSpecified(statefulSet, host)
+}
+
+// setupEnvVars sets up ENV vars for the ClickHouse container
+func setupEnvVars(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ container, ok := getMainContainer(statefulSet)
+ if !ok {
+ return
+ }
+
+ container.Env = append(container.Env, host.GetCHI().EnsureRuntime().EnsureAttributes().AdditionalEnvVars...)
+}
+
+// ensureMainContainerSpecified is a unification wrapper
+func ensureMainContainerSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ ensureClickHouseContainerSpecified(statefulSet, host)
+}
+
+// ensureLogContainerSpecified is a unification wrapper
+func ensureLogContainerSpecified(statefulSet *apps.StatefulSet) {
+ ensureClickHouseLogContainerSpecified(statefulSet)
+}
+
+// ensureClickHouseContainerSpecified ensures the ClickHouse container is present in the StatefulSet
+func ensureClickHouseContainerSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ _, ok := getClickHouseContainer(statefulSet)
+ if ok {
+ return
+ }
+
+ // No ClickHouse container available, let's add one
+ k8s.PodSpecAddContainer(
+ &statefulSet.Spec.Template.Spec,
+ newDefaultClickHouseContainer(host),
+ )
+}
+
+// ensureClickHouseLogContainerSpecified ensures the ClickHouse log container is present in the StatefulSet
+func ensureClickHouseLogContainerSpecified(statefulSet *apps.StatefulSet) {
+ _, ok := getClickHouseLogContainer(statefulSet)
+ if ok {
+ return
+ }
+
+ // No ClickHouse Log container available, let's add one
+
+ k8s.PodSpecAddContainer(
+ &statefulSet.Spec.Template.Spec,
+ newDefaultLogContainer(),
+ )
+}
+
+// ensureProbesSpecified ensures liveness and readiness probes are specified on the main container
+func ensureProbesSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ container, ok := getMainContainer(statefulSet)
+ if !ok {
+ return
+ }
+ if container.LivenessProbe == nil {
+ container.LivenessProbe = newDefaultLivenessProbe(host)
+ }
+ if container.ReadinessProbe == nil {
+ container.ReadinessProbe = newDefaultReadinessProbe(host)
+ }
+}
+
+// personalizeStatefulSetTemplate customizes the StatefulSet's Pod template for the particular host
+func (c *Creator) personalizeStatefulSetTemplate(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ // Ensure pod created by this StatefulSet has alias 127.0.0.1
+ statefulSet.Spec.Template.Spec.HostAliases = []core.HostAlias{
+ {
+ IP: "127.0.0.1",
+ Hostnames: []string{
+ model.CreatePodHostname(host),
+ },
+ },
+ }
+
+ // Setup volumes
+ c.statefulSetSetupVolumes(statefulSet, host)
+ // Setup statefulSet according to troubleshoot mode (if any)
+ c.setupTroubleshootingMode(statefulSet, host)
+ // Setup dedicated log container
+ c.setupLogContainer(statefulSet, host)
+}
+
+// setupTroubleshootingMode sets up the StatefulSet for troubleshooting mode (if enabled)
+func (c *Creator) setupTroubleshootingMode(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ if !host.GetCHI().IsTroubleshoot() {
+ // We are not troubleshooting
+ return
+ }
+
+ container, ok := getMainContainer(statefulSet)
+ if !ok {
+ // Unable to locate ClickHouse container
+ return
+ }
+
+ // Let's setup troubleshooting in ClickHouse container
+
+ sleep := " || sleep 1800"
+ if len(container.Command) > 0 {
+ // In case we have user-specified command, let's
+ // append troubleshooting-capable tail and hope for the best
+ container.Command[len(container.Command)-1] += sleep
+ } else {
+ // Assume standard ClickHouse container is used
+ // Substitute entrypoint with troubleshooting-capable command
+ container.Command = []string{
+ "/bin/sh",
+ "-c",
+ "/entrypoint.sh" + sleep,
+ }
+ }
+ // The appended `sleep` command makes the Pod unable to respond to probes, so they would fail and cause unexpected restarts.
+ // Thus we need to disable all probes in troubleshooting mode.
+ container.LivenessProbe = nil
+ container.ReadinessProbe = nil
+}
+
+// setupLogContainer appends a dedicated log container when a log VolumeClaimTemplate is specified
+func (c *Creator) setupLogContainer(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ // In case a LogVolumeClaimTemplate is specified, we need to append a log container to the Pod Template
+ if host.Templates.HasLogVolumeClaimTemplate() {
+ ensureLogContainerSpecified(statefulSet)
+ c.a.V(1).F().Info("add log container for host: %s", host.Runtime.Address.HostName)
+ }
+}
+
+// getPodTemplate gets Pod Template to be used to create StatefulSet
+func (c *Creator) getPodTemplate(host *api.ChiHost) *api.ChiPodTemplate {
+ // Which pod template should be used - either explicitly defined or a default one
+ podTemplate, ok := host.GetPodTemplate()
+ if ok {
+ // Host references known PodTemplate
+ // Make local copy of this PodTemplate, in order not to spoil the original common-used template
+ podTemplate = podTemplate.DeepCopy()
+ c.a.V(3).F().Info("host: %s StatefulSet - use custom template: %s", host.Runtime.Address.HostName, podTemplate.Name)
+ } else {
+ // Host references UNKNOWN PodTemplate, will use default one
+ podTemplate = newDefaultPodTemplate(host)
+ c.a.V(3).F().Info("host: %s StatefulSet - use default generated template", host.Runtime.Address.HostName)
+ }
+
+ // Here we have local copy of Pod Template, to be used to create StatefulSet
+ // Now we can customize this Pod Template for particular host
+
+ model.PrepareAffinity(podTemplate, host)
+
+ return podTemplate
+}
+
+// statefulSetSetupVolumes sets up all volumes
+func (c *Creator) statefulSetSetupVolumes(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ c.statefulSetSetupVolumesForConfigMaps(statefulSet, host)
+ c.statefulSetSetupVolumesForSecrets(statefulSet, host)
+}
+
+// statefulSetSetupVolumesForConfigMaps adds ConfigMap Volumes to the StatefulSet and VolumeMount objects to each container in the Pod
+func (c *Creator) statefulSetSetupVolumesForConfigMaps(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ configMapHostName := model.CreateConfigMapHostName(host)
+ configMapCommonName := model.CreateConfigMapCommonName(c.chi)
+ configMapCommonUsersName := model.CreateConfigMapCommonUsersName(c.chi)
+
+ // Add all ConfigMap objects as Volume objects of type ConfigMap
+ k8s.StatefulSetAppendVolumes(
+ statefulSet,
+ newVolumeForConfigMap(configMapCommonName),
+ newVolumeForConfigMap(configMapCommonUsersName),
+ newVolumeForConfigMap(configMapHostName),
+ //newVolumeForConfigMap(configMapHostMigrationName),
+ )
+
+ // And reference these Volumes in each Container via VolumeMount
+ // So Pod will have ConfigMaps mounted as Volumes
+ k8s.StatefulSetAppendVolumeMounts(
+ statefulSet,
+ newVolumeMount(configMapCommonName, model.DirPathCommonConfig),
+ newVolumeMount(configMapCommonUsersName, model.DirPathUsersConfig),
+ newVolumeMount(configMapHostName, model.DirPathHostConfig),
+ )
+}
+
+// statefulSetSetupVolumesForSecrets adds additional (secret) Volumes to the StatefulSet and VolumeMount objects to each container in the Pod
+func (c *Creator) statefulSetSetupVolumesForSecrets(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ // Add all additional Volumes
+ k8s.StatefulSetAppendVolumes(
+ statefulSet,
+ host.GetCHI().EnsureRuntime().EnsureAttributes().AdditionalVolumes...,
+ )
+
+ // And reference these Volumes in each Container via VolumeMount
+ // So Pod will have additional volumes mounted as Volumes
+ k8s.StatefulSetAppendVolumeMounts(
+ statefulSet,
+ host.GetCHI().EnsureRuntime().EnsureAttributes().AdditionalVolumeMounts...,
+ )
+}
+
+// statefulSetAppendUsedPVCTemplates appends all PVC templates which are used (referenced by name) by containers
+// to the StatefulSet.Spec.VolumeClaimTemplates list
+func (c *Creator) statefulSetAppendUsedPVCTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ // VolumeClaimTemplates, that are directly referenced in containers' VolumeMount object(s)
+ // are appended to StatefulSet's Spec.VolumeClaimTemplates slice
+ //
+ // Deal with `volumeMounts` of a `container`, located by the path:
+ // .spec.templates.podTemplates.*.spec.containers.volumeMounts.*
+ for i := range statefulSet.Spec.Template.Spec.Containers {
+ // Convenience wrapper
+ container := &statefulSet.Spec.Template.Spec.Containers[i]
+ for j := range container.VolumeMounts {
+ // Convenience wrapper
+ volumeMount := &container.VolumeMounts[j]
+ if volumeClaimTemplate, ok := getVolumeClaimTemplate(volumeMount, host); ok {
+ c.statefulSetAppendPVCTemplate(statefulSet, host, volumeClaimTemplate)
+ }
+ }
+ }
+}
+
+// statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates
+// appends VolumeMounts for Data and Log VolumeClaimTemplates to all containers.
+// Creates VolumeMounts for Data and Log volumes in case these volume templates are specified in `templates`.
+func (c *Creator) statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ // Mount all named (data and log so far) VolumeClaimTemplates into all containers
+ for i := range statefulSet.Spec.Template.Spec.Containers {
+ // Convenience wrapper
+ container := &statefulSet.Spec.Template.Spec.Containers[i]
+ k8s.ContainerAppendVolumeMounts(
+ container,
+ newVolumeMount(host.Templates.GetDataVolumeClaimTemplate(), model.DirPathClickHouseData),
+ )
+ k8s.ContainerAppendVolumeMounts(
+ container,
+ newVolumeMount(host.Templates.GetLogVolumeClaimTemplate(), model.DirPathClickHouseLog),
+ )
+ }
+}
+
+// setupStatefulSetVolumeClaimTemplates performs VolumeClaimTemplate setup for Containers in PodTemplate of a StatefulSet
+func (c *Creator) setupStatefulSetVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ c.statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates(statefulSet, host)
+ c.statefulSetAppendUsedPVCTemplates(statefulSet, host)
+}
+
+// statefulSetApplyPodTemplate fills StatefulSet.Spec.Template with data from provided ChiPodTemplate
+func (c *Creator) statefulSetApplyPodTemplate(
+ statefulSet *apps.StatefulSet,
+ template *api.ChiPodTemplate,
+ host *api.ChiHost,
+) {
+ // StatefulSet's pod template is not directly compatible with ChiPodTemplate,
+ // we need to extract some fields from ChiPodTemplate and apply on StatefulSet
+ statefulSet.Spec.Template = core.PodTemplateSpec{
+ ObjectMeta: meta.ObjectMeta{
+ Name: template.Name,
+ Labels: model.Macro(host).Map(util.MergeStringMapsOverwrite(
+ c.labels.GetHostScopeReady(host, true),
+ template.ObjectMeta.Labels,
+ )),
+ Annotations: model.Macro(host).Map(util.MergeStringMapsOverwrite(
+ c.annotations.GetHostScope(host),
+ template.ObjectMeta.Annotations,
+ )),
+ },
+ Spec: *template.Spec.DeepCopy(),
+ }
+
+ if statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds == nil {
+ statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds = chop.Config().GetTerminationGracePeriod()
+ }
+}
+
+// getMainContainer is a unification wrapper
+func getMainContainer(statefulSet *apps.StatefulSet) (*core.Container, bool) {
+ return getClickHouseContainer(statefulSet)
+}
+
+// getClickHouseContainer
+func getClickHouseContainer(statefulSet *apps.StatefulSet) (*core.Container, bool) {
+ return k8s.StatefulSetContainerGet(statefulSet, model.ClickHouseContainerName, 0)
+}
+
+// getClickHouseLogContainer
+func getClickHouseLogContainer(statefulSet *apps.StatefulSet) (*core.Container, bool) {
+ return k8s.StatefulSetContainerGet(statefulSet, model.ClickHouseLogContainerName, -1)
+}
+
+// ensureNamedPortsSpecified
+func ensureNamedPortsSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
+ // Ensure ClickHouse container has all named ports specified
+ container, ok := getMainContainer(statefulSet)
+ if !ok {
+ return
+ }
+ // Walk over all assigned ports of the host and ensure each port in container
+ model.HostWalkAssignedPorts(
+ host,
+ func(name string, port *int32, protocol core.Protocol) bool {
+ k8s.ContainerEnsurePortByName(container, name, *port)
+ // Do not abort, continue iterating
+ return false
+ },
+ )
+}
+
+// statefulSetAppendPVCTemplate appends a new entry with data from the provided ChiVolumeClaimTemplate to StatefulSet.Spec.VolumeClaimTemplates
+func (c *Creator) statefulSetAppendPVCTemplate(
+ statefulSet *apps.StatefulSet,
+ host *api.ChiHost,
+ volumeClaimTemplate *api.ChiVolumeClaimTemplate,
+) {
+ // Since we have the same names for PVs produced from both VolumeClaimTemplates and Volumes,
+ // we need to check naming for all of them
+
+ // Check whether provided VolumeClaimTemplate is already listed in statefulSet.Spec.VolumeClaimTemplates
+ if k8s.StatefulSetHasVolumeClaimTemplateByName(statefulSet, volumeClaimTemplate.Name) {
+ // This VolumeClaimTemplate is already listed in statefulSet.Spec.VolumeClaimTemplates
+ // No need to add it second time
+ return
+ }
+
+ // Check whether provided VolumeClaimTemplate is already listed in statefulSet.Spec.Template.Spec.Volumes
+ if k8s.StatefulSetHasVolumeByName(statefulSet, volumeClaimTemplate.Name) {
+ // This VolumeClaimTemplate is already listed in statefulSet.Spec.Template.Spec.Volumes
+ // No need to add it a second time
+ return
+ }
+
+ // Provided VolumeClaimTemplate is listed neither in
+ // statefulSet.Spec.Template.Spec.Volumes
+ // nor in
+ // statefulSet.Spec.VolumeClaimTemplates
+ // so let's add it
+
+ if OperatorShouldCreatePVC(host, volumeClaimTemplate) {
+ claimName := model.CreatePVCNameByVolumeClaimTemplate(host, volumeClaimTemplate)
+ statefulSet.Spec.Template.Spec.Volumes = append(
+ statefulSet.Spec.Template.Spec.Volumes,
+ newVolumeForPVC(volumeClaimTemplate.Name, claimName),
+ )
+ } else {
+ statefulSet.Spec.VolumeClaimTemplates = append(
+ statefulSet.Spec.VolumeClaimTemplates,
+ // For templates, we should not specify the namespace where the PVC will be located
+ c.createPVC(volumeClaimTemplate.Name, "", host, &volumeClaimTemplate.Spec),
+ )
+ }
+}
+
+// newDefaultPodTemplate is a unification wrapper
+func newDefaultPodTemplate(host *api.ChiHost) *api.ChiPodTemplate {
+ return newDefaultClickHousePodTemplate(host)
+}
+
+// newDefaultClickHousePodTemplate returns default Pod Template to be used with StatefulSet
+func newDefaultClickHousePodTemplate(host *api.ChiHost) *api.ChiPodTemplate {
+ podTemplate := &api.ChiPodTemplate{
+ Name: model.CreateStatefulSetName(host),
+ Spec: core.PodSpec{
+ Containers: []core.Container{},
+ Volumes: []core.Volume{},
+ },
+ }
+
+ // Pod has to have main container.
+ k8s.PodSpecAddContainer(&podTemplate.Spec, newDefaultClickHouseContainer(host))
+
+ return podTemplate
+}
+
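+// appendContainerPorts appends all assigned ports of the host to the container's list of ports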
+func appendContainerPorts(container *core.Container, host *api.ChiHost) {
+ // Walk over all assigned ports of the host and append each port to the list of container's ports
+ model.HostWalkAssignedPorts(
+ host,
+ func(name string, port *int32, protocol core.Protocol) bool {
+ // Append assigned port to the list of container's ports
+ container.Ports = append(container.Ports,
+ core.ContainerPort{
+ Name: name,
+ ContainerPort: *port,
+ Protocol: protocol,
+ },
+ )
+ // Do not abort, continue iterating
+ return false
+ },
+ )
+}
+
+// newDefaultClickHouseContainer returns default ClickHouse Container
+func newDefaultClickHouseContainer(host *api.ChiHost) core.Container {
+ container := core.Container{
+ Name: model.ClickHouseContainerName,
+ Image: model.DefaultClickHouseDockerImage,
+ LivenessProbe: newDefaultClickHouseLivenessProbe(host),
+ ReadinessProbe: newDefaultClickHouseReadinessProbe(host),
+ }
+ appendContainerPorts(&container, host)
+ return container
+}
+
+// newDefaultLogContainer returns default ClickHouse Log Container
+func newDefaultLogContainer() core.Container {
+ return core.Container{
+ Name: model.ClickHouseLogContainerName,
+ Image: model.DefaultUbiDockerImage,
+ Command: []string{
+ "/bin/sh", "-c", "--",
+ },
+ Args: []string{
+ "while true; do sleep 30; done;",
+ },
+ }
+}
diff --git a/pkg/model/chi/creator/volume.go b/pkg/model/chi/creator/volume.go
new file mode 100644
index 000000000..19ac4c3b2
--- /dev/null
+++ b/pkg/model/chi/creator/volume.go
@@ -0,0 +1,67 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package creator
+
+import (
+ core "k8s.io/api/core/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+// newVolumeForPVC returns a core.Volume backed by the PVC with the specified claim name
+func newVolumeForPVC(name, claimName string) core.Volume {
+ return core.Volume{
+ Name: name,
+ VolumeSource: core.VolumeSource{
+ PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
+ ClaimName: claimName,
+ ReadOnly: false,
+ },
+ },
+ }
+}
+
+// newVolumeForConfigMap returns a core.Volume backed by the ConfigMap of the same name
+func newVolumeForConfigMap(name string) core.Volume {
+ var defaultMode int32 = 0644
+ return core.Volume{
+ Name: name,
+ VolumeSource: core.VolumeSource{
+ ConfigMap: &core.ConfigMapVolumeSource{
+ LocalObjectReference: core.LocalObjectReference{
+ Name: name,
+ },
+ DefaultMode: &defaultMode,
+ },
+ },
+ }
+}
+
+// newVolumeMount returns core.VolumeMount object with name and mount path
+func newVolumeMount(name, mountPath string) core.VolumeMount {
+ return core.VolumeMount{
+ Name: name,
+ MountPath: mountPath,
+ }
+}
+
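+// getVolumeClaimTemplate returns the VolumeClaimTemplate referenced by the volumeMount's name, if any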
+func getVolumeClaimTemplate(volumeMount *core.VolumeMount, host *api.ChiHost) (*api.ChiVolumeClaimTemplate, bool) {
+ volumeClaimTemplateName := volumeMount.Name
+
+ volumeClaimTemplate, ok := host.GetCHI().GetVolumeClaimTemplate(volumeClaimTemplateName)
+ // Sometimes it is impossible to find a VolumeClaimTemplate related to the specified volumeMount.
+ // This volumeMount may not be created from a VolumeClaimTemplate; it may reference a ConfigMap instead
+ return volumeClaimTemplate, ok
+}
diff --git a/pkg/model/chi/deleter.go b/pkg/model/chi/deleter.go
index 4132aca2b..56029b8d1 100644
--- a/pkg/model/chi/deleter.go
+++ b/pkg/model/chi/deleter.go
@@ -15,7 +15,7 @@
package chi
import (
- "k8s.io/api/core/v1"
+ core "k8s.io/api/core/v1"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)
@@ -26,7 +26,7 @@ func HostCanDeletePVC(host *api.ChiHost, pvcName string) bool {
policy := api.PVCReclaimPolicyDelete
// What host, VolumeMount and VolumeClaimTemplate this PVC is made from?
- host.WalkVolumeMounts(api.CurStatefulSet, func(volumeMount *v1.VolumeMount) {
+ host.WalkVolumeMounts(api.CurStatefulSet, func(volumeMount *core.VolumeMount) {
volumeClaimTemplate, ok := GetVolumeClaimTemplate(host, volumeMount)
if !ok {
// No this is not a reference to VolumeClaimTemplate
@@ -48,7 +48,7 @@ func HostCanDeletePVC(host *api.ChiHost, pvcName string) bool {
// HostCanDeleteAllPVCs checks whether all PVCs can be deleted
func HostCanDeleteAllPVCs(host *api.ChiHost) bool {
canDeleteAllPVCs := true
- host.CHI.WalkVolumeClaimTemplates(func(template *api.ChiVolumeClaimTemplate) {
+ host.GetCHI().WalkVolumeClaimTemplates(func(template *api.ChiVolumeClaimTemplate) {
if getPVCReclaimPolicy(host, template) == api.PVCReclaimPolicyRetain {
// At least one template wants to keep its PVC
canDeleteAllPVCs = false
diff --git a/pkg/model/chi/host.go b/pkg/model/chi/host.go
index 0b78b0484..c0ac26597 100644
--- a/pkg/model/chi/host.go
+++ b/pkg/model/chi/host.go
@@ -15,6 +15,8 @@
package chi
import (
+ core "k8s.io/api/core/v1"
+
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/util"
)
@@ -30,3 +32,56 @@ func HostIsNewOne(host *api.ChiHost) bool {
func HostHasTablesCreated(host *api.ChiHost) bool {
return util.InArray(CreateFQDN(host), host.GetCHI().EnsureStatus().GetHostsWithTablesCreated())
}
+
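+// HostWalkPorts walks over all named ports of the host, calling f for each one; iteration stops as soon as f returns true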
+func HostWalkPorts(host *api.ChiHost, f func(name string, port *int32, protocol core.Protocol) bool) {
+ if host == nil {
+ return
+ }
+ if f(ChDefaultTCPPortName, &host.TCPPort, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultTLSPortName, &host.TLSPort, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultHTTPPortName, &host.HTTPPort, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultHTTPSPortName, &host.HTTPSPort, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultInterserverHTTPPortName, &host.InterserverHTTPPort, core.ProtocolTCP) {
+ return
+ }
+}
+
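+// HostWalkAssignedPorts walks over the host's assigned ports only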
+func HostWalkAssignedPorts(host *api.ChiHost, f func(name string, port *int32, protocol core.Protocol) bool) {
+ if host == nil {
+ return
+ }
+ HostWalkPorts(
+ host,
+ func(_name string, _port *int32, _protocol core.Protocol) bool {
+ if api.IsPortAssigned(*_port) {
+ return f(_name, _port, _protocol)
+ }
+ // Do not break, continue iterating
+ return false
+ },
+ )
+}
+
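+// HostWalkInvalidPorts walks over the host's invalid ports only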
+func HostWalkInvalidPorts(host *api.ChiHost, f func(name string, port *int32, protocol core.Protocol) bool) {
+ if host == nil {
+ return
+ }
+ HostWalkPorts(
+ host,
+ func(_name string, _port *int32, _protocol core.Protocol) bool {
+ if api.IsPortInvalid(*_port) {
+ return f(_name, _port, _protocol)
+ }
+ // Do not break, continue iterating
+ return false
+ },
+ )
+}
diff --git a/pkg/model/chi/labeler.go b/pkg/model/chi/labeler.go
index aa5d0085e..48a6b81cd 100644
--- a/pkg/model/chi/labeler.go
+++ b/pkg/model/chi/labeler.go
@@ -16,8 +16,8 @@ package chi
import (
"fmt"
- core "k8s.io/api/core/v1"
+ core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sLabels "k8s.io/apimachinery/pkg/labels"
@@ -87,8 +87,8 @@ func NewLabeler(chi *api.ClickHouseInstallation) *Labeler {
}
}
-// getConfigMapCHICommon
-func (l *Labeler) getConfigMapCHICommon() map[string]string {
+// GetConfigMapCHICommon returns labels for the CHI common ConfigMap
+func (l *Labeler) GetConfigMapCHICommon() map[string]string {
return util.MergeStringMapsOverwrite(
l.getCHIScope(),
map[string]string{
@@ -96,8 +96,8 @@ func (l *Labeler) getConfigMapCHICommon() map[string]string {
})
}
-// getConfigMapCHICommonUsers
-func (l *Labeler) getConfigMapCHICommonUsers() map[string]string {
+// GetConfigMapCHICommonUsers returns labels for the CHI common users ConfigMap
+func (l *Labeler) GetConfigMapCHICommonUsers() map[string]string {
return util.MergeStringMapsOverwrite(
l.getCHIScope(),
map[string]string{
@@ -105,17 +105,17 @@ func (l *Labeler) getConfigMapCHICommonUsers() map[string]string {
})
}
-// getConfigMapHost
-func (l *Labeler) getConfigMapHost(host *api.ChiHost) map[string]string {
+// GetConfigMapHost returns labels for a host's personal ConfigMap
+func (l *Labeler) GetConfigMapHost(host *api.ChiHost) map[string]string {
return util.MergeStringMapsOverwrite(
- l.getHostScope(host, false),
+ l.GetHostScope(host, false),
map[string]string{
LabelConfigMap: labelConfigMapValueHost,
})
}
-// getServiceCHI
-func (l *Labeler) getServiceCHI(chi *api.ClickHouseInstallation) map[string]string {
+// GetServiceCHI returns labels for the root CHI Service
+func (l *Labeler) GetServiceCHI(chi *api.ClickHouseInstallation) map[string]string {
return util.MergeStringMapsOverwrite(
l.getCHIScope(),
map[string]string{
@@ -123,17 +123,17 @@ func (l *Labeler) getServiceCHI(chi *api.ClickHouseInstallation) map[string]stri
})
}
-// getServiceCluster
-func (l *Labeler) getServiceCluster(cluster *api.Cluster) map[string]string {
+// GetServiceCluster returns labels for a cluster Service
+func (l *Labeler) GetServiceCluster(cluster *api.Cluster) map[string]string {
return util.MergeStringMapsOverwrite(
- l.getClusterScope(cluster),
+ l.GetClusterScope(cluster),
map[string]string{
LabelService: labelServiceValueCluster,
})
}
-// getServiceShard
-func (l *Labeler) getServiceShard(shard *api.ChiShard) map[string]string {
+// GetServiceShard returns labels for a shard Service
+func (l *Labeler) GetServiceShard(shard *api.ChiShard) map[string]string {
return util.MergeStringMapsOverwrite(
l.getShardScope(shard),
map[string]string{
@@ -141,10 +141,10 @@ func (l *Labeler) getServiceShard(shard *api.ChiShard) map[string]string {
})
}
-// getServiceHost
-func (l *Labeler) getServiceHost(host *api.ChiHost) map[string]string {
+// GetServiceHost returns labels for a host Service
+func (l *Labeler) GetServiceHost(host *api.ChiHost) map[string]string {
return util.MergeStringMapsOverwrite(
- l.getHostScope(host, false),
+ l.GetHostScope(host, false),
map[string]string{
LabelService: labelServiceValueHost,
})
@@ -168,19 +168,19 @@ func (l *Labeler) GetSelectorCHIScope() map[string]string {
}
}
-// getSelectorCHIScopeReady gets labels to select a ready-labelled CHI-scoped object
-func (l *Labeler) getSelectorCHIScopeReady() map[string]string {
+// GetSelectorCHIScopeReady gets labels to select a ready-labelled CHI-scoped object
+func (l *Labeler) GetSelectorCHIScopeReady() map[string]string {
return appendKeyReady(l.GetSelectorCHIScope())
}
-// getClusterScope gets labels for Cluster-scoped object
-func (l *Labeler) getClusterScope(cluster *api.Cluster) map[string]string {
+// GetClusterScope gets labels for Cluster-scoped object
+func (l *Labeler) GetClusterScope(cluster *api.Cluster) map[string]string {
// Combine generated labels and CHI-provided labels
- return l.filterOutPredefined(l.appendCHIProvidedTo(getSelectorClusterScope(cluster)))
+ return l.filterOutPredefined(l.appendCHIProvidedTo(GetSelectorClusterScope(cluster)))
}
-// getSelectorClusterScope gets labels to select a Cluster-scoped object
-func getSelectorClusterScope(cluster *api.Cluster) map[string]string {
+// GetSelectorClusterScope gets labels to select a Cluster-scoped object
+func GetSelectorClusterScope(cluster *api.Cluster) map[string]string {
// Do not include CHI-provided labels
return map[string]string{
LabelNamespace: labelsNamer.getNamePartNamespace(cluster),
@@ -190,9 +190,9 @@ func getSelectorClusterScope(cluster *api.Cluster) map[string]string {
}
}
-// getSelectorClusterScope gets labels to select a ready-labelled Cluster-scoped object
-func getSelectorClusterScopeReady(cluster *api.Cluster) map[string]string {
- return appendKeyReady(getSelectorClusterScope(cluster))
+// GetSelectorClusterScopeReady gets labels to select a ready-labelled Cluster-scoped object
+func GetSelectorClusterScopeReady(cluster *api.Cluster) map[string]string {
+ return appendKeyReady(GetSelectorClusterScope(cluster))
}
// getShardScope gets labels for Shard-scoped object
@@ -213,13 +213,13 @@ func getSelectorShardScope(shard *api.ChiShard) map[string]string {
}
}
-// getSelectorShardScope gets labels to select a ready-labelled Shard-scoped object
-func getSelectorShardScopeReady(shard *api.ChiShard) map[string]string {
+// GetSelectorShardScopeReady gets labels to select a ready-labelled Shard-scoped object
+func GetSelectorShardScopeReady(shard *api.ChiShard) map[string]string {
return appendKeyReady(getSelectorShardScope(shard))
}
-// getHostScope gets labels for Host-scoped object
-func (l *Labeler) getHostScope(host *api.ChiHost, applySupplementaryServiceLabels bool) map[string]string {
+// GetHostScope gets labels for Host-scoped object
+func (l *Labeler) GetHostScope(host *api.ChiHost, applySupplementaryServiceLabels bool) map[string]string {
// Combine generated labels and CHI-provided labels
labels := GetSelectorHostScope(host)
if chop.Config().Label.Runtime.AppendScope {
@@ -246,10 +246,10 @@ func (l *Labeler) getHostScope(host *api.ChiHost, applySupplementaryServiceLabel
func appendConfigLabels(host *api.ChiHost, labels map[string]string) map[string]string {
if host.HasCurStatefulSet() {
- if val, exists := host.CurStatefulSet.Labels[LabelZookeeperConfigVersion]; exists {
+ if val, exists := host.Runtime.CurStatefulSet.Labels[LabelZookeeperConfigVersion]; exists {
labels[LabelZookeeperConfigVersion] = val
}
- if val, exists := host.CurStatefulSet.Labels[LabelSettingsConfigVersion]; exists {
+ if val, exists := host.Runtime.CurStatefulSet.Labels[LabelSettingsConfigVersion]; exists {
labels[LabelSettingsConfigVersion] = val
}
}
@@ -258,25 +258,25 @@ func appendConfigLabels(host *api.ChiHost, labels map[string]string) map[string]
return labels
}
-// getHostScopeReady gets labels for Host-scoped object including Ready label
-func (l *Labeler) getHostScopeReady(host *api.ChiHost, applySupplementaryServiceLabels bool) map[string]string {
- return appendKeyReady(l.getHostScope(host, applySupplementaryServiceLabels))
+// GetHostScopeReady gets labels for Host-scoped object including Ready label
+func (l *Labeler) GetHostScopeReady(host *api.ChiHost, applySupplementaryServiceLabels bool) map[string]string {
+ return appendKeyReady(l.GetHostScope(host, applySupplementaryServiceLabels))
}
// getHostScopeReclaimPolicy gets host scope labels with PVCReclaimPolicy from template
func (l *Labeler) getHostScopeReclaimPolicy(host *api.ChiHost, template *api.ChiVolumeClaimTemplate, applySupplementaryServiceLabels bool) map[string]string {
- return util.MergeStringMapsOverwrite(l.getHostScope(host, applySupplementaryServiceLabels), map[string]string{
+ return util.MergeStringMapsOverwrite(l.GetHostScope(host, applySupplementaryServiceLabels), map[string]string{
LabelPVCReclaimPolicyName: getPVCReclaimPolicy(host, template).String(),
})
}
-// getPV
-func (l *Labeler) getPV(pv *core.PersistentVolume, host *api.ChiHost) map[string]string {
- return util.MergeStringMapsOverwrite(pv.Labels, l.getHostScope(host, false))
+// GetPV returns labels for a PersistentVolume
+func (l *Labeler) GetPV(pv *core.PersistentVolume, host *api.ChiHost) map[string]string {
+ return util.MergeStringMapsOverwrite(pv.Labels, l.GetHostScope(host, false))
}
-// getPVC
-func (l *Labeler) getPVC(
+// GetPVC returns labels for a PersistentVolumeClaim
+func (l *Labeler) GetPVC(
pvc *core.PersistentVolumeClaim,
host *api.ChiHost,
template *api.ChiVolumeClaimTemplate,
diff --git a/pkg/model/chi/macro.go b/pkg/model/chi/macro.go
index eb1a3ea6a..324411937 100644
--- a/pkg/model/chi/macro.go
+++ b/pkg/model/chi/macro.go
@@ -76,8 +76,8 @@ const (
macrosClusterScopeCycleHeadPointsToPreviousCycleTail = "{clusterScopeCycleHeadPointsToPreviousCycleTail}"
)
-// macrosEngine
-type macrosEngine struct {
+// MacrosEngine is a macros expansion engine
+type MacrosEngine struct {
names *namer
chi *api.ClickHouseInstallation
cluster *api.Cluster
@@ -85,25 +85,25 @@ type macrosEngine struct {
host *api.ChiHost
}
-// macro
-func macro(scope interface{}) *macrosEngine {
- m := new(macrosEngine)
+// Macro creates a MacrosEngine for the given scope: CHI, cluster, shard or host
+func Macro(scope interface{}) *MacrosEngine {
+ m := new(MacrosEngine)
m.names = newNamer(namerContextNames)
- switch t := scope.(type) {
+ switch typed := scope.(type) {
case *api.ClickHouseInstallation:
- m.chi = t
+ m.chi = typed
case *api.Cluster:
- m.cluster = t
+ m.cluster = typed
case *api.ChiShard:
- m.shard = t
+ m.shard = typed
case *api.ChiHost:
- m.host = t
+ m.host = typed
}
return m
}
// Line expands line with macros(es)
-func (m *macrosEngine) Line(line string) string {
+func (m *MacrosEngine) Line(line string) string {
switch {
case m.chi != nil:
return m.newLineMacroReplacerChi().Replace(line)
@@ -118,7 +118,7 @@ func (m *macrosEngine) Line(line string) string {
}
// Map expands map with macros(es)
-func (m *macrosEngine) Map(_map map[string]string) map[string]string {
+func (m *MacrosEngine) Map(_map map[string]string) map[string]string {
switch {
case m.chi != nil:
return m.newMapMacroReplacerChi().Replace(_map)
@@ -136,7 +136,7 @@ func (m *macrosEngine) Map(_map map[string]string) map[string]string {
}
// newLineMacroReplacerChi
-func (m *macrosEngine) newLineMacroReplacerChi() *strings.Replacer {
+func (m *MacrosEngine) newLineMacroReplacerChi() *strings.Replacer {
return strings.NewReplacer(
macrosNamespace, m.names.namePartNamespace(m.chi.Namespace),
macrosChiName, m.names.namePartChiName(m.chi.Name),
@@ -145,99 +145,99 @@ func (m *macrosEngine) newLineMacroReplacerChi() *strings.Replacer {
}
// newMapMacroReplacerChi
-func (m *macrosEngine) newMapMacroReplacerChi() *util.MapReplacer {
+func (m *MacrosEngine) newMapMacroReplacerChi() *util.MapReplacer {
return util.NewMapReplacer(m.newLineMacroReplacerChi())
}
// newLineMacroReplacerCluster
-func (m *macrosEngine) newLineMacroReplacerCluster() *strings.Replacer {
+func (m *MacrosEngine) newLineMacroReplacerCluster() *strings.Replacer {
return strings.NewReplacer(
- macrosNamespace, m.names.namePartNamespace(m.cluster.Address.Namespace),
- macrosChiName, m.names.namePartChiName(m.cluster.Address.CHIName),
- macrosChiID, m.names.namePartChiNameID(m.cluster.Address.CHIName),
- macrosClusterName, m.names.namePartClusterName(m.cluster.Address.ClusterName),
- macrosClusterID, m.names.namePartClusterNameID(m.cluster.Address.ClusterName),
- macrosClusterIndex, strconv.Itoa(m.cluster.Address.ClusterIndex),
+ macrosNamespace, m.names.namePartNamespace(m.cluster.Runtime.Address.Namespace),
+ macrosChiName, m.names.namePartChiName(m.cluster.Runtime.Address.CHIName),
+ macrosChiID, m.names.namePartChiNameID(m.cluster.Runtime.Address.CHIName),
+ macrosClusterName, m.names.namePartClusterName(m.cluster.Runtime.Address.ClusterName),
+ macrosClusterID, m.names.namePartClusterNameID(m.cluster.Runtime.Address.ClusterName),
+ macrosClusterIndex, strconv.Itoa(m.cluster.Runtime.Address.ClusterIndex),
)
}
// newMapMacroReplacerCluster
-func (m *macrosEngine) newMapMacroReplacerCluster() *util.MapReplacer {
+func (m *MacrosEngine) newMapMacroReplacerCluster() *util.MapReplacer {
return util.NewMapReplacer(m.newLineMacroReplacerCluster())
}
// newLineMacroReplacerShard
-func (m *macrosEngine) newLineMacroReplacerShard() *strings.Replacer {
+func (m *MacrosEngine) newLineMacroReplacerShard() *strings.Replacer {
return strings.NewReplacer(
- macrosNamespace, m.names.namePartNamespace(m.shard.Address.Namespace),
- macrosChiName, m.names.namePartChiName(m.shard.Address.CHIName),
- macrosChiID, m.names.namePartChiNameID(m.shard.Address.CHIName),
- macrosClusterName, m.names.namePartClusterName(m.shard.Address.ClusterName),
- macrosClusterID, m.names.namePartClusterNameID(m.shard.Address.ClusterName),
- macrosClusterIndex, strconv.Itoa(m.shard.Address.ClusterIndex),
- macrosShardName, m.names.namePartShardName(m.shard.Address.ShardName),
- macrosShardID, m.names.namePartShardNameID(m.shard.Address.ShardName),
- macrosShardIndex, strconv.Itoa(m.shard.Address.ShardIndex),
+ macrosNamespace, m.names.namePartNamespace(m.shard.Runtime.Address.Namespace),
+ macrosChiName, m.names.namePartChiName(m.shard.Runtime.Address.CHIName),
+ macrosChiID, m.names.namePartChiNameID(m.shard.Runtime.Address.CHIName),
+ macrosClusterName, m.names.namePartClusterName(m.shard.Runtime.Address.ClusterName),
+ macrosClusterID, m.names.namePartClusterNameID(m.shard.Runtime.Address.ClusterName),
+ macrosClusterIndex, strconv.Itoa(m.shard.Runtime.Address.ClusterIndex),
+ macrosShardName, m.names.namePartShardName(m.shard.Runtime.Address.ShardName),
+ macrosShardID, m.names.namePartShardNameID(m.shard.Runtime.Address.ShardName),
+ macrosShardIndex, strconv.Itoa(m.shard.Runtime.Address.ShardIndex),
)
}
// newMapMacroReplacerShard
-func (m *macrosEngine) newMapMacroReplacerShard() *util.MapReplacer {
+func (m *MacrosEngine) newMapMacroReplacerShard() *util.MapReplacer {
return util.NewMapReplacer(m.newLineMacroReplacerShard())
}
// clusterScopeIndexOfPreviousCycleTail gets cluster-scope index of previous cycle tail
func clusterScopeIndexOfPreviousCycleTail(host *api.ChiHost) int {
- if host.Address.ClusterScopeCycleOffset == 0 {
+ if host.Runtime.Address.ClusterScopeCycleOffset == 0 {
// This is the cycle head - the first host of the cycle
// We need to point to previous host in this cluster - which would be previous cycle tail
- if host.Address.ClusterScopeIndex == 0 {
+ if host.Runtime.Address.ClusterScopeIndex == 0 {
// This is the very first host in the cluster - head of the first cycle
// No previous host available, so just point to the same host, mainly because label must be an empty string
// or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character
// So we can't set it to "-1"
- return host.Address.ClusterScopeIndex
+ return host.Runtime.Address.ClusterScopeIndex
}
// This is head of non-first cycle, point to previous host in the cluster - which would be previous cycle tail
- return host.Address.ClusterScopeIndex - 1
+ return host.Runtime.Address.ClusterScopeIndex - 1
}
// This is not cycle head - just point to the same host
- return host.Address.ClusterScopeIndex
+ return host.Runtime.Address.ClusterScopeIndex
}
// newLineMacroReplacerHost
-func (m *macrosEngine) newLineMacroReplacerHost() *strings.Replacer {
+func (m *MacrosEngine) newLineMacroReplacerHost() *strings.Replacer {
return strings.NewReplacer(
- macrosNamespace, m.names.namePartNamespace(m.host.Address.Namespace),
- macrosChiName, m.names.namePartChiName(m.host.Address.CHIName),
- macrosChiID, m.names.namePartChiNameID(m.host.Address.CHIName),
- macrosClusterName, m.names.namePartClusterName(m.host.Address.ClusterName),
- macrosClusterID, m.names.namePartClusterNameID(m.host.Address.ClusterName),
- macrosClusterIndex, strconv.Itoa(m.host.Address.ClusterIndex),
- macrosShardName, m.names.namePartShardName(m.host.Address.ShardName),
- macrosShardID, m.names.namePartShardNameID(m.host.Address.ShardName),
- macrosShardIndex, strconv.Itoa(m.host.Address.ShardIndex),
- macrosShardScopeIndex, strconv.Itoa(m.host.Address.ShardScopeIndex), // TODO use appropriate namePart function
- macrosReplicaName, m.names.namePartReplicaName(m.host.Address.ReplicaName),
- macrosReplicaID, m.names.namePartReplicaNameID(m.host.Address.ReplicaName),
- macrosReplicaIndex, strconv.Itoa(m.host.Address.ReplicaIndex),
- macrosReplicaScopeIndex, strconv.Itoa(m.host.Address.ReplicaScopeIndex), // TODO use appropriate namePart function
- macrosHostName, m.names.namePartHostName(m.host.Address.HostName),
- macrosHostID, m.names.namePartHostNameID(m.host.Address.HostName),
- macrosChiScopeIndex, strconv.Itoa(m.host.Address.CHIScopeIndex), // TODO use appropriate namePart function
- macrosChiScopeCycleIndex, strconv.Itoa(m.host.Address.CHIScopeCycleIndex), // TODO use appropriate namePart function
- macrosChiScopeCycleOffset, strconv.Itoa(m.host.Address.CHIScopeCycleOffset), // TODO use appropriate namePart function
- macrosClusterScopeIndex, strconv.Itoa(m.host.Address.ClusterScopeIndex), // TODO use appropriate namePart function
- macrosClusterScopeCycleIndex, strconv.Itoa(m.host.Address.ClusterScopeCycleIndex), // TODO use appropriate namePart function
- macrosClusterScopeCycleOffset, strconv.Itoa(m.host.Address.ClusterScopeCycleOffset), // TODO use appropriate namePart function
+ macrosNamespace, m.names.namePartNamespace(m.host.Runtime.Address.Namespace),
+ macrosChiName, m.names.namePartChiName(m.host.Runtime.Address.CHIName),
+ macrosChiID, m.names.namePartChiNameID(m.host.Runtime.Address.CHIName),
+ macrosClusterName, m.names.namePartClusterName(m.host.Runtime.Address.ClusterName),
+ macrosClusterID, m.names.namePartClusterNameID(m.host.Runtime.Address.ClusterName),
+ macrosClusterIndex, strconv.Itoa(m.host.Runtime.Address.ClusterIndex),
+ macrosShardName, m.names.namePartShardName(m.host.Runtime.Address.ShardName),
+ macrosShardID, m.names.namePartShardNameID(m.host.Runtime.Address.ShardName),
+ macrosShardIndex, strconv.Itoa(m.host.Runtime.Address.ShardIndex),
+ macrosShardScopeIndex, strconv.Itoa(m.host.Runtime.Address.ShardScopeIndex), // TODO use appropriate namePart function
+ macrosReplicaName, m.names.namePartReplicaName(m.host.Runtime.Address.ReplicaName),
+ macrosReplicaID, m.names.namePartReplicaNameID(m.host.Runtime.Address.ReplicaName),
+ macrosReplicaIndex, strconv.Itoa(m.host.Runtime.Address.ReplicaIndex),
+ macrosReplicaScopeIndex, strconv.Itoa(m.host.Runtime.Address.ReplicaScopeIndex), // TODO use appropriate namePart function
+ macrosHostName, m.names.namePartHostName(m.host.Runtime.Address.HostName),
+ macrosHostID, m.names.namePartHostNameID(m.host.Runtime.Address.HostName),
+ macrosChiScopeIndex, strconv.Itoa(m.host.Runtime.Address.CHIScopeIndex), // TODO use appropriate namePart function
+ macrosChiScopeCycleIndex, strconv.Itoa(m.host.Runtime.Address.CHIScopeCycleIndex), // TODO use appropriate namePart function
+ macrosChiScopeCycleOffset, strconv.Itoa(m.host.Runtime.Address.CHIScopeCycleOffset), // TODO use appropriate namePart function
+ macrosClusterScopeIndex, strconv.Itoa(m.host.Runtime.Address.ClusterScopeIndex), // TODO use appropriate namePart function
+ macrosClusterScopeCycleIndex, strconv.Itoa(m.host.Runtime.Address.ClusterScopeCycleIndex), // TODO use appropriate namePart function
+ macrosClusterScopeCycleOffset, strconv.Itoa(m.host.Runtime.Address.ClusterScopeCycleOffset), // TODO use appropriate namePart function
macrosClusterScopeCycleHeadPointsToPreviousCycleTail, strconv.Itoa(clusterScopeIndexOfPreviousCycleTail(m.host)),
)
}
// newMapMacroReplacerHost
-func (m *macrosEngine) newMapMacroReplacerHost() *util.MapReplacer {
+func (m *MacrosEngine) newMapMacroReplacerHost() *util.MapReplacer {
return util.NewMapReplacer(m.newLineMacroReplacerHost())
}
diff --git a/pkg/model/chi/namer.go b/pkg/model/chi/namer.go
index 6a531caf3..6a5a4f4bc 100644
--- a/pkg/model/chi/namer.go
+++ b/pkg/model/chi/namer.go
@@ -114,125 +114,93 @@ func newNamer(ctx namerContext) *namer {
}
}
-// namePartNamespace
-func (n *namer) namePartNamespace(name string) string {
- var _len int
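+// lenCHI returns the max length of a CHI-level name part for the current naming context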
+func (n *namer) lenCHI() int {
if n.ctx == namerContextLabels {
- _len = namePartChiMaxLenLabelsCtx
+ return namePartChiMaxLenLabelsCtx
} else {
- _len = namePartChiMaxLenNamesCtx
+ return namePartChiMaxLenNamesCtx
}
- return sanitize(util.StringHead(name, _len))
+}
+
+// namePartNamespace
+func (n *namer) namePartNamespace(name string) string {
+ return sanitize(util.StringHead(name, n.lenCHI()))
}
// namePartChiName
func (n *namer) namePartChiName(name string) string {
- var _len int
- if n.ctx == namerContextLabels {
- _len = namePartChiMaxLenLabelsCtx
- } else {
- _len = namePartChiMaxLenNamesCtx
- }
- return sanitize(util.StringHead(name, _len))
+ return sanitize(util.StringHead(name, n.lenCHI()))
}
// namePartChiNameID
func (n *namer) namePartChiNameID(name string) string {
- var _len int
+ return util.CreateStringID(name, n.lenCHI())
+}
+
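+// lenCluster returns the max length of a cluster-level name part for the current naming context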
+func (n *namer) lenCluster() int {
if n.ctx == namerContextLabels {
- _len = namePartChiMaxLenLabelsCtx
+ return namePartClusterMaxLenLabelsCtx
} else {
- _len = namePartChiMaxLenNamesCtx
+ return namePartClusterMaxLenNamesCtx
}
- return util.CreateStringID(name, _len)
}
// namePartClusterName
func (n *namer) namePartClusterName(name string) string {
- var _len int
- if n.ctx == namerContextLabels {
- _len = namePartClusterMaxLenLabelsCtx
- } else {
- _len = namePartClusterMaxLenNamesCtx
- }
- return sanitize(util.StringHead(name, _len))
+ return sanitize(util.StringHead(name, n.lenCluster()))
}
// namePartClusterNameID
func (n *namer) namePartClusterNameID(name string) string {
- var _len int
+ return util.CreateStringID(name, n.lenCluster())
+}
+
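+// lenShard returns the max length of a shard-level name part for the current naming context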
+func (n *namer) lenShard() int {
if n.ctx == namerContextLabels {
- _len = namePartClusterMaxLenLabelsCtx
+ return namePartShardMaxLenLabelsCtx
} else {
- _len = namePartClusterMaxLenNamesCtx
+ return namePartShardMaxLenNamesCtx
}
- return util.CreateStringID(name, _len)
}
// namePartShardName
func (n *namer) namePartShardName(name string) string {
- var _len int
- if n.ctx == namerContextLabels {
- _len = namePartShardMaxLenLabelsCtx
- } else {
- _len = namePartShardMaxLenNamesCtx
- }
- return sanitize(util.StringHead(name, _len))
+ return sanitize(util.StringHead(name, n.lenShard()))
}
// namePartShardNameID
func (n *namer) namePartShardNameID(name string) string {
- var _len int
+ return util.CreateStringID(name, n.lenShard())
+}
+
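+// lenReplica returns the max length of a replica-level name part for the current naming context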
+func (n *namer) lenReplica() int {
if n.ctx == namerContextLabels {
- _len = namePartShardMaxLenLabelsCtx
+ return namePartReplicaMaxLenLabelsCtx
} else {
- _len = namePartShardMaxLenNamesCtx
+ return namePartReplicaMaxLenNamesCtx
}
- return util.CreateStringID(name, _len)
}
// namePartReplicaName
func (n *namer) namePartReplicaName(name string) string {
- var _len int
- if n.ctx == namerContextLabels {
- _len = namePartReplicaMaxLenLabelsCtx
- } else {
- _len = namePartReplicaMaxLenNamesCtx
- }
- return sanitize(util.StringHead(name, _len))
+ return sanitize(util.StringHead(name, n.lenReplica()))
}
// namePartReplicaNameID
func (n *namer) namePartReplicaNameID(name string) string {
- var _len int
- if n.ctx == namerContextLabels {
- _len = namePartReplicaMaxLenLabelsCtx
- } else {
- _len = namePartReplicaMaxLenNamesCtx
- }
- return util.CreateStringID(name, _len)
+ return util.CreateStringID(name, n.lenReplica())
}
// namePartHostName
func (n *namer) namePartHostName(name string) string {
- var _len int
- if n.ctx == namerContextLabels {
- _len = namePartReplicaMaxLenLabelsCtx
- } else {
- _len = namePartReplicaMaxLenNamesCtx
- }
- return sanitize(util.StringHead(name, _len))
+ return sanitize(util.StringHead(name, n.lenReplica()))
}
// namePartHostNameID
func (n *namer) namePartHostNameID(name string) string {
- var _len int
- if n.ctx == namerContextLabels {
- _len = namePartReplicaMaxLenLabelsCtx
- } else {
- _len = namePartReplicaMaxLenNamesCtx
- }
- return util.CreateStringID(name, _len)
+ return util.CreateStringID(name, n.lenReplica())
}
// getNamePartNamespace
@@ -243,13 +211,13 @@ func (n *namer) getNamePartNamespace(obj interface{}) string {
return n.namePartChiName(chi.Namespace)
case *api.Cluster:
cluster := obj.(*api.Cluster)
- return n.namePartChiName(cluster.Address.Namespace)
+ return n.namePartChiName(cluster.Runtime.Address.Namespace)
case *api.ChiShard:
shard := obj.(*api.ChiShard)
- return n.namePartChiName(shard.Address.Namespace)
+ return n.namePartChiName(shard.Runtime.Address.Namespace)
case *api.ChiHost:
host := obj.(*api.ChiHost)
- return n.namePartChiName(host.Address.Namespace)
+ return n.namePartChiName(host.Runtime.Address.Namespace)
}
return "ERROR"
@@ -263,13 +231,13 @@ func (n *namer) getNamePartCHIName(obj interface{}) string {
return n.namePartChiName(chi.Name)
case *api.Cluster:
cluster := obj.(*api.Cluster)
- return n.namePartChiName(cluster.Address.CHIName)
+ return n.namePartChiName(cluster.Runtime.Address.CHIName)
case *api.ChiShard:
shard := obj.(*api.ChiShard)
- return n.namePartChiName(shard.Address.CHIName)
+ return n.namePartChiName(shard.Runtime.Address.CHIName)
case *api.ChiHost:
host := obj.(*api.ChiHost)
- return n.namePartChiName(host.Address.CHIName)
+ return n.namePartChiName(host.Runtime.Address.CHIName)
}
return "ERROR"
@@ -280,13 +248,13 @@ func (n *namer) getNamePartClusterName(obj interface{}) string {
switch obj.(type) {
case *api.Cluster:
cluster := obj.(*api.Cluster)
- return n.namePartClusterName(cluster.Address.ClusterName)
+ return n.namePartClusterName(cluster.Runtime.Address.ClusterName)
case *api.ChiShard:
shard := obj.(*api.ChiShard)
- return n.namePartClusterName(shard.Address.ClusterName)
+ return n.namePartClusterName(shard.Runtime.Address.ClusterName)
case *api.ChiHost:
host := obj.(*api.ChiHost)
- return n.namePartClusterName(host.Address.ClusterName)
+ return n.namePartClusterName(host.Runtime.Address.ClusterName)
}
return "ERROR"
@@ -297,10 +265,10 @@ func (n *namer) getNamePartShardName(obj interface{}) string {
switch obj.(type) {
case *api.ChiShard:
shard := obj.(*api.ChiShard)
- return n.namePartShardName(shard.Address.ShardName)
+ return n.namePartShardName(shard.Runtime.Address.ShardName)
case *api.ChiHost:
host := obj.(*api.ChiHost)
- return n.namePartShardName(host.Address.ShardName)
+ return n.namePartShardName(host.Runtime.Address.ShardName)
}
return "ERROR"
@@ -308,67 +276,67 @@ func (n *namer) getNamePartShardName(obj interface{}) string {
// getNamePartReplicaName
func (n *namer) getNamePartReplicaName(host *api.ChiHost) string {
- return n.namePartReplicaName(host.Address.ReplicaName)
+ return n.namePartReplicaName(host.Runtime.Address.ReplicaName)
}
// getNamePartHostName
func (n *namer) getNamePartHostName(host *api.ChiHost) string {
- return n.namePartHostName(host.Address.HostName)
+ return n.namePartHostName(host.Runtime.Address.HostName)
}
// getNamePartCHIScopeCycleSize
func getNamePartCHIScopeCycleSize(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.CHIScopeCycleSize)
+ return strconv.Itoa(host.Runtime.Address.CHIScopeCycleSize)
}
// getNamePartCHIScopeCycleIndex
func getNamePartCHIScopeCycleIndex(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.CHIScopeCycleIndex)
+ return strconv.Itoa(host.Runtime.Address.CHIScopeCycleIndex)
}
// getNamePartCHIScopeCycleOffset
func getNamePartCHIScopeCycleOffset(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.CHIScopeCycleOffset)
+ return strconv.Itoa(host.Runtime.Address.CHIScopeCycleOffset)
}
// getNamePartClusterScopeCycleSize
func getNamePartClusterScopeCycleSize(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.ClusterScopeCycleSize)
+ return strconv.Itoa(host.Runtime.Address.ClusterScopeCycleSize)
}
// getNamePartClusterScopeCycleIndex
func getNamePartClusterScopeCycleIndex(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.ClusterScopeCycleIndex)
+ return strconv.Itoa(host.Runtime.Address.ClusterScopeCycleIndex)
}
// getNamePartClusterScopeCycleOffset
func getNamePartClusterScopeCycleOffset(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.ClusterScopeCycleOffset)
+ return strconv.Itoa(host.Runtime.Address.ClusterScopeCycleOffset)
}
// getNamePartCHIScopeIndex
func getNamePartCHIScopeIndex(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.CHIScopeIndex)
+ return strconv.Itoa(host.Runtime.Address.CHIScopeIndex)
}
// getNamePartClusterScopeIndex
func getNamePartClusterScopeIndex(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.ClusterScopeIndex)
+ return strconv.Itoa(host.Runtime.Address.ClusterScopeIndex)
}
// getNamePartShardScopeIndex
func getNamePartShardScopeIndex(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.ShardScopeIndex)
+ return strconv.Itoa(host.Runtime.Address.ShardScopeIndex)
}
// getNamePartReplicaScopeIndex
func getNamePartReplicaScopeIndex(host *api.ChiHost) string {
- return strconv.Itoa(host.Address.ReplicaScopeIndex)
+ return strconv.Itoa(host.Runtime.Address.ReplicaScopeIndex)
}
// CreateConfigMapHostName returns a name for a ConfigMap for replica's personal config
func CreateConfigMapHostName(host *api.ChiHost) string {
- return macro(host).Line(configMapHostNamePattern)
+ return Macro(host).Line(configMapHostNamePattern)
}
// CreateConfigMapHostMigrationName returns a name for a ConfigMap for replica's personal config
@@ -378,12 +346,12 @@ func CreateConfigMapHostName(host *api.ChiHost) string {
// CreateConfigMapCommonName returns a name for a ConfigMap for replica's common config
func CreateConfigMapCommonName(chi *api.ClickHouseInstallation) string {
- return macro(chi).Line(configMapCommonNamePattern)
+ return Macro(chi).Line(configMapCommonNamePattern)
}
// CreateConfigMapCommonUsersName returns a name for a ConfigMap for replica's common users config
func CreateConfigMapCommonUsersName(chi *api.ClickHouseInstallation) string {
- return macro(chi).Line(configMapCommonUsersNamePattern)
+ return Macro(chi).Line(configMapCommonUsersNamePattern)
}
// CreateCHIServiceName creates a name of a root ClickHouseInstallation Service resource
@@ -404,7 +372,7 @@ func CreateCHIServiceName(chi *api.ClickHouseInstallation) string {
}
// Create Service name based on name pattern available
- return macro(chi).Line(pattern)
+ return Macro(chi).Line(pattern)
}
// CreateCHIServiceFQDN creates a FQD name of a root ClickHouseInstallation Service resource
@@ -446,7 +414,7 @@ func CreateClusterServiceName(cluster *api.Cluster) string {
}
// Create Service name based on name pattern available
- return macro(cluster).Line(pattern)
+ return Macro(cluster).Line(pattern)
}
// CreateShardServiceName returns a name of a shard's Service
@@ -467,7 +435,7 @@ func CreateShardServiceName(shard *api.ChiShard) string {
}
// Create Service name based on name pattern available
- return macro(shard).Line(pattern)
+ return Macro(shard).Line(pattern)
}
// CreateShardName returns a name of a shard
@@ -498,6 +466,11 @@ func CreateHostName(host *api.ChiHost, shard *api.ChiShard, shardIndex int, repl
return fmt.Sprintf("%s-%s", shard.Name, replica.Name)
}
+// CreateHostTemplateName returns a name of a HostTemplate
+func CreateHostTemplateName(host *api.ChiHost) string {
+ return "HostTemplate" + host.Name
+}
+
// CreateInstanceHostname returns hostname (pod-hostname + service or FQDN) which can be used as a replica name
// in all places where ClickHouse requires replica name. These are such places as:
// 1. "remote_servers.xml" config file
@@ -524,27 +497,22 @@ func IsAutoGeneratedHostName(
replica *api.ChiReplica,
replicaIndex int,
) bool {
- if name == CreateHostName(host, shard, shardIndex, replica, replicaIndex) {
+ switch {
+ case name == CreateHostName(host, shard, shardIndex, replica, replicaIndex):
// Current version of the name
return true
- }
-
- if name == fmt.Sprintf("%d-%d", shardIndex, replicaIndex) {
+ case name == fmt.Sprintf("%d-%d", shardIndex, replicaIndex):
// old version - index-index
return true
- }
-
- if name == fmt.Sprintf("%d", shardIndex) {
+ case name == fmt.Sprintf("%d", shardIndex):
// old version - index
return true
- }
-
- if name == fmt.Sprintf("%d", replicaIndex) {
+ case name == fmt.Sprintf("%d", replicaIndex):
// old version - index
return true
+ default:
+ return false
}
-
- return false
}
// CreateStatefulSetName creates a name of a StatefulSet for ClickHouse instance
@@ -565,7 +533,7 @@ func CreateStatefulSetName(host *api.ChiHost) string {
}
// Create StatefulSet name based on name pattern available
- return macro(host).Line(pattern)
+ return Macro(host).Line(pattern)
}
// CreateStatefulSetServiceName returns a name of a StatefulSet-related Service for ClickHouse instance
@@ -586,7 +554,7 @@ func CreateStatefulSetServiceName(host *api.ChiHost) string {
}
// Create Service name based on name pattern available
- return macro(host).Line(pattern)
+ return Macro(host).Line(pattern)
}
// CreatePodHostname returns a hostname of a Pod of a ClickHouse instance.
@@ -606,16 +574,16 @@ func createPodFQDN(host *api.ChiHost) string {
// Start with default pattern
pattern := podFQDNPattern
- if host.CHI.Spec.NamespaceDomainPattern != "" {
+ if host.GetCHI().Spec.NamespaceDomainPattern != "" {
// NamespaceDomainPattern has been explicitly specified
- pattern = "%s." + host.CHI.Spec.NamespaceDomainPattern
+ pattern = "%s." + host.GetCHI().Spec.NamespaceDomainPattern
}
// Create FQDN based on pattern available
return fmt.Sprintf(
pattern,
CreatePodHostname(host),
- host.Address.Namespace,
+ host.Runtime.Address.Namespace,
)
}
@@ -686,7 +654,7 @@ func CreateFQDNs(obj interface{}, scope interface{}, excludeSelf bool) []string
// For example, `template` can be defined in operator config:
// HostRegexpTemplate: chi-{chi}-[^.]+\\d+-\\d+\\.{namespace}.svc.cluster.local$"
func CreatePodHostnameRegexp(chi *api.ClickHouseInstallation, template string) string {
- return macro(chi).Line(template)
+ return Macro(chi).Line(template)
}
// CreatePodName creates Pod name based on specified StatefulSet or Host
@@ -775,13 +743,13 @@ func CreateClusterAutoSecretName(cluster *api.Cluster) string {
if cluster.Name == "" {
return fmt.Sprintf(
"%s-auto-secret",
- cluster.CHI.Name,
+ cluster.Runtime.CHI.Name,
)
}
return fmt.Sprintf(
"%s-%s-auto-secret",
- cluster.CHI.Name,
+ cluster.Runtime.CHI.Name,
cluster.Name,
)
}
diff --git a/pkg/model/chi/normalizer/context.go b/pkg/model/chi/normalizer/context.go
new file mode 100644
index 000000000..ca9bdb016
--- /dev/null
+++ b/pkg/model/chi/normalizer/context.go
@@ -0,0 +1,54 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package normalizer
+
+import api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+
+// Context specifies CHI-related normalization context
+type Context struct {
+ // chi specifies current CHI being normalized
+ chi *api.ClickHouseInstallation
+ // options specifies normalization options
+ options *Options
+}
+
+// NewContext creates new Context
+func NewContext(options *Options) *Context {
+ return &Context{
+ options: options,
+ }
+}
+
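+// GetTarget returns the CHI being normalized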
+func (c *Context) GetTarget() *api.ClickHouseInstallation {
+ if c == nil {
+ return nil
+ }
+ return c.chi
+}
+
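+// SetTarget sets the CHI to be normalized and returns it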
+func (c *Context) SetTarget(chi *api.ClickHouseInstallation) *api.ClickHouseInstallation {
+ if c == nil {
+ return nil
+ }
+ c.chi = chi
+ return c.chi
+}
+
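+// Options returns normalization options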
+func (c *Context) Options() *Options {
+ if c == nil {
+ return nil
+ }
+ return c.options
+}
diff --git a/pkg/model/chi/normalizer/entities/host.go b/pkg/model/chi/normalizer/entities/host.go
new file mode 100644
index 000000000..ac40667e2
--- /dev/null
+++ b/pkg/model/chi/normalizer/entities/host.go
@@ -0,0 +1,35 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package entities
+
+import (
+ core "k8s.io/api/core/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+)
+
+// NormalizeHostPorts ensures the host's ports are reasonable: invalid ports are reset to unassigned
+func NormalizeHostPorts(host *api.ChiHost) {
+ // Walk over all invalid ports of the host and reset each of them to the unassigned value
+ model.HostWalkInvalidPorts(
+ host,
+ func(name string, port *int32, protocol core.Protocol) bool {
+ *port = api.PortUnassigned()
+ // Do not abort, continue iterating
+ return false
+ },
+ )
+}
diff --git a/pkg/model/chi/normalizer.go b/pkg/model/chi/normalizer/normalizer.go
similarity index 67%
rename from pkg/model/chi/normalizer.go
rename to pkg/model/chi/normalizer/normalizer.go
index 01bca4d8f..64726fdaf 100644
--- a/pkg/model/chi/normalizer.go
+++ b/pkg/model/chi/normalizer/normalizer.go
@@ -12,10 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package chi
+package normalizer
import (
- "context"
"crypto/sha256"
"encoding/hex"
"fmt"
@@ -23,252 +22,114 @@ import (
"sort"
"strings"
- core "k8s.io/api/core/v1"
- meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- kube "k8s.io/client-go/kubernetes"
-
"github.com/google/uuid"
+ core "k8s.io/api/core/v1"
+
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/apis/deployment"
"github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/controller"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/creator"
+ entitiesNormalizer "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer/entities"
+ templatesNormalizer "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer/templates"
"github.com/altinity/clickhouse-operator/pkg/util"
)
-// NormalizerContext specifies CHI-related normalization context
-type NormalizerContext struct {
- // start specifies start CHI from which normalization has started
- start *api.ClickHouseInstallation
- // chi specifies current CHI being normalized
- chi *api.ClickHouseInstallation
- // options specifies normalization options
- options *NormalizerOptions
-}
-
-// NewNormalizerContext creates new NormalizerContext
-func NewNormalizerContext(options *NormalizerOptions) *NormalizerContext {
- return &NormalizerContext{
- options: options,
- }
-}
-
-// NormalizerOptions specifies normalization options
-type NormalizerOptions struct {
- // WithDefaultCluster specifies whether to insert default cluster in case no cluster specified
- WithDefaultCluster bool
- // DefaultUserAdditionalIPs specifies set of additional IPs applied to default user
- DefaultUserAdditionalIPs []string
- DefaultUserInsertHostRegex bool
-}
-
-// NewNormalizerOptions creates new NormalizerOptions
-func NewNormalizerOptions() *NormalizerOptions {
- return &NormalizerOptions{
- DefaultUserInsertHostRegex: true,
- }
-}
+type secretGet func(namespace, name string) (*core.Secret, error)
// Normalizer specifies structures normalizer
type Normalizer struct {
- kubeClient kube.Interface
- ctx *NormalizerContext
+ secretGet secretGet
+ ctx *Context
}
// NewNormalizer creates new normalizer
-func NewNormalizer(kubeClient kube.Interface) *Normalizer {
+func NewNormalizer(secretGet secretGet) *Normalizer {
return &Normalizer{
- kubeClient: kubeClient,
- }
-}
-
-func newCHI() *api.ClickHouseInstallation {
- return &api.ClickHouseInstallation{
- TypeMeta: meta.TypeMeta{
- Kind: api.ClickHouseInstallationCRDResourceKind,
- APIVersion: api.SchemeGroupVersion.String(),
- },
+ secretGet: secretGet,
}
}
// CreateTemplatedCHI produces ready-to-use CHI object
func (n *Normalizer) CreateTemplatedCHI(
chi *api.ClickHouseInstallation,
- options *NormalizerOptions,
+ options *Options,
) (*api.ClickHouseInstallation, error) {
// New CHI starts with new context
- n.ctx = NewNormalizerContext(options)
+ n.ctx = NewContext(options)
+
+ // Ensure normalization entity present
+ chi = n.ensureNormalizationEntity(chi)
- // Normalize start CHI
- chi = n.normalizeStartCHI(chi)
- // Create new chi that will be populated with data during normalization process
- n.ctx.chi = n.createBaseCHI()
+ // Create new target that will be populated with data during normalization process
+ n.ctx.SetTarget(n.createTarget())
- // At this moment context chi is either newly created 'empty' CHI or a system-wide template
+ // At this moment target is either newly created 'empty' CHI or a system-wide template
- // Apply templates - both auto and explicitly requested - on top of context chi
- n.applyCHITemplates(chi)
+ // Apply templates - both auto and explicitly requested - on top of context target
+ for _, template := range templatesNormalizer.ApplyCHITemplates(n.ctx.GetTarget(), chi) {
+ n.ctx.GetTarget().EnsureStatus().PushUsedTemplate(template)
+ }
- // After all templates applied, place provided CHI on top of the whole stack
- n.ctx.chi.MergeFrom(chi, api.MergeTypeOverrideByNonEmptyValues)
+ // After all templates applied, place provided CHI on top of the whole stack (target)
+ n.ctx.GetTarget().MergeFrom(chi, api.MergeTypeOverrideByNonEmptyValues)
return n.normalize()
}
-func (n *Normalizer) normalizeStartCHI(chi *api.ClickHouseInstallation) *api.ClickHouseInstallation {
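+// ensureNormalizationEntity makes sure there is a CHI to normalize, creating an empty one in case none is provided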
+func (n *Normalizer) ensureNormalizationEntity(chi *api.ClickHouseInstallation) *api.ClickHouseInstallation {
if chi == nil {
// No CHI specified - meaning we are building over provided 'empty' CHI with no clusters inside
- chi = newCHI()
- n.ctx.options.WithDefaultCluster = false
+ chi = creator.NewCHI()
+ n.ctx.Options().WithDefaultCluster = false
} else {
// Even in case having CHI provided, we need to insert default cluster in case no clusters specified
- n.ctx.options.WithDefaultCluster = true
+ n.ctx.Options().WithDefaultCluster = true
}
return chi
}
-func (n *Normalizer) createBaseCHI() *api.ClickHouseInstallation {
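+// createTarget returns the base CHI onto which templates and the provided CHI will be merged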
+func (n *Normalizer) createTarget() *api.ClickHouseInstallation {
// What base should be used to create CHI
if chop.Config().Template.CHI.Runtime.Template == nil {
// No template specified - start with clear page
- return newCHI()
+ return creator.NewCHI()
} else {
// Template specified - start with template
return chop.Config().Template.CHI.Runtime.Template.DeepCopy()
}
}
-// prepareListOfCHITemplates prepares list of CHI templates to be used by CHI
-func (n *Normalizer) prepareListOfCHITemplates(chi *api.ClickHouseInstallation) []api.ChiUseTemplate {
- // useTemplates specifies list of templates to be applied to the CHI
- var useTemplates []api.ChiUseTemplate
-
- // 1. Get list of auto templates available
- if autoTemplates := chop.Config().GetAutoTemplates(); len(autoTemplates) > 0 {
- log.V(1).M(chi).F().Info("Found auto-templates num: %d", len(autoTemplates))
- for _, template := range autoTemplates {
- log.V(1).M(chi).F().Info("Adding auto-template to list of applicable templates: %s/%s ", template.Namespace, template.Name)
- useTemplates = append(useTemplates, api.ChiUseTemplate{
- Name: template.Name,
- Namespace: template.Namespace,
- UseType: useTypeMerge,
- })
- }
- }
-
- // 2. Append templates, explicitly requested by the CHI
- if len(chi.Spec.UseTemplates) > 0 {
- log.V(1).M(chi).F().Info("Found manual-templates num: %d", len(chi.Spec.UseTemplates))
- useTemplates = append(useTemplates, chi.Spec.UseTemplates...)
- }
-
- // In case useTemplates must contain reasonable data, thus has to be normalized
- if len(useTemplates) > 0 {
- log.V(1).M(chi).F().Info("Found applicable templates num: %d", len(useTemplates))
- n.normalizeUseTemplates(useTemplates)
- }
-
- return useTemplates
-}
-
-// applyCHITemplates applies CHI templates over n.ctx.chi
-func (n *Normalizer) applyCHITemplates(chi *api.ClickHouseInstallation) {
- // At this moment n.chi is either newly created 'empty' CHI or a system-wide template
-
- // useTemplates specifies list of templates to be applied to the CHI
- useTemplates := n.prepareListOfCHITemplates(chi)
-
- // Apply templates - both auto and explicitly requested
- for i := range useTemplates {
- // Convenience wrapper
- useTemplate := &useTemplates[i]
- template := chop.Config().FindTemplate(useTemplate, chi.Namespace)
-
- if template == nil {
- log.V(1).M(chi).F().Warning("Skip template: %s/%s UNABLE to find listed template. ", useTemplate.Namespace, useTemplate.Name)
- continue // skip to the next template
- }
-
- // What CHI this template wants to be applied to?
- // This is determined by matching selector of the template and CHI's labels
- selector := template.Spec.Templating.GetCHISelector()
- labels := chi.Labels
-
- if !selector.Matches(labels) {
- // This template does not want to be applied to this CHI
- log.V(1).M(chi).F().Info("Skip template: %s/%s. Selector: %v does not match labels: %v", useTemplate.Namespace, useTemplate.Name, selector, labels)
- continue // skip to the next template
- }
-
- //
- // Template is found and matches, let's apply template
- //
-
- log.V(1).M(chi).F().Info("Apply template: %s/%s. Selector: %v matches labels: %v", useTemplate.Namespace, useTemplate.Name, selector, labels)
- n.ctx.chi = n.mergeCHIFromTemplate(n.ctx.chi, template)
-
- // And append used template to the list of used templates
- n.ctx.chi.EnsureStatus().PushUsedTemplate(useTemplate)
- } // list of templates
-
- log.V(1).M(chi).F().Info("Used templates count: %d", n.ctx.chi.EnsureStatus().GetUsedTemplatesCount())
-}
-
-func (n *Normalizer) mergeCHIFromTemplate(chi, template *api.ClickHouseInstallation) *api.ClickHouseInstallation {
- // Merge template's Spec over CHI's Spec
- (&chi.Spec).MergeFrom(&template.Spec, api.MergeTypeOverrideByNonEmptyValues)
-
- // Merge template's Labels over CHI's Labels
- chi.Labels = util.MergeStringMapsOverwrite(
- chi.Labels,
- util.CopyMapFilter(
- template.Labels,
- chop.Config().Label.Include,
- chop.Config().Label.Exclude,
- ),
- )
-
- // Merge template's Annotations over CHI's Annotations
- chi.Annotations = util.MergeStringMapsOverwrite(
- chi.Annotations, util.CopyMapFilter(
- template.Annotations,
- chop.Config().Annotation.Include,
- append(chop.Config().Annotation.Exclude, util.ListSkippedAnnotations()...),
- ),
- )
-
- return chi
-}
-
// normalize normalizes whole CHI.
// Returns normalized CHI
func (n *Normalizer) normalize() (*api.ClickHouseInstallation, error) {
// Walk over ChiSpec datatype fields
- n.ctx.chi.Spec.TaskID = n.normalizeTaskID(n.ctx.chi.Spec.TaskID)
- n.ctx.chi.Spec.UseTemplates = n.normalizeUseTemplates(n.ctx.chi.Spec.UseTemplates)
- n.ctx.chi.Spec.Stop = n.normalizeStop(n.ctx.chi.Spec.Stop)
- n.ctx.chi.Spec.Restart = n.normalizeRestart(n.ctx.chi.Spec.Restart)
- n.ctx.chi.Spec.Troubleshoot = n.normalizeTroubleshoot(n.ctx.chi.Spec.Troubleshoot)
- n.ctx.chi.Spec.NamespaceDomainPattern = n.normalizeNamespaceDomainPattern(n.ctx.chi.Spec.NamespaceDomainPattern)
- n.ctx.chi.Spec.Templating = n.normalizeTemplating(n.ctx.chi.Spec.Templating)
- n.ctx.chi.Spec.Reconciling = n.normalizeReconciling(n.ctx.chi.Spec.Reconciling)
- n.ctx.chi.Spec.Defaults = n.normalizeDefaults(n.ctx.chi.Spec.Defaults)
- n.ctx.chi.Spec.Configuration = n.normalizeConfiguration(n.ctx.chi.Spec.Configuration)
- n.ctx.chi.Spec.Templates = n.normalizeTemplates(n.ctx.chi.Spec.Templates)
+ n.ctx.GetTarget().Spec.TaskID = n.normalizeTaskID(n.ctx.GetTarget().Spec.TaskID)
+ n.ctx.GetTarget().Spec.UseTemplates = n.normalizeUseTemplates(n.ctx.GetTarget().Spec.UseTemplates)
+ n.ctx.GetTarget().Spec.Stop = n.normalizeStop(n.ctx.GetTarget().Spec.Stop)
+ n.ctx.GetTarget().Spec.Restart = n.normalizeRestart(n.ctx.GetTarget().Spec.Restart)
+ n.ctx.GetTarget().Spec.Troubleshoot = n.normalizeTroubleshoot(n.ctx.GetTarget().Spec.Troubleshoot)
+ n.ctx.GetTarget().Spec.NamespaceDomainPattern = n.normalizeNamespaceDomainPattern(n.ctx.GetTarget().Spec.NamespaceDomainPattern)
+ n.ctx.GetTarget().Spec.Templating = n.normalizeTemplating(n.ctx.GetTarget().Spec.Templating)
+ n.ctx.GetTarget().Spec.Reconciling = n.normalizeReconciling(n.ctx.GetTarget().Spec.Reconciling)
+ n.ctx.GetTarget().Spec.Defaults = n.normalizeDefaults(n.ctx.GetTarget().Spec.Defaults)
+ n.ctx.GetTarget().Spec.Configuration = n.normalizeConfiguration(n.ctx.GetTarget().Spec.Configuration)
+ n.ctx.GetTarget().Spec.Templates = n.normalizeTemplates(n.ctx.GetTarget().Spec.Templates)
// UseTemplates already done
n.finalizeCHI()
n.fillStatus()
- return n.ctx.chi, nil
+ return n.ctx.GetTarget(), nil
}
// finalizeCHI performs some finalization tasks, which should be done after CHI is normalized
func (n *Normalizer) finalizeCHI() {
- n.ctx.chi.FillSelfCalculatedAddressInfo()
- n.ctx.chi.FillCHIPointer()
- n.ctx.chi.WalkHosts(func(host *api.ChiHost) error {
+ n.ctx.GetTarget().FillSelfCalculatedAddressInfo()
+ n.ctx.GetTarget().FillCHIPointer()
+ n.ctx.GetTarget().WalkHosts(func(host *api.ChiHost) error {
hostTemplate := n.getHostTemplate(host)
hostApplyHostTemplate(host, hostTemplate)
return nil
@@ -278,22 +139,20 @@ func (n *Normalizer) finalizeCHI() {
// fillCHIAddressInfo
func (n *Normalizer) fillCHIAddressInfo() {
- n.ctx.chi.WalkHosts(func(host *api.ChiHost) error {
- host.Address.StatefulSet = CreateStatefulSetName(host)
- host.Address.FQDN = CreateFQDN(host)
+ n.ctx.GetTarget().WalkHosts(func(host *api.ChiHost) error {
+ host.Runtime.Address.StatefulSet = model.CreateStatefulSetName(host)
+ host.Runtime.Address.FQDN = model.CreateFQDN(host)
return nil
})
}
// getHostTemplate gets Host Template to be used to normalize Host
func (n *Normalizer) getHostTemplate(host *api.ChiHost) *api.ChiHostTemplate {
- statefulSetName := CreateStatefulSetName(host)
-
// Which host template would be used - either explicitly defined in or a default one
hostTemplate, ok := host.GetHostTemplate()
if ok {
- // Host references known HostTemplate
- log.V(2).M(host).F().Info("StatefulSet %s uses custom hostTemplate %s", statefulSetName, hostTemplate.Name)
+ // Host explicitly references known HostTemplate
+ log.V(2).M(host).F().Info("host: %s uses custom hostTemplate %s", host.Name, hostTemplate.Name)
return hostTemplate
}
@@ -301,20 +160,19 @@ func (n *Normalizer) getHostTemplate(host *api.ChiHost) *api.ChiHostTemplate {
// However, with default template there is a nuance - hostNetwork requires different default host template
// Check hostNetwork case at first
- podTemplate, ok := host.GetPodTemplate()
- if ok {
+ if podTemplate, ok := host.GetPodTemplate(); ok {
if podTemplate.Spec.HostNetwork {
// HostNetwork
- hostTemplate = newDefaultHostTemplateForHostNetwork(statefulSetName)
+ hostTemplate = creator.NewDefaultHostTemplateForHostNetwork(model.CreateHostTemplateName(host))
}
}
- // In case hostTemplate still is not assigned - use default one
+ // In case hostTemplate still is not picked - use default one
if hostTemplate == nil {
- hostTemplate = newDefaultHostTemplate(statefulSetName)
+ hostTemplate = creator.NewDefaultHostTemplate(model.CreateHostTemplateName(host))
}
- log.V(3).M(host).F().Info("StatefulSet %s use default hostTemplate", statefulSetName)
+ log.V(3).M(host).F().Info("host: %s use default hostTemplate", host.Name)
return hostTemplate
}
@@ -348,39 +206,39 @@ func hostApplyHostTemplate(host *api.ChiHost, template *api.ChiHostTemplate) {
}
case deployment.PortDistributionClusterScopeIndex:
if api.IsPortUnassigned(host.TCPPort) {
- base := chDefaultTCPPortNumber
+ base := model.ChDefaultTCPPortNumber
if api.IsPortAssigned(template.Spec.TCPPort) {
base = template.Spec.TCPPort
}
- host.TCPPort = base + int32(host.Address.ClusterScopeIndex)
+ host.TCPPort = base + int32(host.Runtime.Address.ClusterScopeIndex)
}
if api.IsPortUnassigned(host.TLSPort) {
- base := chDefaultTLSPortNumber
+ base := model.ChDefaultTLSPortNumber
if api.IsPortAssigned(template.Spec.TLSPort) {
base = template.Spec.TLSPort
}
- host.TLSPort = base + int32(host.Address.ClusterScopeIndex)
+ host.TLSPort = base + int32(host.Runtime.Address.ClusterScopeIndex)
}
if api.IsPortUnassigned(host.HTTPPort) {
- base := chDefaultHTTPPortNumber
+ base := model.ChDefaultHTTPPortNumber
if api.IsPortAssigned(template.Spec.HTTPPort) {
base = template.Spec.HTTPPort
}
- host.HTTPPort = base + int32(host.Address.ClusterScopeIndex)
+ host.HTTPPort = base + int32(host.Runtime.Address.ClusterScopeIndex)
}
if api.IsPortUnassigned(host.HTTPSPort) {
- base := chDefaultHTTPSPortNumber
+ base := model.ChDefaultHTTPSPortNumber
if api.IsPortAssigned(template.Spec.HTTPSPort) {
base = template.Spec.HTTPSPort
}
- host.HTTPSPort = base + int32(host.Address.ClusterScopeIndex)
+ host.HTTPSPort = base + int32(host.Runtime.Address.ClusterScopeIndex)
}
if api.IsPortUnassigned(host.InterserverHTTPPort) {
- base := chDefaultInterserverHTTPPortNumber
+ base := model.ChDefaultInterserverHTTPPortNumber
if api.IsPortAssigned(template.Spec.InterserverHTTPPort) {
base = template.Spec.InterserverHTTPPort
}
- host.InterserverHTTPPort = base + int32(host.Address.ClusterScopeIndex)
+ host.InterserverHTTPPort = base + int32(host.Runtime.Address.ClusterScopeIndex)
}
}
}
@@ -393,13 +251,16 @@ func hostApplyHostTemplate(host *api.ChiHost, template *api.ChiHostTemplate) {
// hostApplyPortsFromSettings
func hostApplyPortsFromSettings(host *api.ChiHost) {
// Use host personal settings at first
- ensurePortValuesFromSettings(host, host.GetSettings(), false)
+ hostEnsurePortValuesFromSettings(host, host.GetSettings(), false)
// Fallback to common settings
- ensurePortValuesFromSettings(host, host.GetCHI().Spec.Configuration.Settings, true)
+ hostEnsurePortValuesFromSettings(host, host.GetCHI().Spec.Configuration.Settings, true)
}
-// ensurePortValuesFromSettings fetches port spec from settings, if any provided
-func ensurePortValuesFromSettings(host *api.ChiHost, settings *api.Settings, final bool) {
+// hostEnsurePortValuesFromSettings fetches port spec from settings, if any provided
+func hostEnsurePortValuesFromSettings(host *api.ChiHost, settings *api.Settings, final bool) {
+ //
+ // 1. Setup fallback/default ports
+ //
// For intermittent (non-final) setup fallback values should be from "MustBeAssignedLater" family,
// because this is not final setup (just intermittent) and all these ports may be overwritten later
fallbackTCPPort := api.PortUnassigned()
@@ -408,19 +269,22 @@ func ensurePortValuesFromSettings(host *api.ChiHost, settings *api.Settings, fin
fallbackHTTPSPort := api.PortUnassigned()
fallbackInterserverHTTPPort := api.PortUnassigned()
+ // On the other hand, for final setup we need to assign real numbers to ports
if final {
- // This is final setup and we need to assign real numbers to ports
if host.IsInsecure() {
- fallbackTCPPort = chDefaultTCPPortNumber
- fallbackHTTPPort = chDefaultHTTPPortNumber
+ fallbackTCPPort = model.ChDefaultTCPPortNumber
+ fallbackHTTPPort = model.ChDefaultHTTPPortNumber
}
if host.IsSecure() {
- fallbackTLSPort = chDefaultTLSPortNumber
- fallbackHTTPSPort = chDefaultHTTPSPortNumber
+ fallbackTLSPort = model.ChDefaultTLSPortNumber
+ fallbackHTTPSPort = model.ChDefaultHTTPSPortNumber
}
- fallbackInterserverHTTPPort = chDefaultInterserverHTTPPortNumber
+ fallbackInterserverHTTPPort = model.ChDefaultInterserverHTTPPortNumber
}
+ //
+ // 2. Setup ports
+ //
host.TCPPort = api.EnsurePortValue(host.TCPPort, settings.GetTCPPort(), fallbackTCPPort)
host.TLSPort = api.EnsurePortValue(host.TLSPort, settings.GetTCPPortSecure(), fallbackTLSPort)
host.HTTPPort = api.EnsurePortValue(host.HTTPPort, settings.GetHTTPPort(), fallbackHTTPPort)
@@ -430,16 +294,16 @@ func ensurePortValuesFromSettings(host *api.ChiHost, settings *api.Settings, fin
// fillStatus fills .status section of a CHI with values based on current CHI
func (n *Normalizer) fillStatus() {
- endpoint := CreateCHIServiceFQDN(n.ctx.chi)
+ endpoint := model.CreateCHIServiceFQDN(n.ctx.GetTarget())
pods := make([]string, 0)
fqdns := make([]string, 0)
- n.ctx.chi.WalkHosts(func(host *api.ChiHost) error {
- pods = append(pods, CreatePodName(host))
- fqdns = append(fqdns, CreateFQDN(host))
+ n.ctx.GetTarget().WalkHosts(func(host *api.ChiHost) error {
+ pods = append(pods, model.CreatePodName(host))
+ fqdns = append(fqdns, model.CreateFQDN(host))
return nil
})
ip, _ := chop.Get().ConfigManager.GetRuntimeParam(deployment.OPERATOR_POD_IP)
- n.ctx.chi.FillStatus(endpoint, pods, fqdns, ip)
+ n.ctx.GetTarget().FillStatus(endpoint, pods, fqdns, ip)
}
// normalizeTaskID normalizes .spec.taskID
@@ -488,12 +352,21 @@ func (n *Normalizer) normalizeTroubleshoot(troubleshoot *api.StringBool) *api.St
return api.NewStringBool(false)
}
+func isNamespaceDomainPatternValid(namespaceDomainPattern string) bool {
+ return strings.Count(namespaceDomainPattern, "%s") <= 1
+}
+
// normalizeNamespaceDomainPattern normalizes .spec.namespaceDomainPattern
func (n *Normalizer) normalizeNamespaceDomainPattern(namespaceDomainPattern string) string {
- if strings.Count(namespaceDomainPattern, "%s") > 1 {
- return ""
+ if isNamespaceDomainPatternValid(namespaceDomainPattern) {
+ return namespaceDomainPattern
}
- return namespaceDomainPattern
+ // In case namespaceDomainPattern is not valid - do not use it
+ return ""
}
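// Illustrative sketch, not part of the patch: the namespace domain pattern is
// expected to contain at most one "%s" placeholder; anything else is rejected
// and normalized away to an empty string. Sample values are hypothetical.
func exampleNamespaceDomainPatterns() {
	_ = isNamespaceDomainPatternValid("%s.svc.cluster.local")    // true  -> kept by normalizeNamespaceDomainPattern
	_ = isNamespaceDomainPatternValid("%s.%s.svc.cluster.local") // false -> normalized to ""
}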
// normalizeDefaults normalizes .spec.defaults
@@ -525,13 +398,13 @@ func (n *Normalizer) normalizeConfiguration(conf *api.Configuration) *api.Config
conf = api.NewConfiguration()
}
conf.Zookeeper = n.normalizeConfigurationZookeeper(conf.Zookeeper)
- n.normalizeConfigurationSettingsBased(conf)
+ n.normalizeConfigurationAllSettingsBasedSections(conf)
conf.Clusters = n.normalizeClusters(conf.Clusters)
return conf
}
-// normalizeConfigurationSettingsBased normalizes Settings-based configuration
-func (n *Normalizer) normalizeConfigurationSettingsBased(conf *api.Configuration) {
+// normalizeConfigurationAllSettingsBasedSections normalizes Settings-based configuration
+func (n *Normalizer) normalizeConfigurationAllSettingsBasedSections(conf *api.Configuration) {
conf.Users = n.normalizeConfigurationUsers(conf.Users)
conf.Profiles = n.normalizeConfigurationProfiles(conf.Profiles)
conf.Quotas = n.normalizeConfigurationQuotas(conf.Quotas)
@@ -546,26 +419,10 @@ func (n *Normalizer) normalizeTemplates(templates *api.ChiTemplates) *api.ChiTem
return nil
}
- for i := range templates.HostTemplates {
- hostTemplate := &templates.HostTemplates[i]
- n.normalizeHostTemplate(hostTemplate)
- }
-
- for i := range templates.PodTemplates {
- podTemplate := &templates.PodTemplates[i]
- n.normalizePodTemplate(podTemplate)
- }
-
- for i := range templates.VolumeClaimTemplates {
- vcTemplate := &templates.VolumeClaimTemplates[i]
- n.normalizeVolumeClaimTemplate(vcTemplate)
- }
-
- for i := range templates.ServiceTemplates {
- serviceTemplate := &templates.ServiceTemplates[i]
- n.normalizeServiceTemplate(serviceTemplate)
- }
-
+ n.normalizeHostTemplates(templates)
+ n.normalizePodTemplates(templates)
+ n.normalizeVolumeClaimTemplates(templates)
+ n.normalizeServiceTemplates(templates)
return templates
}
@@ -648,270 +505,95 @@ func (n *Normalizer) normalizeCleanup(str *string, value string) {
}
}
-// normalizeHostTemplate normalizes .spec.templates.hostTemplates
-func (n *Normalizer) normalizeHostTemplate(template *api.ChiHostTemplate) {
- // Name
-
- // PortDistribution
-
- if template.PortDistribution == nil {
- // In case no PortDistribution provided - setup default one
- template.PortDistribution = []api.ChiPortDistribution{
- {Type: deployment.PortDistributionUnspecified},
- }
- }
- // Normalize PortDistribution
- for i := range template.PortDistribution {
- portDistribution := &template.PortDistribution[i]
- switch portDistribution.Type {
- case
- deployment.PortDistributionUnspecified,
- deployment.PortDistributionClusterScopeIndex:
- // distribution is known
- default:
- // distribution is not known
- portDistribution.Type = deployment.PortDistributionUnspecified
- }
+func (n *Normalizer) normalizeHostTemplates(templates *api.ChiTemplates) {
+ for i := range templates.HostTemplates {
+ n.normalizeHostTemplate(&templates.HostTemplates[i])
}
-
- // Spec
- n.normalizeHostTemplateSpec(&template.Spec)
-
- // Introduce HostTemplate into Index
- n.ctx.chi.Spec.Templates.EnsureHostTemplatesIndex().Set(template.Name, template)
}
-// normalizePodTemplate normalizes .spec.templates.podTemplates
-func (n *Normalizer) normalizePodTemplate(template *api.ChiPodTemplate) {
- // Name
-
- // Zone
- if len(template.Zone.Values) == 0 {
- // In case no values specified - no key is reasonable
- template.Zone.Key = ""
- } else if template.Zone.Key == "" {
- // We have values specified, but no key
- // Use default zone key in this case
- template.Zone.Key = core.LabelTopologyZone
- } else {
- // We have both key and value(s) specified explicitly
- }
-
- // PodDistribution
- for i := range template.PodDistribution {
- if additionalPoDistributions := n.normalizePodDistribution(&template.PodDistribution[i]); additionalPoDistributions != nil {
- template.PodDistribution = append(template.PodDistribution, additionalPoDistributions...)
- }
+func (n *Normalizer) normalizePodTemplates(templates *api.ChiTemplates) {
+ for i := range templates.PodTemplates {
+ n.normalizePodTemplate(&templates.PodTemplates[i])
}
+}
- // Spec
- template.Spec.Affinity = MergeAffinity(template.Spec.Affinity, NewAffinity(template))
-
- // In case we have hostNetwork specified, we need to have ClusterFirstWithHostNet DNS policy, because of
- // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
- // which tells: For Pods running with hostNetwork, you should explicitly set its DNS policy “ClusterFirstWithHostNet”.
- if template.Spec.HostNetwork {
- template.Spec.DNSPolicy = core.DNSClusterFirstWithHostNet
+func (n *Normalizer) normalizeVolumeClaimTemplates(templates *api.ChiTemplates) {
+ for i := range templates.VolumeClaimTemplates {
+ n.normalizeVolumeClaimTemplate(&templates.VolumeClaimTemplates[i])
}
-
- // Introduce PodTemplate into Index
- n.ctx.chi.Spec.Templates.EnsurePodTemplatesIndex().Set(template.Name, template)
}
-const defaultTopologyKey = core.LabelHostname
-
-func (n *Normalizer) normalizePodDistribution(podDistribution *api.ChiPodDistribution) []api.ChiPodDistribution {
- if podDistribution.TopologyKey == "" {
- podDistribution.TopologyKey = defaultTopologyKey
+func (n *Normalizer) normalizeServiceTemplates(templates *api.ChiTemplates) {
+ for i := range templates.ServiceTemplates {
+ n.normalizeServiceTemplate(&templates.ServiceTemplates[i])
}
- switch podDistribution.Type {
- case
- deployment.PodDistributionUnspecified,
- // AntiAffinity section
- deployment.PodDistributionClickHouseAntiAffinity,
- deployment.PodDistributionShardAntiAffinity,
- deployment.PodDistributionReplicaAntiAffinity:
- // PodDistribution is known
- if podDistribution.Scope == "" {
- podDistribution.Scope = deployment.PodDistributionScopeCluster
- }
- return nil
- case
- deployment.PodDistributionAnotherNamespaceAntiAffinity,
- deployment.PodDistributionAnotherClickHouseInstallationAntiAffinity,
- deployment.PodDistributionAnotherClusterAntiAffinity:
- // PodDistribution is known
- return nil
- case
- deployment.PodDistributionMaxNumberPerNode:
- // PodDistribution is known
- if podDistribution.Number < 0 {
- podDistribution.Number = 0
- }
- return nil
- case
- // Affinity section
- deployment.PodDistributionNamespaceAffinity,
- deployment.PodDistributionClickHouseInstallationAffinity,
- deployment.PodDistributionClusterAffinity,
- deployment.PodDistributionShardAffinity,
- deployment.PodDistributionReplicaAffinity,
- deployment.PodDistributionPreviousTailAffinity:
- // PodDistribution is known
- return nil
-
- case deployment.PodDistributionCircularReplication:
- // PodDistribution is known
- // PodDistributionCircularReplication is a shortcut to simplify complex set of other distributions
- // All shortcuts have to be expanded
-
- if podDistribution.Scope == "" {
- podDistribution.Scope = deployment.PodDistributionScopeCluster
- }
-
- // TODO need to support multi-cluster
- cluster := n.ctx.chi.Spec.Configuration.Clusters[0]
-
- // Expand shortcut
- return []api.ChiPodDistribution{
- {
- Type: deployment.PodDistributionShardAntiAffinity,
- Scope: podDistribution.Scope,
- },
- {
- Type: deployment.PodDistributionReplicaAntiAffinity,
- Scope: podDistribution.Scope,
- },
- {
- Type: deployment.PodDistributionMaxNumberPerNode,
- Scope: podDistribution.Scope,
- Number: cluster.Layout.ReplicasCount,
- },
+}
- {
- Type: deployment.PodDistributionPreviousTailAffinity,
- },
+// normalizeHostTemplate normalizes .spec.templates.hostTemplates
+func (n *Normalizer) normalizeHostTemplate(template *api.ChiHostTemplate) {
+ templatesNormalizer.NormalizeHostTemplate(template)
+ // Introduce HostTemplate into Index
+ n.ctx.GetTarget().Spec.Templates.EnsureHostTemplatesIndex().Set(template.Name, template)
+}
- {
- Type: deployment.PodDistributionNamespaceAffinity,
- },
- {
- Type: deployment.PodDistributionClickHouseInstallationAffinity,
- },
- {
- Type: deployment.PodDistributionClusterAffinity,
- },
- }
+// normalizePodTemplate normalizes .spec.templates.podTemplates
+func (n *Normalizer) normalizePodTemplate(template *api.ChiPodTemplate) {
+ // TODO need to support multi-cluster
+ replicasCount := 1
+ if len(n.ctx.GetTarget().Spec.Configuration.Clusters) > 0 {
+ replicasCount = n.ctx.GetTarget().Spec.Configuration.Clusters[0].Layout.ReplicasCount
}
-
- // PodDistribution is not known
- podDistribution.Type = deployment.PodDistributionUnspecified
- return nil
+ templatesNormalizer.NormalizePodTemplate(replicasCount, template)
+ // Introduce PodTemplate into Index
+ n.ctx.GetTarget().Spec.Templates.EnsurePodTemplatesIndex().Set(template.Name, template)
}
// normalizeVolumeClaimTemplate normalizes .spec.templates.volumeClaimTemplates
func (n *Normalizer) normalizeVolumeClaimTemplate(template *api.ChiVolumeClaimTemplate) {
- // Check name
- // Skip for now
-
- // StorageManagement
- n.normalizeStorageManagement(&template.StorageManagement)
-
- // Check Spec
- // Skip for now
-
+ templatesNormalizer.NormalizeVolumeClaimTemplate(template)
// Introduce VolumeClaimTemplate into Index
- n.ctx.chi.Spec.Templates.EnsureVolumeClaimTemplatesIndex().Set(template.Name, template)
-}
-
-// normalizeStorageManagement normalizes StorageManagement
-func (n *Normalizer) normalizeStorageManagement(storage *api.StorageManagement) {
- // Check PVCProvisioner
- if !storage.PVCProvisioner.IsValid() {
- storage.PVCProvisioner = api.PVCProvisionerUnspecified
- }
-
- // Check PVCReclaimPolicy
- if !storage.PVCReclaimPolicy.IsValid() {
- storage.PVCReclaimPolicy = api.PVCReclaimPolicyUnspecified
- }
+ n.ctx.GetTarget().Spec.Templates.EnsureVolumeClaimTemplatesIndex().Set(template.Name, template)
}
// normalizeServiceTemplate normalizes .spec.templates.serviceTemplates
func (n *Normalizer) normalizeServiceTemplate(template *api.ChiServiceTemplate) {
- // Check name
- // Check GenerateName
- // Check ObjectMeta
- // Check Spec
-
+ templatesNormalizer.NormalizeServiceTemplate(template)
// Introduce ServiceClaimTemplate into Index
- n.ctx.chi.Spec.Templates.EnsureServiceTemplatesIndex().Set(template.Name, template)
+ n.ctx.GetTarget().Spec.Templates.EnsureServiceTemplatesIndex().Set(template.Name, template)
}
-// normalizeUseTemplates normalizes list of templates use specifications
-func (n *Normalizer) normalizeUseTemplates(useTemplates []api.ChiUseTemplate) []api.ChiUseTemplate {
- for i := range useTemplates {
- useTemplate := &useTemplates[i]
- n.normalizeUseTemplate(useTemplate)
- }
- return useTemplates
-}
-
-// normalizeUseTemplate normalizes ChiUseTemplate
-func (n *Normalizer) normalizeUseTemplate(useTemplate *api.ChiUseTemplate) {
- // Check Name
- if useTemplate.Name == "" {
- // This is strange
- }
-
- // Check Namespace
- if useTemplate.Namespace == "" {
- // So far do nothing with empty namespace
- }
-
- // Ensure UseType
- switch useTemplate.UseType {
- case useTypeMerge:
- // Known use type, all is fine, do nothing
- default:
- // Unknown - use default value
- useTemplate.UseType = useTypeMerge
- }
+// normalizeUseTemplates normalizes .spec.useTemplates; it is a thin wrapper that keeps the section name visible in the normalizer
+func (n *Normalizer) normalizeUseTemplates(templates []*api.ChiTemplateRef) []*api.ChiTemplateRef {
+ return templatesNormalizer.NormalizeTemplatesList(templates)
}
// normalizeClusters normalizes clusters
func (n *Normalizer) normalizeClusters(clusters []*api.Cluster) []*api.Cluster {
// We need to have at least one cluster available
clusters = n.ensureClusters(clusters)
-
// Normalize all clusters
for i := range clusters {
clusters[i] = n.normalizeCluster(clusters[i])
}
-
return clusters
}
-// newDefaultCluster
-func (n *Normalizer) newDefaultCluster() *api.Cluster {
- return &api.Cluster{
- Name: "cluster",
- }
-}
-
// ensureClusters
func (n *Normalizer) ensureClusters(clusters []*api.Cluster) []*api.Cluster {
+ // Maybe we already have cluster(s) available
if len(clusters) > 0 {
return clusters
}
- if n.ctx.options.WithDefaultCluster {
+ // In case no clusters available, we may want to create a default one
+ if n.ctx.Options().WithDefaultCluster {
return []*api.Cluster{
- n.newDefaultCluster(),
+ creator.NewDefaultCluster(),
}
}
- return []*api.Cluster{}
+ // Nope, no clusters expected
+ return nil
}
// normalizeConfigurationZookeeper normalizes .spec.configuration.zookeeper
@@ -925,7 +607,7 @@ func (n *Normalizer) normalizeConfigurationZookeeper(zk *api.ChiZookeeperConfig)
// Convenience wrapper
node := &zk.Nodes[i]
if api.IsPortUnassigned(node.Port) {
- node.Port = zkDefaultPort
+ node.Port = model.ZkDefaultPort
}
}
@@ -955,14 +637,16 @@ func (n *Normalizer) substSettingsFieldWithDataFromDataSource(
) bool {
// Has to have source field specified
if !settings.Has(srcSecretRefField) {
+ // No substitution done
return false
}
// Fetch data source address from the source setting field
setting := settings.Get(srcSecretRefField)
- secretAddress, err := setting.FetchDataSourceAddress(n.ctx.chi.Namespace, parseScalarString)
+ secretAddress, err := setting.FetchDataSourceAddress(n.ctx.GetTarget().Namespace, parseScalarString)
if err != nil {
// This is not necessarily an error, just no address specified, most likely setting is not data source ref
+ // No substitution done
return false
}
@@ -974,13 +658,13 @@ func (n *Normalizer) substSettingsFieldWithDataFromDataSource(
}
// In case we are NOT replacing the same field with its new value, then remove the source field.
- // Typically non-replaced source field is not expected to be included into the final ClickHouse config,
- // mainly because very often these source fields are synthetic ones (clickhouse does not know them).
+ // Typically non-replaced source field is not expected to be included into the final config,
+ // mainly because very often these source fields are synthetic ones (do not exist in config fields list).
if dstField != srcSecretRefField {
settings.Delete(srcSecretRefField)
}
- // All is done
+ // Substitution done
return true
}
@@ -996,7 +680,7 @@ func (n *Normalizer) substSettingsFieldWithSecretFieldValue(
if err != nil {
return nil, err
}
-
+ // Create new setting with the value
return api.NewSettingScalar(secretFieldValue), nil
})
}
@@ -1027,7 +711,7 @@ func (n *Normalizer) substSettingsFieldWithEnvRefToSecretField(
},
},
)
-
+ // Create new setting w/o value but with attribute to read from ENV var
return api.NewSettingScalar("").SetAttribute("from_env", envVarName), nil
})
}
@@ -1062,7 +746,7 @@ func (n *Normalizer) substSettingsFieldWithMountedFile(settings *api.Settings, s
})
// TODO setting may have specified mountPath explicitly
- mountPath := filepath.Join(dirPathSecretFilesConfig, filenameInSettingsOrFiles, secretAddress.Name)
+ mountPath := filepath.Join(model.DirPathSecretFilesConfig, filenameInSettingsOrFiles, secretAddress.Name)
// TODO setting may have specified subPath explicitly
// Mount as file
//subPath := filename
@@ -1075,12 +759,11 @@ func (n *Normalizer) substSettingsFieldWithMountedFile(settings *api.Settings, s
SubPath: subPath,
})
+ // Do not create a new setting; the old setting will be deleted
return nil, fmt.Errorf("no need to create a new setting")
})
}
-const internodeClusterSecretEnvName = "CLICKHOUSE_INTERNODE_CLUSTER_SECRET"
-
func (n *Normalizer) appendClusterSecretEnvVar(cluster *api.Cluster) {
switch cluster.Secret.Source() {
case api.ClusterSecretSourcePlaintext:
@@ -1091,7 +774,7 @@ func (n *Normalizer) appendClusterSecretEnvVar(cluster *api.Cluster) {
// Set the password for internode communication using an ENV VAR
n.appendAdditionalEnvVar(
core.EnvVar{
- Name: internodeClusterSecretEnvName,
+ Name: model.InternodeClusterSecretEnvName,
ValueFrom: &core.EnvVarSource{
SecretKeyRef: cluster.Secret.GetSecretKeyRef(),
},
@@ -1102,9 +785,9 @@ func (n *Normalizer) appendClusterSecretEnvVar(cluster *api.Cluster) {
// Set the password for internode communication using an ENV VAR
n.appendAdditionalEnvVar(
core.EnvVar{
- Name: internodeClusterSecretEnvName,
+ Name: model.InternodeClusterSecretEnvName,
ValueFrom: &core.EnvVarSource{
- SecretKeyRef: cluster.Secret.GetAutoSecretKeyRef(CreateClusterAutoSecretName(cluster)),
+ SecretKeyRef: cluster.Secret.GetAutoSecretKeyRef(model.CreateClusterAutoSecretName(cluster)),
},
},
)
@@ -1117,14 +800,14 @@ func (n *Normalizer) appendAdditionalEnvVar(envVar core.EnvVar) {
return
}
- for _, existingEnvVar := range n.ctx.chi.Attributes.AdditionalEnvVars {
+ for _, existingEnvVar := range n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalEnvVars {
if existingEnvVar.Name == envVar.Name {
// Such a variable already exists
return
}
}
- n.ctx.chi.Attributes.AdditionalEnvVars = append(n.ctx.chi.Attributes.AdditionalEnvVars, envVar)
+ n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalEnvVars = append(n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalEnvVars, envVar)
}
func (n *Normalizer) appendAdditionalVolume(volume core.Volume) {
@@ -1133,14 +816,14 @@ func (n *Normalizer) appendAdditionalVolume(volume core.Volume) {
return
}
- for _, existingVolume := range n.ctx.chi.Attributes.AdditionalVolumes {
+ for _, existingVolume := range n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalVolumes {
if existingVolume.Name == volume.Name {
// Such a variable already exists
return
}
}
- n.ctx.chi.Attributes.AdditionalVolumes = append(n.ctx.chi.Attributes.AdditionalVolumes, volume)
+ n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalVolumes = append(n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalVolumes, volume)
}
func (n *Normalizer) appendAdditionalVolumeMount(volumeMount core.VolumeMount) {
@@ -1149,32 +832,33 @@ func (n *Normalizer) appendAdditionalVolumeMount(volumeMount core.VolumeMount) {
return
}
- for _, existingVolumeMount := range n.ctx.chi.Attributes.AdditionalVolumeMounts {
+ for _, existingVolumeMount := range n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalVolumeMounts {
if existingVolumeMount.Name == volumeMount.Name {
// Such a variable already exists
return
}
}
- n.ctx.chi.Attributes.AdditionalVolumeMounts = append(n.ctx.chi.Attributes.AdditionalVolumeMounts, volumeMount)
+ n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalVolumeMounts = append(n.ctx.GetTarget().EnsureRuntime().EnsureAttributes().AdditionalVolumeMounts, volumeMount)
}
var ErrSecretValueNotFound = fmt.Errorf("secret value not found")
// fetchSecretFieldValue fetches the value of the specified field in the specified secret
-// TODO this is the only useage of k8s API in the normalizer. How to remove it?
+// TODO this is the only usage of k8s API in the normalizer. How to remove it?
func (n *Normalizer) fetchSecretFieldValue(secretAddress api.ObjectAddress) (string, error) {
// Fetch the secret
- secret, err := n.kubeClient.CoreV1().Secrets(secretAddress.Namespace).Get(context.TODO(), secretAddress.Name, controller.NewGetOptions())
+ secret, err := n.secretGet(secretAddress.Namespace, secretAddress.Name)
if err != nil {
log.V(1).M(secretAddress.Namespace, secretAddress.Name).F().Info("unable to read secret %s %v", secretAddress, err)
return "", ErrSecretValueNotFound
}
// Find the field within the secret
- for k, value := range secret.Data {
- if secretAddress.Key == k {
+ for key, value := range secret.Data {
+ if secretAddress.Key == key {
+ // The field found!
return string(value), nil
}
}
@@ -1229,10 +913,10 @@ func (n *Normalizer) normalizeConfigurationUsers(users *api.Settings) *api.Setti
}
func (n *Normalizer) removePlainPassword(user *api.SettingsUser) {
+ // If the user has any encrypted password specified, we need to delete the existing plaintext password.
+ // Set the `remove` flag for the user's plaintext `password`, which is specified as empty in the stock ClickHouse users.xml,
+ // thus we need to overwrite it.
if user.Has("password_double_sha1_hex") || user.Has("password_sha256_hex") {
- // If user has encrypted password specified, we need to delete existing plaintext password.
- // Set "remove" flag for user's "password", which is specified as empty in stock ClickHouse users.xml,
- // thus we need to overwrite it.
user.Set("password", api.NewSettingScalar("").SetAttribute("remove", "1"))
}
}
@@ -1270,14 +954,14 @@ func (n *Normalizer) normalizeConfigurationUserEnsureMandatoryFields(user *api.S
profile := chop.Config().ClickHouse.Config.User.Default.Profile
quota := chop.Config().ClickHouse.Config.User.Default.Quota
ips := append([]string{}, chop.Config().ClickHouse.Config.User.Default.NetworksIP...)
- hostRegexp := CreatePodHostnameRegexp(n.ctx.chi, chop.Config().ClickHouse.Config.Network.HostRegexpTemplate)
+ hostRegexp := model.CreatePodHostnameRegexp(n.ctx.GetTarget(), chop.Config().ClickHouse.Config.Network.HostRegexpTemplate)
// Some users may have special options for mandatory fields
switch user.Username() {
case defaultUsername:
// "default" user
- ips = append(ips, n.ctx.options.DefaultUserAdditionalIPs...)
- if !n.ctx.options.DefaultUserInsertHostRegex {
+ ips = append(ips, n.ctx.Options().DefaultUserAdditionalIPs...)
+ if !n.ctx.Options().DefaultUserInsertHostRegex {
hostRegexp = ""
}
case chop.Config().ClickHouse.Access.Username:
@@ -1383,7 +1067,7 @@ func (n *Normalizer) normalizeConfigurationUserPassword(user *api.SettingsUser)
}
// It may come that plaintext password is still empty.
- // For example, "default" quite often has empty password.
+ // For example, user `default` quite often has empty password.
if passwordPlaintext == "" {
// This is fine
// This is all for this user
@@ -1451,17 +1135,17 @@ func (n *Normalizer) normalizeConfigurationFiles(files *api.Settings) *api.Setti
// normalizeCluster normalizes cluster and returns deployments usage counters for this cluster
func (n *Normalizer) normalizeCluster(cluster *api.Cluster) *api.Cluster {
if cluster == nil {
- cluster = n.newDefaultCluster()
+ cluster = creator.NewDefaultCluster()
}
- cluster.CHI = n.ctx.chi
+ cluster.Runtime.CHI = n.ctx.GetTarget()
// Inherit from .spec.configuration.zookeeper
- cluster.InheritZookeeperFrom(n.ctx.chi)
+ cluster.InheritZookeeperFrom(n.ctx.GetTarget())
// Inherit from .spec.configuration.files
- cluster.InheritFilesFrom(n.ctx.chi)
+ cluster.InheritFilesFrom(n.ctx.GetTarget())
// Inherit from .spec.defaults
- cluster.InheritTemplatesFrom(n.ctx.chi)
+ cluster.InheritTemplatesFrom(n.ctx.GetTarget())
cluster.Zookeeper = n.normalizeConfigurationZookeeper(cluster.Zookeeper)
cluster.Settings = n.normalizeConfigurationSettings(cluster.Settings)
@@ -1517,15 +1201,6 @@ func (n *Normalizer) createHostsField(cluster *api.Cluster) {
cluster.WalkHostsByReplicas(hostMergeFunc)
}
-// Values for Schema Policy
-const (
- SchemaPolicyReplicaNone = "None"
- SchemaPolicyReplicaAll = "All"
- SchemaPolicyShardNone = "None"
- SchemaPolicyShardAll = "All"
- SchemaPolicyShardDistributedTablesOnly = "DistributedTablesOnly"
-)
-
// normalizeClusterLayoutShardsCountAndReplicasCount ensures at least 1 shard and 1 replica counters
func (n *Normalizer) normalizeClusterSchemaPolicy(policy *api.SchemaPolicy) *api.SchemaPolicy {
if policy == nil {
@@ -1533,30 +1208,30 @@ func (n *Normalizer) normalizeClusterSchemaPolicy(policy *api.SchemaPolicy) *api
}
switch strings.ToLower(policy.Replica) {
- case strings.ToLower(SchemaPolicyReplicaNone):
+ case strings.ToLower(model.SchemaPolicyReplicaNone):
// Known value, overwrite it to ensure case-ness
- policy.Replica = SchemaPolicyReplicaNone
- case strings.ToLower(SchemaPolicyReplicaAll):
+ policy.Replica = model.SchemaPolicyReplicaNone
+ case strings.ToLower(model.SchemaPolicyReplicaAll):
// Known value, overwrite it to ensure case-ness
- policy.Replica = SchemaPolicyReplicaAll
+ policy.Replica = model.SchemaPolicyReplicaAll
default:
// Unknown value, fallback to default
- policy.Replica = SchemaPolicyReplicaAll
+ policy.Replica = model.SchemaPolicyReplicaAll
}
switch strings.ToLower(policy.Shard) {
- case strings.ToLower(SchemaPolicyShardNone):
+ case strings.ToLower(model.SchemaPolicyShardNone):
// Known value, overwrite it to ensure case-ness
- policy.Shard = SchemaPolicyShardNone
- case strings.ToLower(SchemaPolicyShardAll):
+ policy.Shard = model.SchemaPolicyShardNone
+ case strings.ToLower(model.SchemaPolicyShardAll):
// Known value, overwrite it to ensure case-ness
- policy.Shard = SchemaPolicyShardAll
- case strings.ToLower(SchemaPolicyShardDistributedTablesOnly):
+ policy.Shard = model.SchemaPolicyShardAll
+ case strings.ToLower(model.SchemaPolicyShardDistributedTablesOnly):
// Known value, overwrite it to ensure case-ness
- policy.Shard = SchemaPolicyShardDistributedTablesOnly
+ policy.Shard = model.SchemaPolicyShardDistributedTablesOnly
default:
// unknown value, fallback to default
- policy.Shard = SchemaPolicyShardAll
+ policy.Shard = model.SchemaPolicyShardAll
}
return policy
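// Illustrative sketch, not part of the patch: schema policy values are matched
// case-insensitively and rewritten to their canonical spelling; unknown values
// fall back to the "All" policies.
func exampleSchemaPolicy(n *Normalizer) {
	policy := n.normalizeClusterSchemaPolicy(&api.SchemaPolicy{Replica: "none", Shard: "distributedtablesonly"})
	_ = policy // Replica == model.SchemaPolicyReplicaNone, Shard == model.SchemaPolicyShardDistributedTablesOnly
}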
@@ -1740,22 +1415,22 @@ func (n *Normalizer) normalizeReplicaShardsCount(replica *api.ChiReplica, layout
// normalizeShardName normalizes shard name
func (n *Normalizer) normalizeShardName(shard *api.ChiShard, index int) {
- if (len(shard.Name) > 0) && !IsAutoGeneratedShardName(shard.Name, shard, index) {
+ if (len(shard.Name) > 0) && !model.IsAutoGeneratedShardName(shard.Name, shard, index) {
// Has explicitly specified name already
return
}
- shard.Name = CreateShardName(shard, index)
+ shard.Name = model.CreateShardName(shard, index)
}
// normalizeReplicaName normalizes replica name
func (n *Normalizer) normalizeReplicaName(replica *api.ChiReplica, index int) {
- if (len(replica.Name) > 0) && !IsAutoGeneratedReplicaName(replica.Name, replica, index) {
+ if (len(replica.Name) > 0) && !model.IsAutoGeneratedReplicaName(replica.Name, replica, index) {
// Has explicitly specified name already
return
}
- replica.Name = CreateReplicaName(replica, index)
+ replica.Name = model.CreateReplicaName(replica, index)
}
// normalizeShardName normalizes shard weight
@@ -1797,8 +1472,9 @@ func (n *Normalizer) normalizeHost(
shardIndex int,
replicaIndex int,
) {
+
n.normalizeHostName(host, shard, shardIndex, replica, replicaIndex)
- n.normalizeHostPorts(host)
+ entitiesNormalizer.NormalizeHostPorts(host)
// Inherit from either Shard or Replica
var s *api.ChiShard
var r *api.ChiReplica
@@ -1814,11 +1490,6 @@ func (n *Normalizer) normalizeHost(
host.InheritTemplatesFrom(s, r, nil)
}
-// normalizeHostTemplateSpec is the same as normalizeHost but for a template
-func (n *Normalizer) normalizeHostTemplateSpec(host *api.ChiHost) {
- n.normalizeHostPorts(host)
-}
-
// normalizeHostName normalizes host's name
func (n *Normalizer) normalizeHostName(
host *api.ChiHost,
@@ -1827,40 +1498,12 @@ func (n *Normalizer) normalizeHostName(
replica *api.ChiReplica,
replicaIndex int,
) {
- if (len(host.GetName()) > 0) && !IsAutoGeneratedHostName(host.GetName(), host, shard, shardIndex, replica, replicaIndex) {
+ if (len(host.GetName()) > 0) && !model.IsAutoGeneratedHostName(host.GetName(), host, shard, shardIndex, replica, replicaIndex) {
// Has explicitly specified name already
return
}
- host.Name = CreateHostName(host, shard, shardIndex, replica, replicaIndex)
-}
-
-// normalizeHostPorts ensures api.ChiReplica.Port is reasonable
-func (n *Normalizer) normalizeHostPorts(host *api.ChiHost) {
- // Deprecated
- if api.IsPortInvalid(host.Port) {
- host.Port = api.PortUnassigned()
- }
-
- if api.IsPortInvalid(host.TCPPort) {
- host.TCPPort = api.PortUnassigned()
- }
-
- if api.IsPortInvalid(host.TLSPort) {
- host.TLSPort = api.PortUnassigned()
- }
-
- if api.IsPortInvalid(host.HTTPPort) {
- host.HTTPPort = api.PortUnassigned()
- }
-
- if api.IsPortInvalid(host.HTTPSPort) {
- host.HTTPSPort = api.PortUnassigned()
- }
-
- if api.IsPortInvalid(host.InterserverHTTPPort) {
- host.InterserverHTTPPort = api.PortUnassigned()
- }
+ host.Name = model.CreateHostName(host, shard, shardIndex, replica, replicaIndex)
}
// normalizeShardInternalReplication ensures reasonable values in
diff --git a/pkg/model/chi/normalizer/options.go b/pkg/model/chi/normalizer/options.go
new file mode 100644
index 000000000..4fdd8b92f
--- /dev/null
+++ b/pkg/model/chi/normalizer/options.go
@@ -0,0 +1,31 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package normalizer
+
+// Options specifies normalization options
+type Options struct {
+ // WithDefaultCluster specifies whether to insert default cluster in case no cluster specified
+ WithDefaultCluster bool
+ // DefaultUserAdditionalIPs specifies set of additional IPs applied to default user
+ DefaultUserAdditionalIPs []string
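+ // DefaultUserInsertHostRegex specifies whether to add a host regexp restriction to the default user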
+ DefaultUserInsertHostRegex bool
+}
+
+// NewOptions creates new Options
+func NewOptions() *Options {
+ return &Options{
+ DefaultUserInsertHostRegex: true,
+ }
+}
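// Usage sketch, not part of the patch: callers construct Options and override
// only the non-default fields; the IP range below is purely hypothetical.
func exampleOptions() *Options {
	opts := NewOptions()           // DefaultUserInsertHostRegex is true by default
	opts.WithDefaultCluster = true // insert a default cluster when the CHI defines none
	opts.DefaultUserAdditionalIPs = []string{"10.0.0.0/8"}
	return opts
}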
diff --git a/pkg/model/chi/normalizer/templates/chi.go b/pkg/model/chi/normalizer/templates/chi.go
new file mode 100644
index 000000000..01eaf36c9
--- /dev/null
+++ b/pkg/model/chi/normalizer/templates/chi.go
@@ -0,0 +1,190 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package templates
+
+import (
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+const (
+ // .spec.useTemplate.useType
+ UseTypeMerge = "merge"
+)
+
+// prepareListOfTemplates prepares list of CHI templates to be used by the CHI
+func prepareListOfTemplates(chi *api.ClickHouseInstallation) (templates []*api.ChiTemplateRef) {
+ // 1. Get list of auto templates available
+ templates = append(templates, prepareListOfAutoTemplates(chi)...)
+ // 2. Append templates which are explicitly requested by the CHI
+ templates = append(templates, prepareListOfManualTemplates(chi)...)
+ // 3. Normalize the list of templates
+ templates = NormalizeTemplatesList(templates)
+
+ log.V(1).M(chi).F().Info("Found applicable templates num: %d", len(templates))
+ return templates
+}
+
+func prepareListOfAutoTemplates(chi *api.ClickHouseInstallation) (templates []*api.ChiTemplateRef) {
+ // 1. Get list of auto templates available
+ if autoTemplates := chop.Config().GetAutoTemplates(); len(autoTemplates) > 0 {
+ log.V(1).M(chi).F().Info("Found auto-templates num: %d", len(autoTemplates))
+ for _, template := range autoTemplates {
+ log.V(1).M(chi).F().Info(
+ "Adding auto-template to the list of applicable templates: %s/%s ",
+ template.Namespace, template.Name)
+ templates = append(templates, &api.ChiTemplateRef{
+ Name: template.Name,
+ Namespace: template.Namespace,
+ UseType: UseTypeMerge,
+ })
+ }
+ }
+
+ return templates
+}
+
+func prepareListOfManualTemplates(chi *api.ClickHouseInstallation) (templates []*api.ChiTemplateRef) {
+ if len(chi.Spec.UseTemplates) > 0 {
+ log.V(1).M(chi).F().Info("Found manual-templates num: %d", len(chi.Spec.UseTemplates))
+ templates = append(templates, chi.Spec.UseTemplates...)
+ }
+
+ return templates
+}
+
+// ApplyCHITemplates applies CHI templates over the target CHI
+func ApplyCHITemplates(target, chi *api.ClickHouseInstallation) (appliedTemplates []*api.ChiTemplateRef) {
+ // Prepare list of templates to be applied to the CHI
+ templates := prepareListOfTemplates(chi)
+
+ // Apply templates from the list and count applied templates - just to make nice log entry
+ for _, template := range templates {
+ if applyTemplate(target, template, chi) {
+ appliedTemplates = append(appliedTemplates, template)
+ }
+ }
+
+ log.V(1).M(chi).F().Info("Applied templates num: %d", len(appliedTemplates))
+ return appliedTemplates
+}
+
+// applyTemplate applies a template over the target CHI.
+// `chi *api.ClickHouseInstallation` is used only to determine whether the template should be applied
+func applyTemplate(target *api.ClickHouseInstallation, templateRef *api.ChiTemplateRef, chi *api.ClickHouseInstallation) bool {
+ if templateRef == nil {
+ log.Warning("unable to apply template - nil templateRef provided")
+ // Template is not applied
+ return false
+ }
+
+ // What template are we going to apply?
+ defaultNamespace := chi.Namespace
+ template := chop.Config().FindTemplate(templateRef, defaultNamespace)
+ if template == nil {
+ log.V(1).M(templateRef.Namespace, templateRef.Name).F().Warning(
+ "skip template - UNABLE to find by templateRef: %s/%s",
+ templateRef.Namespace, templateRef.Name)
+ // Template is not applied
+ return false
+ }
+
+ // What target(s) this template wants to be applied to?
+ // This is determined by matching selector of the template and target's labels
+ // Convenience wrapper
+ selector := template.Spec.Templating.GetSelector()
+ if !selector.Matches(chi.Labels) {
+ // This template does not want to be applied to this CHI
+ log.V(1).M(templateRef.Namespace, templateRef.Name).F().Info(
+ "Skip template: %s/%s. Selector: %v does not match labels: %v",
+ templateRef.Namespace, templateRef.Name, selector, chi.Labels)
+ // Template is not applied
+ return false
+ }
+
+ //
+ // Template is found and wants to be applied on the target
+ //
+
+ log.V(1).M(templateRef.Namespace, templateRef.Name).F().Info(
+ "Apply template: %s/%s. Selector: %v matches labels: %v",
+ templateRef.Namespace, templateRef.Name, selector, chi.Labels)
+
+ // Let's apply template and append used template to the list of used templates
+ mergeFromTemplate(target, template)
+
+ // Template is applied
+ return true
+}
+
+func mergeFromTemplate(target, template *api.ClickHouseInstallation) *api.ClickHouseInstallation {
+ // Merge template's Labels over target's Labels
+ target.Labels = util.MergeStringMapsOverwrite(
+ target.Labels,
+ util.CopyMapFilter(
+ template.Labels,
+ chop.Config().Label.Include,
+ chop.Config().Label.Exclude,
+ ),
+ )
+
+ // Merge template's Annotations over target's Annotations
+ target.Annotations = util.MergeStringMapsOverwrite(
+ target.Annotations, util.CopyMapFilter(
+ template.Annotations,
+ chop.Config().Annotation.Include,
+ append(chop.Config().Annotation.Exclude, util.ListSkippedAnnotations()...),
+ ),
+ )
+
+ // Merge template's Spec over target's Spec
+ (&target.Spec).MergeFrom(&template.Spec, api.MergeTypeOverrideByNonEmptyValues)
+
+ return target
+}
+
+// NormalizeTemplatesList normalizes list of templates use specifications
+func NormalizeTemplatesList(templates []*api.ChiTemplateRef) []*api.ChiTemplateRef {
+ for i := range templates {
+ templates[i] = normalizeTemplateRef(templates[i])
+ }
+ return templates
+}
+
+// normalizeTemplateRef normalizes ChiTemplateRef
+func normalizeTemplateRef(templateRef *api.ChiTemplateRef) *api.ChiTemplateRef {
+ // Check Name
+ if templateRef.Name == "" {
+ // This is strange
+ }
+
+ // Check Namespace
+ if templateRef.Namespace == "" {
+ // So far do nothing with empty namespace
+ }
+
+ // Ensure UseType
+ switch templateRef.UseType {
+ case UseTypeMerge:
+ // Known use type, all is fine, do nothing
+ default:
+ // Unknown use type - overwrite with default value
+ templateRef.UseType = UseTypeMerge
+ }
+
+ return templateRef
+}
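// Usage sketch, not part of the patch: the normalizer is expected to apply the
// collected templates to its target and record which ones were actually used,
// mirroring the removed in-place code. PushUsedTemplate is assumed to accept
// the new *api.ChiTemplateRef type.
func exampleApplyTemplates(target, chi *api.ClickHouseInstallation) {
	for _, templateRef := range ApplyCHITemplates(target, chi) {
		target.EnsureStatus().PushUsedTemplate(templateRef)
	}
}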
diff --git a/pkg/model/chi/normalizer/templates/host.go b/pkg/model/chi/normalizer/templates/host.go
new file mode 100644
index 000000000..554c2601f
--- /dev/null
+++ b/pkg/model/chi/normalizer/templates/host.go
@@ -0,0 +1,59 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package templates
+
+import (
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer/entities"
+)
+
+// NormalizeHostTemplate normalizes .spec.templates.hostTemplates
+func NormalizeHostTemplate(template *api.ChiHostTemplate) {
+ // Name
+
+ // PortDistribution
+
+ if template.PortDistribution == nil {
+ // In case no PortDistribution provided - setup default one
+ template.PortDistribution = []api.ChiPortDistribution{
+ {
+ Type: deployment.PortDistributionUnspecified,
+ },
+ }
+ }
+
+ // Normalize PortDistribution
+ for i := range template.PortDistribution {
+ portDistribution := &template.PortDistribution[i]
+ switch portDistribution.Type {
+ case
+ deployment.PortDistributionUnspecified,
+ deployment.PortDistributionClusterScopeIndex:
+ // distribution is known
+ default:
+ // distribution is not known
+ portDistribution.Type = deployment.PortDistributionUnspecified
+ }
+ }
+
+ // Spec
+ normalizeHostTemplateSpec(&template.Spec)
+}
+
+// normalizeHostTemplateSpec is the same as normalizeHost but for a template
+func normalizeHostTemplateSpec(host *api.ChiHost) {
+ entities.NormalizeHostPorts(host)
+}
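// Illustrative sketch, not part of the patch: a host template without any port
// distribution receives the "Unspecified" default, and unknown distribution
// types are reset to it as well.
func exampleHostTemplate() {
	template := &api.ChiHostTemplate{}
	NormalizeHostTemplate(template)
	// template.PortDistribution now holds a single PortDistributionUnspecified entry,
	// and invalid ports in template.Spec are reset to "unassigned".
}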
diff --git a/pkg/model/chi/normalizer/templates/pod.go b/pkg/model/chi/normalizer/templates/pod.go
new file mode 100644
index 000000000..d697f23c0
--- /dev/null
+++ b/pkg/model/chi/normalizer/templates/pod.go
@@ -0,0 +1,159 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package templates
+
+import (
+ core "k8s.io/api/core/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+)
+
+// NormalizePodTemplate normalizes .spec.templates.podTemplates
+func NormalizePodTemplate(replicasCount int, template *api.ChiPodTemplate) {
+ // Name
+ // GenerateName
+ // No normalization so far for these
+
+ // Zone
+ normalizePodTemplateZone(template)
+
+ // PodDistribution
+ normalizePodTemplateDistribution(replicasCount, template)
+
+ // Spec
+ template.Spec.Affinity = model.MergeAffinity(template.Spec.Affinity, model.NewAffinity(template))
+
+ // In case we have hostNetwork specified, we need to have ClusterFirstWithHostNet DNS policy, because of
+ // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+ // which tells: For Pods running with hostNetwork, you should explicitly set its DNS policy “ClusterFirstWithHostNet”.
+ if template.Spec.HostNetwork {
+ template.Spec.DNSPolicy = core.DNSClusterFirstWithHostNet
+ }
+}
+
+func normalizePodTemplateZone(template *api.ChiPodTemplate) {
+ switch {
+ case len(template.Zone.Values) == 0:
+ // In case no values specified - no key is reasonable
+ template.Zone.Key = ""
+ case template.Zone.Key == "":
+ // We have values specified, but no key
+ // Use default zone key in this case
+ template.Zone.Key = core.LabelTopologyZone
+ default:
+ // We have both key and value(s) specified explicitly
+ // No need to do anything, all params are set
+ }
+}
+
+func normalizePodTemplateDistribution(replicasCount int, template *api.ChiPodTemplate) {
+ for i := range template.PodDistribution {
+ if additionalPodDistributions := normalizePodDistribution(replicasCount, &template.PodDistribution[i]); additionalPodDistributions != nil {
+ template.PodDistribution = append(template.PodDistribution, additionalPodDistributions...)
+ }
+ }
+}
+
+const defaultTopologyKey = core.LabelHostname
+
+func normalizePodDistribution(replicasCount int, podDistribution *api.ChiPodDistribution) []api.ChiPodDistribution {
+ // Ensure topology key
+ if podDistribution.TopologyKey == "" {
+ podDistribution.TopologyKey = defaultTopologyKey
+ }
+
+ switch podDistribution.Type {
+ case
+ deployment.PodDistributionUnspecified,
+ // AntiAffinity section
+ deployment.PodDistributionClickHouseAntiAffinity,
+ deployment.PodDistributionShardAntiAffinity,
+ deployment.PodDistributionReplicaAntiAffinity:
+ // PodDistribution is known
+ if podDistribution.Scope == "" {
+ podDistribution.Scope = deployment.PodDistributionScopeCluster
+ }
+ return nil
+ case
+ deployment.PodDistributionAnotherNamespaceAntiAffinity,
+ deployment.PodDistributionAnotherClickHouseInstallationAntiAffinity,
+ deployment.PodDistributionAnotherClusterAntiAffinity:
+ // PodDistribution is known
+ return nil
+ case
+ deployment.PodDistributionMaxNumberPerNode:
+ // PodDistribution is known
+ if podDistribution.Number < 0 {
+ podDistribution.Number = 0
+ }
+ return nil
+ case
+ // Affinity section
+ deployment.PodDistributionNamespaceAffinity,
+ deployment.PodDistributionClickHouseInstallationAffinity,
+ deployment.PodDistributionClusterAffinity,
+ deployment.PodDistributionShardAffinity,
+ deployment.PodDistributionReplicaAffinity,
+ deployment.PodDistributionPreviousTailAffinity:
+ // PodDistribution is known
+ return nil
+
+ case deployment.PodDistributionCircularReplication:
+ // PodDistribution is known
+ // PodDistributionCircularReplication is a shortcut to simplify complex set of other distributions
+ // All shortcuts have to be expanded
+
+ if podDistribution.Scope == "" {
+ podDistribution.Scope = deployment.PodDistributionScopeCluster
+ }
+
+ // Expand shortcut
+ return []api.ChiPodDistribution{
+ {
+ Type: deployment.PodDistributionShardAntiAffinity,
+ Scope: podDistribution.Scope,
+ },
+ {
+ Type: deployment.PodDistributionReplicaAntiAffinity,
+ Scope: podDistribution.Scope,
+ },
+ {
+ Type: deployment.PodDistributionMaxNumberPerNode,
+ Scope: podDistribution.Scope,
+ Number: replicasCount,
+ },
+
+ {
+ Type: deployment.PodDistributionPreviousTailAffinity,
+ },
+
+ {
+ Type: deployment.PodDistributionNamespaceAffinity,
+ },
+ {
+ Type: deployment.PodDistributionClickHouseInstallationAffinity,
+ },
+ {
+ Type: deployment.PodDistributionClusterAffinity,
+ },
+ }
+ }
+
+ // PodDistribution is not known
+ podDistribution.Type = deployment.PodDistributionUnspecified
+ return nil
+}
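// Illustrative sketch, not part of the patch: the CircularReplication shortcut
// expands into the full set of affinity/anti-affinity distributions, with the
// cluster replicas count driving MaxNumberPerNode.
func exampleCircularReplication() {
	template := &api.ChiPodTemplate{
		PodDistribution: []api.ChiPodDistribution{
			{Type: deployment.PodDistributionCircularReplication},
		},
	}
	NormalizePodTemplate(3, template) // assuming a 3-replica cluster
	// template.PodDistribution now additionally holds ShardAntiAffinity, ReplicaAntiAffinity,
	// MaxNumberPerNode (Number: 3), PreviousTailAffinity and Namespace/CHI/Cluster affinity entries.
}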
diff --git a/pkg/model/chi/normalizer/templates/service.go b/pkg/model/chi/normalizer/templates/service.go
new file mode 100644
index 000000000..b3f95bbe4
--- /dev/null
+++ b/pkg/model/chi/normalizer/templates/service.go
@@ -0,0 +1,25 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package templates
+
+import api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+
+// NormalizeServiceTemplate normalizes .spec.templates.serviceTemplates
+func NormalizeServiceTemplate(template *api.ChiServiceTemplate) {
+ // Check name
+ // Check GenerateName
+ // Check ObjectMeta
+ // Check Spec
+}
diff --git a/pkg/model/chi/normalizer/templates/volume_claim.go b/pkg/model/chi/normalizer/templates/volume_claim.go
new file mode 100644
index 000000000..d59f5b8cd
--- /dev/null
+++ b/pkg/model/chi/normalizer/templates/volume_claim.go
@@ -0,0 +1,42 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package templates
+
+import api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+
+// NormalizeVolumeClaimTemplate normalizes .spec.templates.volumeClaimTemplates
+func NormalizeVolumeClaimTemplate(template *api.ChiVolumeClaimTemplate) {
+ // Check name
+ // Skip for now
+
+ // StorageManagement
+ normalizeStorageManagement(&template.StorageManagement)
+
+ // Check Spec
+ // Skip for now
+}
+
+// normalizeStorageManagement normalizes StorageManagement
+func normalizeStorageManagement(storage *api.StorageManagement) {
+ // Check PVCProvisioner
+ if !storage.PVCProvisioner.IsValid() {
+ storage.PVCProvisioner = api.PVCProvisionerUnspecified
+ }
+
+ // Check PVCReclaimPolicy
+ if !storage.PVCReclaimPolicy.IsValid() {
+ storage.PVCReclaimPolicy = api.PVCReclaimPolicyUnspecified
+ }
+}
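// Illustrative sketch, not part of the patch: invalid storage management values
// are reset to their "Unspecified" defaults during normalization.
func exampleVolumeClaimTemplate(template *api.ChiVolumeClaimTemplate) {
	NormalizeVolumeClaimTemplate(template)
	// template.StorageManagement.PVCProvisioner and PVCReclaimPolicy now hold either
	// their original valid values or the corresponding *Unspecified defaults.
}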
diff --git a/pkg/model/chi/cluster.go b/pkg/model/chi/schemer/cluster.go
similarity index 94%
rename from pkg/model/chi/cluster.go
rename to pkg/model/chi/schemer/cluster.go
index 013848133..21ae1f36c 100644
--- a/pkg/model/chi/cluster.go
+++ b/pkg/model/chi/schemer/cluster.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package chi
+package schemer
import (
"context"
@@ -20,6 +20,7 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
"github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
"github.com/altinity/clickhouse-operator/pkg/util"
)
@@ -102,28 +103,28 @@ func (c *Cluster) QueryUnzipAndApplyUUIDs(ctx context.Context, endpoints []strin
// ExecCHI runs set of SQL queries over the whole CHI
func (c *Cluster) ExecCHI(ctx context.Context, chi *api.ClickHouseInstallation, SQLs []string, _opts ...*clickhouse.QueryOptions) error {
- hosts := CreateFQDNs(chi, nil, false)
+ hosts := model.CreateFQDNs(chi, nil, false)
opts := clickhouse.QueryOptionsNormalize(_opts...)
return c.SetHosts(hosts).ExecAll(ctx, SQLs, opts)
}
// ExecCluster runs set of SQL queries over the cluster
func (c *Cluster) ExecCluster(ctx context.Context, cluster *api.Cluster, SQLs []string, _opts ...*clickhouse.QueryOptions) error {
- hosts := CreateFQDNs(cluster, nil, false)
+ hosts := model.CreateFQDNs(cluster, nil, false)
opts := clickhouse.QueryOptionsNormalize(_opts...)
return c.SetHosts(hosts).ExecAll(ctx, SQLs, opts)
}
// ExecShard runs set of SQL queries over the shard replicas
func (c *Cluster) ExecShard(ctx context.Context, shard *api.ChiShard, SQLs []string, _opts ...*clickhouse.QueryOptions) error {
- hosts := CreateFQDNs(shard, nil, false)
+ hosts := model.CreateFQDNs(shard, nil, false)
opts := clickhouse.QueryOptionsNormalize(_opts...)
return c.SetHosts(hosts).ExecAll(ctx, SQLs, opts)
}
// ExecHost runs set of SQL queries over the replica
func (c *Cluster) ExecHost(ctx context.Context, host *api.ChiHost, SQLs []string, _opts ...*clickhouse.QueryOptions) error {
- hosts := CreateFQDNs(host, api.ChiHost{}, false)
+ hosts := model.CreateFQDNs(host, api.ChiHost{}, false)
opts := clickhouse.QueryOptionsNormalize(_opts...)
c.SetHosts(hosts)
if opts.GetSilent() {
@@ -136,7 +137,7 @@ func (c *Cluster) ExecHost(ctx context.Context, host *api.ChiHost, SQLs []string
// QueryHost runs specified query on specified host
func (c *Cluster) QueryHost(ctx context.Context, host *api.ChiHost, sql string, _opts ...*clickhouse.QueryOptions) (*clickhouse.QueryResult, error) {
- hosts := CreateFQDNs(host, api.ChiHost{}, false)
+ hosts := model.CreateFQDNs(host, api.ChiHost{}, false)
opts := clickhouse.QueryOptionsNormalize(_opts...)
c.SetHosts(hosts)
if opts.GetSilent() {
diff --git a/pkg/model/chi/schemer/distributed.go b/pkg/model/chi/schemer/distributed.go
index 5a271a833..1adc3e3ce 100644
--- a/pkg/model/chi/schemer/distributed.go
+++ b/pkg/model/chi/schemer/distributed.go
@@ -19,15 +19,15 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
"github.com/altinity/clickhouse-operator/pkg/util"
)
// shouldCreateDistributedObjects determines whether distributed objects should be created
func shouldCreateDistributedObjects(host *api.ChiHost) bool {
- hosts := chi.CreateFQDNs(host, api.Cluster{}, false)
+ hosts := model.CreateFQDNs(host, api.Cluster{}, false)
- if host.GetCluster().SchemaPolicy.Shard == chi.SchemaPolicyShardNone {
+ if host.GetCluster().SchemaPolicy.Shard == model.SchemaPolicyShardNone {
log.V(1).M(host).F().Info("SchemaPolicy.Shard says there is no need to distribute objects")
return false
}
@@ -56,22 +56,22 @@ func (s *ClusterSchemer) getDistributedObjectsSQLs(ctx context.Context, host *ap
databaseNames, createDatabaseSQLs := debugCreateSQLs(
s.QueryUnzip2Columns(
ctx,
- chi.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
- s.sqlCreateDatabaseDistributed(host.Address.ClusterName),
+ model.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
+ s.sqlCreateDatabaseDistributed(host.Runtime.Address.ClusterName),
),
)
tableNames, createTableSQLs := debugCreateSQLs(
s.QueryUnzipAndApplyUUIDs(
ctx,
- chi.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
- s.sqlCreateTableDistributed(host.Address.ClusterName),
+ model.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
+ s.sqlCreateTableDistributed(host.Runtime.Address.ClusterName),
),
)
functionNames, createFunctionSQLs := debugCreateSQLs(
s.QueryUnzip2Columns(
ctx,
- chi.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
- s.sqlCreateFunction(host.Address.ClusterName),
+ model.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
+ s.sqlCreateFunction(host.Runtime.Address.ClusterName),
),
)
return util.ConcatSlices([][]string{databaseNames, tableNames, functionNames}),
diff --git a/pkg/model/chi/schemer/replicated.go b/pkg/model/chi/schemer/replicated.go
index f472d7dc6..003ccbd76 100644
--- a/pkg/model/chi/schemer/replicated.go
+++ b/pkg/model/chi/schemer/replicated.go
@@ -19,16 +19,16 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
"github.com/altinity/clickhouse-operator/pkg/util"
)
// shouldCreateReplicatedObjects determines whether replicated objects should be created
func shouldCreateReplicatedObjects(host *api.ChiHost) bool {
- shard := chi.CreateFQDNs(host, api.ChiShard{}, false)
- cluster := chi.CreateFQDNs(host, api.Cluster{}, false)
+ shard := model.CreateFQDNs(host, api.ChiShard{}, false)
+ cluster := model.CreateFQDNs(host, api.Cluster{}, false)
- if host.GetCluster().SchemaPolicy.Shard == chi.SchemaPolicyShardAll {
+ if host.GetCluster().SchemaPolicy.Shard == model.SchemaPolicyShardAll {
// We have explicit request to create replicated objects on each shard
// However, it is reasonable to have at least two instances in a cluster
if len(cluster) >= 2 {
@@ -37,7 +37,7 @@ func shouldCreateReplicatedObjects(host *api.ChiHost) bool {
}
}
- if host.GetCluster().SchemaPolicy.Replica == chi.SchemaPolicyReplicaNone {
+ if host.GetCluster().SchemaPolicy.Replica == model.SchemaPolicyReplicaNone {
log.V(1).M(host).F().Info("SchemaPolicy.Replica says there is no need to replicate objects")
return false
}
@@ -66,22 +66,22 @@ func (s *ClusterSchemer) getReplicatedObjectsSQLs(ctx context.Context, host *api
databaseNames, createDatabaseSQLs := debugCreateSQLs(
s.QueryUnzip2Columns(
ctx,
- chi.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
- s.sqlCreateDatabaseReplicated(host.Address.ClusterName),
+ model.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
+ s.sqlCreateDatabaseReplicated(host.Runtime.Address.ClusterName),
),
)
tableNames, createTableSQLs := debugCreateSQLs(
s.QueryUnzipAndApplyUUIDs(
ctx,
- chi.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
- s.sqlCreateTableReplicated(host.Address.ClusterName),
+ model.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
+ s.sqlCreateTableReplicated(host.Runtime.Address.ClusterName),
),
)
functionNames, createFunctionSQLs := debugCreateSQLs(
s.QueryUnzip2Columns(
ctx,
- chi.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
- s.sqlCreateFunction(host.Address.ClusterName),
+ model.CreateFQDNs(host, api.ClickHouseInstallation{}, false),
+ s.sqlCreateFunction(host.Runtime.Address.ClusterName),
),
)
return util.ConcatSlices([][]string{databaseNames, tableNames, functionNames}),
diff --git a/pkg/model/chi/schemer/schemer.go b/pkg/model/chi/schemer/schemer.go
index 66d980f5d..20b7f45bd 100644
--- a/pkg/model/chi/schemer/schemer.go
+++ b/pkg/model/chi/schemer/schemer.go
@@ -20,21 +20,22 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
+ model "github.com/altinity/clickhouse-operator/pkg/model/chi"
"github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
"github.com/altinity/clickhouse-operator/pkg/util"
)
// ClusterSchemer specifies cluster schema manager
type ClusterSchemer struct {
- *chi.Cluster
- version *api.CHVersion
+ *Cluster
+ version *swversion.SoftWareVersion
}
// NewClusterSchemer creates new Schemer object
-func NewClusterSchemer(clusterConnectionParams *clickhouse.ClusterConnectionParams, version *api.CHVersion) *ClusterSchemer {
+func NewClusterSchemer(clusterConnectionParams *clickhouse.ClusterConnectionParams, version *swversion.SoftWareVersion) *ClusterSchemer {
return &ClusterSchemer{
- Cluster: chi.NewCluster().SetClusterConnectionParams(clusterConnectionParams),
+ Cluster: NewCluster().SetClusterConnectionParams(clusterConnectionParams),
version: version,
}
}
@@ -50,9 +51,9 @@ func (s *ClusterSchemer) HostSyncTables(ctx context.Context, host *api.ChiHost)
// HostDropReplica calls SYSTEM DROP REPLICA
func (s *ClusterSchemer) HostDropReplica(ctx context.Context, hostToRunOn, hostToDrop *api.ChiHost) error {
- replica := chi.CreateInstanceHostname(hostToDrop)
- shard := hostToRunOn.Address.ShardIndex
- log.V(1).M(hostToRunOn).F().Info("Drop replica: %v at %v", replica, hostToRunOn.Address.HostName)
+ replica := model.CreateInstanceHostname(hostToDrop)
+ shard := hostToRunOn.Runtime.Address.ShardIndex
+ log.V(1).M(hostToRunOn).F().Info("Drop replica: %v at %v", replica, hostToRunOn.Runtime.Address.HostName)
return s.ExecHost(ctx, hostToRunOn, s.sqlDropReplica(shard, replica), clickhouse.NewQueryOptions().SetRetry(false))
}
@@ -84,8 +85,8 @@ func (s *ClusterSchemer) HostCreateTables(ctx context.Context, host *api.ChiHost
return nil
}
- log.V(1).M(host).F().S().Info("Migrating schema objects to host %s", host.Address.HostName)
- defer log.V(1).M(host).F().E().Info("Migrating schema objects to host %s", host.Address.HostName)
+ log.V(1).M(host).F().S().Info("Migrating schema objects to host %s", host.Runtime.Address.HostName)
+ defer log.V(1).M(host).F().E().Info("Migrating schema objects to host %s", host.Runtime.Address.HostName)
replicatedObjectNames,
replicatedCreateSQLs,
@@ -94,14 +95,14 @@ func (s *ClusterSchemer) HostCreateTables(ctx context.Context, host *api.ChiHost
var err1 error
if len(replicatedCreateSQLs) > 0 {
- log.V(1).M(host).F().Info("Creating replicated objects at %s: %v", host.Address.HostName, replicatedObjectNames)
+ log.V(1).M(host).F().Info("Creating replicated objects at %s: %v", host.Runtime.Address.HostName, replicatedObjectNames)
log.V(2).M(host).F().Info("\n%v", replicatedCreateSQLs)
err1 = s.ExecHost(ctx, host, replicatedCreateSQLs, clickhouse.NewQueryOptions().SetRetry(true))
}
var err2 error
if len(distributedCreateSQLs) > 0 {
- log.V(1).M(host).F().Info("Creating distributed objects at %s: %v", host.Address.HostName, distributedObjectNames)
+ log.V(1).M(host).F().Info("Creating distributed objects at %s: %v", host.Runtime.Address.HostName, distributedObjectNames)
log.V(2).M(host).F().Info("\n%v", distributedCreateSQLs)
err2 = s.ExecHost(ctx, host, distributedCreateSQLs, clickhouse.NewQueryOptions().SetRetry(true))
}
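A hedged sketch of how a caller might wire up the relocated schemer. The constructors for the connection params and the software version are not part of this diff, so they are passed in as parameters here, and the error return of HostCreateTables is assumed from the surrounding code:

```go
package main

import (
	"context"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	"github.com/altinity/clickhouse-operator/pkg/apis/swversion"
	"github.com/altinity/clickhouse-operator/pkg/model/chi/schemer"
	"github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
)

// migrateHost shows the intended call sequence: build a ClusterSchemer for
// the given connection params and ClickHouse version, then migrate schema
// objects to the host.
func migrateHost(
	ctx context.Context,
	params *clickhouse.ClusterConnectionParams,
	version *swversion.SoftWareVersion,
	host *api.ChiHost,
) error {
	s := schemer.NewClusterSchemer(params, version)
	return s.HostCreateTables(ctx, host)
}

func main() {}
```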
diff --git a/pkg/model/chi/schemer/sql.go b/pkg/model/chi/schemer/sql.go
index 83a30f365..4faf02ccc 100644
--- a/pkg/model/chi/schemer/sql.go
+++ b/pkg/model/chi/schemer/sql.go
@@ -202,7 +202,7 @@ func (s *ClusterSchemer) sqlCreateTableReplicated(cluster string) string {
clusterAllReplicas('%s', system.tables) tables
WHERE
database NOT IN (%s) AND
- has((select groupArray(name) from system.databases where engine in (%s)), database) AND
+ has((SELECT groupArray(name) FROM system.databases WHERE engine IN (%s)), database) AND
create_table_query != '' AND
name NOT LIKE '.inner.%%' AND
name NOT LIKE '.inner_id.%%'
diff --git a/pkg/model/chi/volumer.go b/pkg/model/chi/volumer.go
index 5ed0dd178..9d90f620a 100644
--- a/pkg/model/chi/volumer.go
+++ b/pkg/model/chi/volumer.go
@@ -20,17 +20,9 @@ import (
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)
-func (c *Creator) getVolumeClaimTemplate(volumeMount *core.VolumeMount) (*api.ChiVolumeClaimTemplate, bool) {
- volumeClaimTemplateName := volumeMount.Name
- volumeClaimTemplate, ok := c.chi.GetVolumeClaimTemplate(volumeClaimTemplateName)
- // Sometimes it is impossible to find VolumeClaimTemplate related to specified volumeMount.
- // May be this volumeMount is not created from VolumeClaimTemplate, it may be a reference to a ConfigMap
- return volumeClaimTemplate, ok
-}
-
func GetVolumeClaimTemplate(host *api.ChiHost, volumeMount *core.VolumeMount) (*api.ChiVolumeClaimTemplate, bool) {
volumeClaimTemplateName := volumeMount.Name
- volumeClaimTemplate, ok := host.CHI.GetVolumeClaimTemplate(volumeClaimTemplateName)
+ volumeClaimTemplate, ok := host.GetCHI().GetVolumeClaimTemplate(volumeClaimTemplateName)
// Sometimes it is impossible to find VolumeClaimTemplate related to specified volumeMount.
// May be this volumeMount is not created from VolumeClaimTemplate, it may be a reference to a ConfigMap
return volumeClaimTemplate, ok
@@ -44,15 +36,15 @@ func getPVCReclaimPolicy(host *api.ChiHost, template *api.ChiVolumeClaimTemplate
return template.PVCReclaimPolicy
}
- if host.CHI.Spec.Defaults.StorageManagement.PVCReclaimPolicy != api.PVCReclaimPolicyUnspecified {
- return host.CHI.Spec.Defaults.StorageManagement.PVCReclaimPolicy
+ if host.GetCHI().Spec.Defaults.StorageManagement.PVCReclaimPolicy != api.PVCReclaimPolicyUnspecified {
+ return host.GetCHI().Spec.Defaults.StorageManagement.PVCReclaimPolicy
}
// Default value
return api.PVCReclaimPolicyDelete
}
-func getPVCProvisioner(host *api.ChiHost, template *api.ChiVolumeClaimTemplate) api.PVCProvisioner {
+func GetPVCProvisioner(host *api.ChiHost, template *api.ChiVolumeClaimTemplate) api.PVCProvisioner {
// Order by priority
// VolumeClaimTemplate.PVCProvisioner, in case specified
@@ -60,8 +52,8 @@ func getPVCProvisioner(host *api.ChiHost, template *api.ChiVolumeClaimTemplate)
return template.PVCProvisioner
}
- if host.CHI.Spec.Defaults.StorageManagement.PVCProvisioner != api.PVCProvisionerUnspecified {
- return host.CHI.Spec.Defaults.StorageManagement.PVCProvisioner
+ if host.GetCHI().Spec.Defaults.StorageManagement.PVCProvisioner != api.PVCProvisionerUnspecified {
+ return host.GetCHI().Spec.Defaults.StorageManagement.PVCProvisioner
}
// Default value
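The reclaim-policy and provisioner getters above follow the same fall-through priority: the per-template value wins, then the CHI-wide default, then a built-in fallback. A generic sketch of that pattern with hypothetical names (not the operator's API):

```go
package main

import "fmt"

// pickPolicy returns the first value that is not "unspecified", mirroring
// the template -> CHI defaults -> built-in default priority used above.
func pickPolicy(fromTemplate, fromDefaults, builtin string) string {
	const unspecified = ""
	if fromTemplate != unspecified {
		return fromTemplate
	}
	if fromDefaults != unspecified {
		return fromDefaults
	}
	return builtin
}

func main() {
	fmt.Println(pickPolicy("", "Retain", "Delete")) // "Retain"
}
```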
diff --git a/pkg/model/chk/creator.go b/pkg/model/chk/creator.go
index 0e7522c6c..8d55f690b 100644
--- a/pkg/model/chk/creator.go
+++ b/pkg/model/chk/creator.go
@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
)
// CreateConfigMap returns a config map containing ClickHouse Keeper config XML
@@ -60,15 +61,12 @@ func CreateStatefulSet(chk *api.ClickHouseKeeperInstallation) *apps.StatefulSet
Labels: labels,
},
Spec: apps.StatefulSetSpec{
- ServiceName: getHeadlessServiceName(chk),
Replicas: &replicas,
+ ServiceName: getHeadlessServiceName(chk),
Selector: &meta.LabelSelector{
MatchLabels: labels,
},
- UpdateStrategy: apps.StatefulSetUpdateStrategy{
- Type: apps.RollingUpdateStatefulSetStrategyType,
- },
- PodManagementPolicy: apps.OrderedReadyPodManagement,
+
Template: core.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
GenerateName: chk.GetName(),
@@ -78,6 +76,12 @@ func CreateStatefulSet(chk *api.ClickHouseKeeperInstallation) *apps.StatefulSet
Spec: createPodTemplateSpec(chk),
},
VolumeClaimTemplates: getVolumeClaimTemplates(chk),
+
+ PodManagementPolicy: apps.OrderedReadyPodManagement,
+ UpdateStrategy: apps.StatefulSetUpdateStrategy{
+ Type: apps.RollingUpdateStatefulSetStrategyType,
+ },
+ RevisionHistoryLimit: chop.Config().GetRevisionHistoryLimit(),
},
}
}
@@ -177,9 +181,12 @@ func createInitContainers(chk *api.ClickHouseKeeperInstallation) []core.Containe
}
if len(initContainers[0].Command) == 0 {
initContainers[0].Command = []string{
- "bash",
- "-xc",
- "export KEEPER_ID=${HOSTNAME##*-}; sed \"s/KEEPER_ID/$KEEPER_ID/g\" /tmp/clickhouse-keeper/keeper_config.xml > /etc/clickhouse-keeper/keeper_config.xml; cat /etc/clickhouse-keeper/keeper_config.xml",
+ `bash`,
+ `-xc`,
+ // Build keeper config
+ `export KEEPER_ID=${HOSTNAME##*-}; ` +
+ `sed "s/KEEPER_ID/${KEEPER_ID}/g" /tmp/clickhouse-keeper/keeper_config.xml > /etc/clickhouse-keeper/keeper_config.xml; ` +
+ `cat /etc/clickhouse-keeper/keeper_config.xml`,
}
}
initContainers[0].VolumeMounts = append(initContainers[0].VolumeMounts,
@@ -215,7 +222,9 @@ func createContainers(chk *api.ClickHouseKeeperInstallation) []core.Container {
}
if containers[0].LivenessProbe == nil {
probeScript := fmt.Sprintf(
- "date && OK=$(exec 3<>/dev/tcp/127.0.0.1/%d ; printf 'ruok' >&3 ; IFS=; tee <&3; exec 3<&- ;); if [[ \"$OK\" == \"imok\" ]]; then exit 0; else exit 1; fi",
+ `date && `+
+ `OK=$(exec 3<>/dev/tcp/127.0.0.1/%d; printf 'ruok' >&3; IFS=; tee <&3; exec 3<&-;);`+
+ `if [[ "${OK}" == "imok" ]]; then exit 0; else exit 1; fi`,
chk.Spec.GetClientPort())
containers[0].LivenessProbe = &core.Probe{
ProbeHandler: core.ProbeHandler{
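The liveness probe above is a bash one-liner that talks to the Keeper client port using the ZooKeeper four-letter-word protocol. A standalone sketch of how the command string is assembled; the port value 2181 is illustrative, the real value comes from chk.Spec.GetClientPort():

```go
package main

import "fmt"

func main() {
	clientPort := 2181 // illustrative; the operator reads it from chk.Spec.GetClientPort()

	// Send 'ruok' to the Keeper client port and expect 'imok' back;
	// any other reply makes the probe exit non-zero.
	probeScript := fmt.Sprintf(
		`date && `+
			`OK=$(exec 3<>/dev/tcp/127.0.0.1/%d; printf 'ruok' >&3; IFS=; tee <&3; exec 3<&-;);`+
			`if [[ "${OK}" == "imok" ]]; then exit 0; else exit 1; fi`,
		clientPort)

	fmt.Println(probeScript)
}
```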
diff --git a/pkg/model/chk/normalizer.go b/pkg/model/chk/normalizer.go
index ac8dbd4fc..66a8eb889 100644
--- a/pkg/model/chk/normalizer.go
+++ b/pkg/model/chk/normalizer.go
@@ -17,45 +17,29 @@ package chk
import (
"strings"
- core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
- "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
+ templatesNormalizer "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer/templates"
)
// NormalizerContext specifies CHI-related normalization context
type NormalizerContext struct {
- // start specifies start CHK from which normalization has started
- start *apiChk.ClickHouseKeeperInstallation
// chk specifies current CHK being normalized
chk *apiChk.ClickHouseKeeperInstallation
// options specifies normalization options
- options *NormalizerOptions
+ options *normalizer.Options
}
// NewNormalizerContext creates new NormalizerContext
-func NewNormalizerContext(options *NormalizerOptions) *NormalizerContext {
+func NewNormalizerContext(options *normalizer.Options) *NormalizerContext {
return &NormalizerContext{
options: options,
}
}
-// NormalizerOptions specifies normalization options
-type NormalizerOptions struct {
- // WithDefaultCluster specifies whether to insert default cluster in case no cluster specified
- WithDefaultCluster bool
-}
-
-// NewNormalizerOptions creates new NormalizerOptions
-func NewNormalizerOptions() *NormalizerOptions {
- return &NormalizerOptions{
- WithDefaultCluster: true,
- }
-}
-
// Normalizer specifies structures normalizer
type Normalizer struct {
ctx *NormalizerContext
@@ -78,7 +62,7 @@ func newCHK() *apiChk.ClickHouseKeeperInstallation {
// CreateTemplatedCHK produces ready-to-use CHK object
func (n *Normalizer) CreateTemplatedCHK(
chk *apiChk.ClickHouseKeeperInstallation,
- options *NormalizerOptions,
+ options *normalizer.Options,
) (*apiChk.ClickHouseKeeperInstallation, error) {
// New CHI starts with new context
n.ctx = NewNormalizerContext(options)
@@ -146,7 +130,7 @@ func (n *Normalizer) normalizeConfiguration(conf *apiChk.ChkConfiguration) *apiC
}
// normalizeTemplates normalizes .spec.templates
-func (n *Normalizer) normalizeTemplates(templates *apiChk.ChkTemplates) *apiChk.ChkTemplates {
+func (n *Normalizer) normalizeTemplates(templates *apiChi.ChiTemplates) *apiChi.ChiTemplates {
if templates == nil {
//templates = apiChi.NewChiTemplates()
return nil
@@ -172,167 +156,26 @@ func (n *Normalizer) normalizeTemplates(templates *apiChk.ChkTemplates) *apiChk.
// normalizePodTemplate normalizes .spec.templates.podTemplates
func (n *Normalizer) normalizePodTemplate(template *apiChi.ChiPodTemplate) {
- // Name
-
- // Zone
- if len(template.Zone.Values) == 0 {
- // In case no values specified - no key is reasonable
- template.Zone.Key = ""
- } else if template.Zone.Key == "" {
- // We have values specified, but no key
- // Use default zone key in this case
- template.Zone.Key = core.LabelTopologyZone
- } else {
- // We have both key and value(s) specified explicitly
- }
-
- // PodDistribution
- for i := range template.PodDistribution {
- if additionalPoDistributions := n.normalizePodDistribution(&template.PodDistribution[i]); additionalPoDistributions != nil {
- template.PodDistribution = append(template.PodDistribution, additionalPoDistributions...)
- }
- }
-
- // Spec
- template.Spec.Affinity = chi.MergeAffinity(template.Spec.Affinity, chi.NewAffinity(template))
-
- // In case we have hostNetwork specified, we need to have ClusterFirstWithHostNet DNS policy, because of
- // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
- // which tells: For Pods running with hostNetwork, you should explicitly set its DNS policy “ClusterFirstWithHostNet”.
- if template.Spec.HostNetwork {
- template.Spec.DNSPolicy = core.DNSClusterFirstWithHostNet
+ // TODO need to support multi-cluster
+ replicasCount := 1
+ if len(n.ctx.chk.Spec.Configuration.Clusters) > 0 {
+ replicasCount = n.ctx.chk.Spec.Configuration.Clusters[0].Layout.ReplicasCount
}
-
+ templatesNormalizer.NormalizePodTemplate(replicasCount, template)
// Introduce PodTemplate into Index
n.ctx.chk.Spec.Templates.EnsurePodTemplatesIndex().Set(template.Name, template)
}
-const defaultTopologyKey = core.LabelHostname
-
-func (n *Normalizer) normalizePodDistribution(podDistribution *apiChi.ChiPodDistribution) []apiChi.ChiPodDistribution {
- if podDistribution.TopologyKey == "" {
- podDistribution.TopologyKey = defaultTopologyKey
- }
- switch podDistribution.Type {
- case
- deployment.PodDistributionUnspecified,
- // AntiAffinity section
- deployment.PodDistributionClickHouseAntiAffinity,
- deployment.PodDistributionShardAntiAffinity,
- deployment.PodDistributionReplicaAntiAffinity:
- // PodDistribution is known
- if podDistribution.Scope == "" {
- podDistribution.Scope = deployment.PodDistributionScopeCluster
- }
- return nil
- case
- deployment.PodDistributionAnotherNamespaceAntiAffinity,
- deployment.PodDistributionAnotherClickHouseInstallationAntiAffinity,
- deployment.PodDistributionAnotherClusterAntiAffinity:
- // PodDistribution is known
- return nil
- case
- deployment.PodDistributionMaxNumberPerNode:
- // PodDistribution is known
- if podDistribution.Number < 0 {
- podDistribution.Number = 0
- }
- return nil
- case
- // Affinity section
- deployment.PodDistributionNamespaceAffinity,
- deployment.PodDistributionClickHouseInstallationAffinity,
- deployment.PodDistributionClusterAffinity,
- deployment.PodDistributionShardAffinity,
- deployment.PodDistributionReplicaAffinity,
- deployment.PodDistributionPreviousTailAffinity:
- // PodDistribution is known
- return nil
-
- case deployment.PodDistributionCircularReplication:
- // PodDistribution is known
- // PodDistributionCircularReplication is a shortcut to simplify complex set of other distributions
- // All shortcuts have to be expanded
-
- if podDistribution.Scope == "" {
- podDistribution.Scope = deployment.PodDistributionScopeCluster
- }
-
- // TODO need to support multi-cluster
- cluster := n.ctx.chk.Spec.Configuration.Clusters[0]
-
- // Expand shortcut
- return []apiChi.ChiPodDistribution{
- {
- Type: deployment.PodDistributionShardAntiAffinity,
- Scope: podDistribution.Scope,
- },
- {
- Type: deployment.PodDistributionReplicaAntiAffinity,
- Scope: podDistribution.Scope,
- },
- {
- Type: deployment.PodDistributionMaxNumberPerNode,
- Scope: podDistribution.Scope,
- Number: cluster.Layout.ReplicasCount,
- },
-
- {
- Type: deployment.PodDistributionPreviousTailAffinity,
- },
-
- {
- Type: deployment.PodDistributionNamespaceAffinity,
- },
- {
- Type: deployment.PodDistributionClickHouseInstallationAffinity,
- },
- {
- Type: deployment.PodDistributionClusterAffinity,
- },
- }
- }
-
- // PodDistribution is not known
- podDistribution.Type = deployment.PodDistributionUnspecified
- return nil
-}
-
// normalizeVolumeClaimTemplate normalizes .spec.templates.volumeClaimTemplates
func (n *Normalizer) normalizeVolumeClaimTemplate(template *apiChi.ChiVolumeClaimTemplate) {
- // Check name
- // Skip for now
-
- // StorageManagement
- n.normalizeStorageManagement(&template.StorageManagement)
-
- // Check Spec
- // Skip for now
-
+ templatesNormalizer.NormalizeVolumeClaimTemplate(template)
// Introduce VolumeClaimTemplate into Index
n.ctx.chk.Spec.Templates.EnsureVolumeClaimTemplatesIndex().Set(template.Name, template)
}
-// normalizeStorageManagement normalizes StorageManagement
-func (n *Normalizer) normalizeStorageManagement(storage *apiChi.StorageManagement) {
- // Check PVCProvisioner
- if !storage.PVCProvisioner.IsValid() {
- storage.PVCProvisioner = apiChi.PVCProvisionerUnspecified
- }
-
- // Check PVCReclaimPolicy
- if !storage.PVCReclaimPolicy.IsValid() {
- storage.PVCReclaimPolicy = apiChi.PVCReclaimPolicyUnspecified
- }
-}
-
// normalizeServiceTemplate normalizes .spec.templates.serviceTemplates
func (n *Normalizer) normalizeServiceTemplate(template *apiChi.ChiServiceTemplate) {
- // Check name
- // Check GenerateName
- // Check ObjectMeta
- // Check Spec
-
+ templatesNormalizer.NormalizeServiceTemplate(template)
// Introduce ServiceClaimTemplate into Index
n.ctx.chk.Spec.Templates.EnsureServiceTemplatesIndex().Set(template.Name, template)
}
diff --git a/pkg/model/k8s/container.go b/pkg/model/k8s/container.go
new file mode 100644
index 000000000..8c7e3fe6d
--- /dev/null
+++ b/pkg/model/k8s/container.go
@@ -0,0 +1,101 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8s
+
+import (
+ core "k8s.io/api/core/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+// PodSpecAddContainer adds container to PodSpec
+func PodSpecAddContainer(podSpec *core.PodSpec, container core.Container) {
+ podSpec.Containers = append(podSpec.Containers, container)
+}
+
+// ContainerAppendVolumeMounts appends multiple VolumeMount(s) to the specified container
+func ContainerAppendVolumeMounts(container *core.Container, volumeMounts ...core.VolumeMount) {
+ for _, volumeMount := range volumeMounts {
+ ContainerAppendVolumeMount(container, volumeMount)
+ }
+}
+
+// ContainerAppendVolumeMount appends one VolumeMount to the specified container
+func ContainerAppendVolumeMount(container *core.Container, volumeMount core.VolumeMount) {
+ //
+ // Sanity checks
+ //
+
+ if container == nil {
+ return
+ }
+
+ // VolumeMount has to have reasonable data - Name and MountPath
+ if (volumeMount.Name == "") || (volumeMount.MountPath == "") {
+ return
+ }
+
+ // Check that:
+ // 1. Mountable item (VolumeClaimTemplate or Volume) specified in this VolumeMount is NOT already mounted
+ // in this container by any other VolumeMount (to avoid double-mount of a mountable item)
+ // 2. And specified `mountPath` (say '/var/lib/clickhouse') is NOT already mounted in this container
+ // by any VolumeMount (to avoid double-mount/rewrite into single `mountPath`)
+ for i := range container.VolumeMounts {
+ // Convenience wrapper
+ existingVolumeMount := &container.VolumeMounts[i]
+
+ // 1. Check whether this mountable item is already listed in VolumeMount of this container
+ if volumeMount.Name == existingVolumeMount.Name {
+ // This .templates.VolumeClaimTemplate is already used in VolumeMount
+ return
+ }
+
+ // 2. Check whether `mountPath` (say '/var/lib/clickhouse') is already mounted
+ if volumeMount.MountPath == existingVolumeMount.MountPath {
+ // `mountPath` (say /var/lib/clickhouse) is already mounted
+ return
+ }
+ }
+
+ // Add VolumeMount to ClickHouse container to `mountPath` point
+ container.VolumeMounts = append(container.VolumeMounts, volumeMount)
+}
+
+// ContainerEnsurePortByName ensures the container exposes a port with the specified name and value
+func ContainerEnsurePortByName(container *core.Container, name string, port int32) {
+ if api.IsPortUnassigned(port) {
+ return
+ }
+
+ // Find port with specified name
+ for i := range container.Ports {
+ containerPort := &container.Ports[i]
+ if containerPort.Name == name {
+ // Port with specified name found in the container
+ // Overwrite existing port spec:
+ // 1. No host port
+ // 2. Specify new port value
+ containerPort.HostPort = 0
+ containerPort.ContainerPort = port
+ return
+ }
+ }
+
+ // Port with the specified name was not found in the container. Need to append.
+ container.Ports = append(container.Ports, core.ContainerPort{
+ Name: name,
+ ContainerPort: port,
+ })
+}
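A small usage sketch for the new container helpers. The import path follows the new file's location; the container literal is hypothetical:

```go
package main

import (
	"fmt"

	core "k8s.io/api/core/v1"

	k8s "github.com/altinity/clickhouse-operator/pkg/model/k8s"
)

func main() {
	container := &core.Container{
		Name:  "clickhouse",
		Ports: []core.ContainerPort{{Name: "http", ContainerPort: 8123}},
	}

	// An existing "http" port is overwritten in place (HostPort cleared),
	// while the missing "tcp" port is appended.
	k8s.ContainerEnsurePortByName(container, "http", 8123)
	k8s.ContainerEnsurePortByName(container, "tcp", 9000)

	// A VolumeMount is appended only once per name and per mount path.
	k8s.ContainerAppendVolumeMount(container, core.VolumeMount{
		Name:      "data",
		MountPath: "/var/lib/clickhouse",
	})

	fmt.Println(container.Ports, container.VolumeMounts)
}
```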
diff --git a/pkg/model/k8s/service.go b/pkg/model/k8s/service.go
new file mode 100644
index 000000000..567e8d31d
--- /dev/null
+++ b/pkg/model/k8s/service.go
@@ -0,0 +1,34 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8s
+
+import (
+ "fmt"
+
+ core "k8s.io/api/core/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+// ServiceSpecVerifyPorts verifies core.ServiceSpec to have reasonable ports specified
+func ServiceSpecVerifyPorts(spec *core.ServiceSpec) error {
+ for i := range spec.Ports {
+ servicePort := &spec.Ports[i]
+ if api.IsPortInvalid(servicePort.Port) {
+ return fmt.Errorf("incorrect port: %d", servicePort.Port)
+ }
+ }
+ return nil
+}
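A minimal sketch of calling the new verification helper; the service spec values are hypothetical:

```go
package main

import (
	"fmt"

	core "k8s.io/api/core/v1"

	k8s "github.com/altinity/clickhouse-operator/pkg/model/k8s"
)

func main() {
	spec := &core.ServiceSpec{
		Ports: []core.ServicePort{
			{Name: "http", Port: 8123},
			{Name: "client", Port: 9000},
		},
	}
	// Returns an error if any declared port is invalid.
	if err := k8s.ServiceSpecVerifyPorts(spec); err != nil {
		fmt.Println("invalid service spec:", err)
	}
}
```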
diff --git a/pkg/model/k8s/stateful_set.go b/pkg/model/k8s/stateful_set.go
new file mode 100644
index 000000000..cc8010b56
--- /dev/null
+++ b/pkg/model/k8s/stateful_set.go
@@ -0,0 +1,131 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8s
+
+import (
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+)
+
+// StatefulSetContainerGet gets container from the StatefulSet either by name or by index
+func StatefulSetContainerGet(statefulSet *apps.StatefulSet, name string, index int) (*core.Container, bool) {
+ // Find by name
+ if len(name) > 0 {
+ for i := range statefulSet.Spec.Template.Spec.Containers {
+ // Convenience wrapper
+ container := &statefulSet.Spec.Template.Spec.Containers[i]
+ if container.Name == name {
+ return container, true
+ }
+ }
+ }
+
+ // Find by index
+ if index >= 0 {
+ if len(statefulSet.Spec.Template.Spec.Containers) > index {
+ return &statefulSet.Spec.Template.Spec.Containers[index], true
+ }
+ }
+
+ return nil, false
+}
+
+// IsStatefulSetGeneration returns whether the StatefulSet has fully rolled out the requested generation
+func IsStatefulSetGeneration(statefulSet *apps.StatefulSet, generation int64) bool {
+ if statefulSet == nil {
+ return false
+ }
+
+ // StatefulSet has .spec generation we are looking for
+ return (statefulSet.Generation == generation) &&
+ // and this .spec generation is being applied to replicas - it is observed right now
+ (statefulSet.Status.ObservedGeneration == statefulSet.Generation) &&
+ // and all replicas are of expected generation
+ (statefulSet.Status.CurrentReplicas == *statefulSet.Spec.Replicas) &&
+ // and all replicas are updated - meaning rolling update completed over all replicas
+ (statefulSet.Status.UpdatedReplicas == *statefulSet.Spec.Replicas) &&
+ // and current revision is an updated one - meaning rolling update completed over all replicas
+ (statefulSet.Status.CurrentRevision == statefulSet.Status.UpdateRevision)
+}
+
+// IsStatefulSetReady returns whether StatefulSet is ready
+func IsStatefulSetReady(statefulSet *apps.StatefulSet) bool {
+ if statefulSet == nil {
+ return false
+ }
+
+ if statefulSet.Spec.Replicas == nil {
+ return false
+ }
+ // All replicas are in "Ready" status - meaning ready to be used - no failure inside
+ return statefulSet.Status.ReadyReplicas == *statefulSet.Spec.Replicas
+}
+
+// IsStatefulSetNotReady returns whether StatefulSet is not ready
+func IsStatefulSetNotReady(statefulSet *apps.StatefulSet) bool {
+ if statefulSet == nil {
+ return false
+ }
+
+ return !IsStatefulSetReady(statefulSet)
+}
+
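+// StatefulSetHasVolumeClaimTemplateByName checks whether the StatefulSet already lists a VolumeClaimTemplate with the specified name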
+func StatefulSetHasVolumeClaimTemplateByName(statefulSet *apps.StatefulSet, name string) bool {
+ // Check whether provided VolumeClaimTemplate name is already listed in statefulSet.Spec.VolumeClaimTemplates
+ for i := range statefulSet.Spec.VolumeClaimTemplates {
+ // Convenience wrapper
+ volumeClaimTemplate := &statefulSet.Spec.VolumeClaimTemplates[i]
+ if volumeClaimTemplate.Name == name {
+ // This VolumeClaimTemplate name is already listed in statefulSet.Spec.VolumeClaimTemplates
+ return true
+ }
+ }
+
+ return false
+}
+
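+// StatefulSetHasVolumeByName checks whether the StatefulSet's pod template already lists a Volume with the specified name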
+func StatefulSetHasVolumeByName(statefulSet *apps.StatefulSet, name string) bool {
+ for i := range statefulSet.Spec.Template.Spec.Volumes {
+ // Convenience wrapper
+ volume := &statefulSet.Spec.Template.Spec.Volumes[i]
+ if volume.Name == name {
+ // This Volume name is already listed in statefulSet.Spec.Template.Spec.Volumes
+ return true
+ }
+ }
+
+ return false
+}
+
+// StatefulSetAppendVolumes appends multiple Volume(s) to the specified StatefulSet
+func StatefulSetAppendVolumes(statefulSet *apps.StatefulSet, volumes ...core.Volume) {
+ statefulSet.Spec.Template.Spec.Volumes = append(
+ statefulSet.Spec.Template.Spec.Volumes,
+ volumes...,
+ )
+}
+
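+// StatefulSetAppendVolumeMounts appends the specified VolumeMount(s) to every container in the StatefulSet's pod template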
+func StatefulSetAppendVolumeMounts(statefulSet *apps.StatefulSet, volumeMounts ...core.VolumeMount) {
+ // And reference these Volumes in each Container via VolumeMount
+ // So Pod will have VolumeMounts mounted as Volumes
+ for i := range statefulSet.Spec.Template.Spec.Containers {
+ // Convenience wrapper
+ container := &statefulSet.Spec.Template.Spec.Containers[i]
+ ContainerAppendVolumeMounts(
+ container,
+ volumeMounts...,
+ )
+ }
+}
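A hedged usage sketch for the readiness/generation helpers. The StatefulSet literal is hypothetical; in the operator these objects come from the API server:

```go
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"

	k8s "github.com/altinity/clickhouse-operator/pkg/model/k8s"
)

func main() {
	replicas := int32(3)
	sts := &apps.StatefulSet{
		Spec:   apps.StatefulSetSpec{Replicas: &replicas},
		Status: apps.StatefulSetStatus{ReadyReplicas: 3},
	}

	// Ready: all requested replicas report Ready.
	fmt.Println(k8s.IsStatefulSetReady(sts)) // true

	// The generation check additionally requires observed/current/updated
	// replica counts and revisions to match; false for this bare object.
	fmt.Println(k8s.IsStatefulSetGeneration(sts, 1))
}
```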
diff --git a/release b/release
index 9e40e75c5..40a6dfede 100644
--- a/release
+++ b/release
@@ -1 +1 @@
-0.23.3
+0.23.4
diff --git a/releases b/releases
index 33fcb6b04..e2b10d252 100644
--- a/releases
+++ b/releases
@@ -1,3 +1,4 @@
+0.23.3
0.23.2
0.23.1
0.23.0
diff --git a/tests/e2e/manifests/chi/test-009-operator-upgrade-1.yaml b/tests/e2e/manifests/chi/test-009-operator-upgrade-1.yaml
index 59c07d729..4b25313f4 100644
--- a/tests/e2e/manifests/chi/test-009-operator-upgrade-1.yaml
+++ b/tests/e2e/manifests/chi/test-009-operator-upgrade-1.yaml
@@ -5,6 +5,11 @@ metadata:
labels:
test: test
spec:
+ defaults:
+ templates:
+ dataVolumeClaimTemplate: default
+ podTemplate: default
+ serviceTemplate: chi-service-template
useTemplates:
- name: clickhouse-version
configuration:
@@ -15,11 +20,19 @@ spec:
users:
test_009/password: test_009
test_009/networks/ip: 0.0.0.0/0
- defaults:
- templates:
- dataVolumeClaimTemplate: default
- podTemplate: default
templates:
+ serviceTemplates:
+ - name: chi-service-template
+ generateName: "clickhouse-{chi}"
+ spec:
+ ports:
+ - name: http
+ port: 8123
+ targetPort: 8123
+ - name: client
+ port: 9000
+ targetPort: 9000
+ type: LoadBalancer
volumeClaimTemplates:
- name: default
spec:
diff --git a/tests/e2e/manifests/chi/test-009-operator-upgrade-2.yaml b/tests/e2e/manifests/chi/test-009-operator-upgrade-2.yaml
index 5371ed8f4..1aafadcf8 100644
--- a/tests/e2e/manifests/chi/test-009-operator-upgrade-2.yaml
+++ b/tests/e2e/manifests/chi/test-009-operator-upgrade-2.yaml
@@ -11,7 +11,6 @@ spec:
podTemplate: clickhouse-template
dataVolumeClaimTemplate: aws-ebs-volume-claim
serviceTemplate: chi-service-template
-
configuration:
settings:
logger/level: information
diff --git a/tests/e2e/manifests/chi/test-016-settings-06.yaml b/tests/e2e/manifests/chi/test-016-settings-06.yaml
new file mode 100644
index 000000000..e42be7739
--- /dev/null
+++ b/tests/e2e/manifests/chi/test-016-settings-06.yaml
@@ -0,0 +1,103 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+
+metadata:
+ name: test-016-settings
+
+spec:
+ useTemplates:
+ - name: clickhouse-version
+ configuration:
+ clusters:
+ - name: default
+ layout:
+ shardsCount: 1
+ profiles:
+ default/max_memory_usage: 7000000000 # test for big value
+ users:
+ user2/profile: default
+ user2/quota: default
+ user2/password: qwerty
+ user2/networks/ip: "::/0"
+ settings:
+ macros/layer: "03"
+ query_log: _removed_
+ # should not cause a restart
+ dictionaries_config: config.d/dic*.xml
+ logger/level: information
+ max_server_memory_usage_to_ram_ratio: 0.8
+ max_table_size_to_drop: 1000000000
+ max_concurrent_queries: 200
+ models_config: /var/lib/clickhouse/user_data/models/*.xml
+ user_defined_executable_functions_config: /var/lib/clickhouse/user_data/udf/*.xml
+ files:
+ custom.xml: |
+
+
+ test-changed
+
+
+ config.d/custom4.xml: |
+
+
+ test-custom4
+
+
+ users.d/my_users.xml: |
+
+
+
+
+ default
+ default
+
+
+
+ default
+ default
+
+
+
+ config.d/remote_servers.xml: |
+
+
+
+
+ false
+
+ chi-test-016-settings-default-0-0
+ 9000
+
+
+
+
+
+ config.d/dict_three.xml: |
+
+
+ three
+
+ 60
+
+
+
+ dummy
+
+
+ three
+ dummy
+ UInt8
+ 0
+
+
+
+
diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py
index 60f6c78bc..2fa0af0a7 100644
--- a/tests/e2e/test_operator.py
+++ b/tests/e2e/test_operator.py
@@ -15,6 +15,7 @@
from testflows.asserts import error
from testflows.core import *
from e2e.steps import *
+from datetime import datetime
@TestScenario
@@ -212,6 +213,8 @@ def test_operator_upgrade(self, manifest, service, version_from, version_to=None
version_to = current().context.operator_version
with Given(f"clickhouse-operator from {version_from}"):
util.install_operator_version(version_from)
+ time.sleep(15)
+
chi = yaml_manifest.get_chi_name(util.get_full_path(manifest, True))
cluster = chi
@@ -565,7 +568,7 @@ def test_008_3(self):
@Name("test_009_1. Test operator upgrade")
@Requirements(RQ_SRS_026_ClickHouseOperator_Managing_UpgradingOperator("1.0"))
@Tags("NO_PARALLEL")
-def test_009_1(self, version_from="0.23.2", version_to=None):
+def test_009_1(self, version_from="0.23.3", version_to=None):
if version_to is None:
version_to = self.context.operator_version
@@ -587,7 +590,7 @@ def test_009_1(self, version_from="0.23.2", version_to=None):
@TestScenario
@Name("test_009_2. Test operator upgrade")
@Tags("NO_PARALLEL")
-def test_009_2(self, version_from="0.23.2", version_to=None):
+def test_009_2(self, version_from="0.23.3", version_to=None):
if version_to is None:
version_to = self.context.operator_version
@@ -1918,6 +1921,22 @@ def test_016(self):
)
assert out == "test-changed"
+ # test-016-settings-06.yaml
+ with When("Add I change a number of settings that does not requre a restart"):
+ start_time = kubectl.get_field("pod", f"chi-{chi}-default-0-0-0", ".status.startTime")
+ kubectl.create_and_check(
+ manifest="manifests/chi/test-016-settings-06.yaml",
+ check={
+ "do_not_delete": 1,
+ },
+ )
+
+ with And("ClickHouse SHOULD NOT BE restarted"):
+ new_start_time = kubectl.get_field("pod", f"chi-{chi}-default-0-0-0", ".status.startTime")
+ assert start_time == new_start_time
+
+
+
with Finally("I clean up"):
with By("deleting test namespace"):
delete_test_namespace()
@@ -3255,21 +3274,36 @@ def run_select_query(self, host, user, password, query, res1, res2, trigger_even
ok = 0
partial = 0
errors = 0
+ run = 0
+ partial_runs = []
+ error_runs = []
cmd = f'exec -n {self.context.test_namespace} {client_pod} -- clickhouse-client --user={user} --password={password} -h {host} -q "{query}"'
while not trigger_event.is_set():
+ run += 1
+ # Adjust time to glog's format
+ now = datetime.utcnow().strftime("%H:%M:%S.%f")
cnt_test = kubectl.launch(cmd, ok_to_fail=True, shell=shell)
if cnt_test == res1:
ok += 1
if cnt_test == res2:
partial += 1
+ partial_runs.append(run)
+ partial_runs.append(now)
if cnt_test != res1 and cnt_test != res2:
errors += 1
+ error_runs.append(run)
+ error_runs.append(now)
print("*** RUN_QUERY ERROR ***")
print(cnt_test)
time.sleep(0.5)
with By(
- f"{ok} queries have been executed with no errors, {partial} queries returned incomplete results. {errors} queries have failed"
+ f"{run} queries have been executed, of which: " +
+ f"{ok} queries have been executed with no errors, " +
+ f"{partial} queries returned incomplete results, " +
+ f"{errors} queries have failed. " +
+ f"incomplete results runs: {partial_runs} " +
+ f"error runs: {error_runs}"
):
assert errors == 0
if partial > 0:
diff --git a/tests/regression.py b/tests/regression.py
index 9bc8b6fed..d1a48ec72 100755
--- a/tests/regression.py
+++ b/tests/regression.py
@@ -7,6 +7,7 @@
xfails = {
# test_operator.py
+ #"/regression/e2e.test_operator/test_009_1*": [(Fail, "Test 009_1 sometimes fails due to unknown reasons")],
# "/regression/e2e.test_operator/test_028*": [(Fail, "In case of: 1) operator restarted on the different IP and 2) long time before operator received event, this test would fail due to RollingUpdate option")],
# "/regression/e2e.test_operator/test_032*": [(Fail, "Test 32 sometimes fails due to unknown reasons")],
# test_clickhouse.py