# values.yaml (forked from elastic/helm-charts)
---
elasticsearchURL: "" # "http://elasticsearch-master:9200"
elasticsearchHosts: "http://elasticsearch-master:9200"
replicas: 1
# Extra environment variables to append to the Kibana container
# This will be appended to the current 'env:' key. You can use any of the Kubernetes env
# syntax here
extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs
image: "docker.elastic.co/kibana/kibana"
imageTag: "7.1.0"
imagePullPolicy: "IfNotPresent"
resources:
  requests:
    cpu: "100m"
    memory: "500Mi"
  limits:
    cpu: "1000m"
    memory: "1Gi"
protocol: http
healthCheckPath: "/app/kibana"
# Allows you to add any config files to /usr/share/kibana/config/
# such as kibana.yml
kibanaConfig: {}
#  kibana.yml: |
#    key:
#      nestedkey: value
# If a Pod Security Policy is in use, it may be necessary to specify a security context as well as a service account
podSecurityContext: {}
#runAsUser: "place the user id here"
#fsGroup: "place the group id here"
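# For example (a sketch assuming the default Kibana image, which runs as user ID 1000; adjust for your own image):
#runAsUser: 1000
#fsGroup: 1000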
serviceAccount: ""
# This is the PriorityClass setting as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
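# For example (hypothetical class name; it must match a PriorityClass that already exists in the cluster):
# priorityClassName: "high-priority"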
# By default this will make sure two pods don't end up on the same node
# Changing this to a zone or region topology key would allow you to spread pods across zones or regions instead
antiAffinityTopologyKey: "kubernetes.io/hostname"
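# For example, to spread pods across zones instead of hosts (a sketch; the exact label depends on your Kubernetes version and provider):
# antiAffinityTopologyKey: "failure-domain.beta.kubernetes.io/zone"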
# "hard" means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to "soft" will apply the rule on a "best effort" basis
antiAffinity: "hard"
httpPort: 5601
# This is the maxUnavailable setting for the pod disruption budget
# The default value of 1 will make sure that Kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
updateStrategy:
  type: "Recreate"
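# "Recreate" avoids running old and new Kibana pods side by side during an upgrade; if a brief overlap is acceptable, a rolling update also works, e.g.:
#  type: "RollingUpdate"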
service:
  type: ClusterIP
  port: 5601
ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local
readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5
imagePullSecrets: []
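# For example (hypothetical secret name; the secret must already exist in the namespace Kibana is deployed to):
#  - name: my-registry-secret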
nodeSelector: {}
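# For example (assumes your nodes carry this label):
#  disktype: ssd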
tolerations: []
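# For example (assumes a matching "dedicated=kibana:NoSchedule" taint has been applied to the target nodes):
#  - key: "dedicated"
#    operator: "Equal"
#    value: "kibana"
#    effect: "NoSchedule"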
affinity: {}
nameOverride: ""
fullnameOverride: ""