
Commit 26e0d62

important fix for qryn and uptrace clickhouse storage

1 parent: 5f80050

3 files changed: 50 additions, 28 deletions

example/main.tf

Lines changed: 1 addition & 1 deletion
@@ -123,7 +123,7 @@ module "vpc" {
 
 module "eks" {
   # source = "../"
-  source = "github.com/tldr-devops/aws-eks-terraform?ref=1.0"
+  source = "github.com/tldr-devops/aws-eks-terraform?ref=1.1"
 
   cluster_name    = local.cluster_name
   cluster_version = local.cluster_version

modules/qryn/main.tf

Lines changed: 15 additions & 5 deletions
@@ -39,6 +39,8 @@ locals {
   service_account = "qryn-clickhouse"
 
   # https://github.com/bitnami/charts/blob/main/bitnami/clickhouse/values.yaml
+  # https://uptrace.dev/get/config.html#s3-storage
+  # https://altinity.com/blog/clickhouse-mergetree-on-s3-administrative-best-practices
   clickhouse_values = [
     <<-EOT
     shards: 1
@@ -48,7 +50,7 @@ locals {
       existingSecret: "qryn-clickhouse-password"
      existingSecretKey: "password"
     persistence:
-      size: 5Gi
+      size: 10Gi
     automountServiceAccountToken: true
     serviceAccount:
       create: true
@@ -73,18 +75,26 @@ locals {
       <cache_enabled>true</cache_enabled>
       <data_cache_enabled>true</data_cache_enabled>
       <enable_filesystem_cache>true</enable_filesystem_cache>
-      <cache_on_write_operations>true</cache_on_write_operations>
-      <max_cache_size>4Gi</max_cache_size>
+      <cache_on_write_operations>false</cache_on_write_operations>
+      <max_cache_size>2Gi</max_cache_size>
       <cache_path>/bitnami/clickhouse/data/disks/s3_default/cache/</cache_path>
     </disk_s3>
   </disks>
   <policies>
     <policy_s3_only>
+      <!-- items with equal priorities are ordered by their position in config -->
       <volumes>
-        <volume_s3>
+        <hot>
+          <disk>default</disk>
+        </hot>
+        <cold>
           <disk>disk_s3</disk>
-        </volume_s3>
+          <prefer_not_to_merge>true</prefer_not_to_merge>
+          <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
+        </cold>
       </volumes>
+      <!-- move data to s3 when disk usage will be more than 90% -->
+      <move_factor>0.2</move_factor>
     </policy_s3_only>
   </policies>
 </storage_configuration>
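For readability, the policy_s3_only policy that results from the hunks above reads roughly as follows (reassembled from the diff; indentation and the surrounding storage_configuration wrapper are approximate):

<policies>
  <policy_s3_only>
    <!-- items with equal priorities are ordered by their position in config -->
    <volumes>
      <hot>
        <disk>default</disk>
      </hot>
      <cold>
        <disk>disk_s3</disk>
        <prefer_not_to_merge>true</prefer_not_to_merge>
        <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
      </cold>
    </volumes>
    <move_factor>0.2</move_factor>
  </policy_s3_only>
</policies>

In ClickHouse, move_factor is the fraction of free space that triggers background moves to the next volume, so 0.2 starts moving parts to the S3 disk once the hot (default) volume is roughly 80% full. prefer_not_to_merge keeps background merges away from parts already on S3, and perform_ttl_move_on_insert=0 lets freshly inserted parts land on the hot volume first even when a TTL move rule already points at the cold one.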

modules/uptrace/main.tf

Lines changed: 34 additions & 22 deletions
@@ -146,6 +146,8 @@ locals {
   service_account = "uptrace-clickhouse"
 
   # https://github.com/bitnami/charts/blob/main/bitnami/clickhouse/values.yaml
+  # https://uptrace.dev/get/config.html#s3-storage
+  # https://altinity.com/blog/clickhouse-mergetree-on-s3-administrative-best-practices
   clickhouse_values = [
     <<-EOT
     shards: 1
@@ -155,7 +157,7 @@ locals {
       existingSecret: "uptrace-clickhouse-password"
      existingSecretKey: "password"
     persistence:
-      size: 5Gi
+      size: 10Gi
     automountServiceAccountToken: true
     serviceAccount:
       create: true
@@ -181,7 +183,7 @@ locals {
       <cache_enabled>true</cache_enabled>
       <data_cache_enabled>true</data_cache_enabled>
       <enable_filesystem_cache>true</enable_filesystem_cache>
-      <cache_on_write_operations>true</cache_on_write_operations>
+      <cache_on_write_operations>false</cache_on_write_operations>
       <max_cache_size>1Gi</max_cache_size>
       <cache_path>/bitnami/clickhouse/data/disks/s3_default/cache/</cache_path>
     </s3_default>
@@ -194,7 +196,7 @@ locals {
       <cache_enabled>true</cache_enabled>
       <data_cache_enabled>true</data_cache_enabled>
       <enable_filesystem_cache>true</enable_filesystem_cache>
-      <cache_on_write_operations>true</cache_on_write_operations>
+      <cache_on_write_operations>false</cache_on_write_operations>
       <max_cache_size>1Gi</max_cache_size>
       <cache_path>/bitnami/clickhouse/data/disks/s3_metrics/cache/</cache_path>
     </s3_metrics>
@@ -207,47 +209,57 @@ locals {
       <cache_enabled>true</cache_enabled>
       <data_cache_enabled>true</data_cache_enabled>
       <enable_filesystem_cache>true</enable_filesystem_cache>
-      <cache_on_write_operations>true</cache_on_write_operations>
+      <cache_on_write_operations>false</cache_on_write_operations>
       <max_cache_size>1Gi</max_cache_size>
       <cache_path>/bitnami/clickhouse/data/disks/s3_spans/cache/</cache_path>
     </s3_spans>
   </disks>
   <policies>
     <default>
+      <!-- items with equal priorities are ordered by their position in config -->
       <volumes>
-        <volume_s3_default>
+        <hot>
+          <disk>default</disk>
+        </hot>
+        <cold>
           <disk>s3_default</disk>
-        </volume_s3_default>
+          <prefer_not_to_merge>true</prefer_not_to_merge>
+          <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
+        </cold>
       </volumes>
+      <!-- move data to s3 when disk usage will be more than 90% -->
+      <move_factor>0.2</move_factor>
     </default>
     <s3_metrics>
+      <!-- items with equal priorities are ordered by their position in config -->
       <volumes>
-        <volume_s3_metrics>
+        <hot>
+          <disk>default</disk>
+        </hot>
+        <cold>
           <disk>s3_metrics</disk>
-        </volume_s3_metrics>
+          <prefer_not_to_merge>true</prefer_not_to_merge>
+          <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
+        </cold>
       </volumes>
+      <!-- move data to s3 when disk usage will be more than 90% -->
+      <move_factor>0.2</move_factor>
     </s3_metrics>
     <s3_spans>
-      <volumes>
-        <volume_s3_spans>
-          <disk>s3_spans</disk>
-        </volume_s3_spans>
-      </volumes>
-    </s3_spans>
-    <duo>
       <!-- items with equal priorities are ordered by their position in config -->
       <volumes>
         <hot>
           <disk>default</disk>
         </hot>
         <cold>
-          <disk>s3_default</disk>
+          <disk>s3_spans</disk>
           <prefer_not_to_merge>true</prefer_not_to_merge>
+          <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
         </cold>
       </volumes>
       <!-- move data to s3 when disk usage will be more than 90% -->
-      <move_factor>0.1</move_factor>
-    </duo>
+      <move_factor>0.2</move_factor>
+    </s3_spans>
   </policies>
 </storage_configuration>
 </clickhouse>
@@ -284,14 +296,14 @@ locals {
       logLinePrefix: ""
       logTimezone: ""
       postgresqlDataDir: /bitnami/postgresql/data
-      persistence:
-        enabled: true
-        mountPath: /bitnami/postgresql
-        size: 1Gi
       readReplicas:
         replicaCount: 0
       primary:
         resourcesPreset: "none"
+        persistence:
+          enabled: true
+          mountPath: /bitnami/postgresql
+          size: 512Mi
     EOT
   ]
 }
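After these hunks, all three ClickHouse storage policies in the uptrace module (default, s3_metrics, s3_spans) share the same hot/cold shape, and the separate duo policy is gone. Reassembled from the diff, the default policy reads roughly like this; the other two differ only in the cold disk name (indentation is approximate):

<default>
  <!-- items with equal priorities are ordered by their position in config -->
  <volumes>
    <hot>
      <disk>default</disk>
    </hot>
    <cold>
      <disk>s3_default</disk>
      <prefer_not_to_merge>true</prefer_not_to_merge>
      <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
    </cold>
  </volumes>
  <move_factor>0.2</move_factor>
</default>

The last hunk also moves the PostgreSQL persistence block under primary:, which is where the Bitnami postgresql chart reads it, and trims the volume from 1Gi to 512Mi.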
