forked from vercel-labs/workflow-builder-template
-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
656 lines (624 loc) · 21.1 KB
/
docker-compose.yml
File metadata and controls
656 lines (624 loc) · 21.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
# =============================================================================
# KeeperHub Docker Compose
#
# Profiles:
# dev - Full development stack (no K8s Jobs, dispatcher runs as cron)
# minikube - Hybrid mode (services in Docker, executor in Minikube)
# prod - Production-ready containers
# test - Isolated E2E test stack (separate DB, ports, volumes)
#   profile-workflows - Workflow profiling tools (js-sandbox for WASM fuel calibration)
#   migrator - One-shot database migration container (run explicitly, no runtime profile)
#
# Usage:
# docker compose --profile dev up -d # Development (no K8s)
# docker compose --profile minikube up -d # Hybrid mode
# docker compose --profile test up -d # E2E test environment
# docker compose --profile profile-workflows up -d # Profiling tools
# make hybrid-setup # Full hybrid setup
# =============================================================================
# -----------------------------------------------------------------------------
# Primitives (from .env or defaults)
# NOTE: top-level "x-" keys are Compose extension fields — the engine ignores
# them, so they exist purely to hold the reusable anchors consumed below.
# -----------------------------------------------------------------------------
x-primitives:
  db_name: &db_name "${POSTGRES_DB:-keeperhub}"
  db_user: &db_user "${POSTGRES_USER:-postgres}"
  db_password: &db_password "${POSTGRES_PASSWORD:-postgres}"
  aws_region: &aws_region "us-east-1"
  # Dummy credentials accepted by LocalStack — never real AWS keys.
  aws_access_key: &aws_access_key "test"
  aws_secret_key: &aws_secret_key "test"
# -----------------------------------------------------------------------------
# Environment Groups
# Merged into services via "<<: *alias". Merge keys are shallow: any key set
# explicitly in a service's environment block overrides the merged-in value.
# -----------------------------------------------------------------------------
x-env-db: &env_db
  POSTGRES_USER: *db_user
  POSTGRES_PASSWORD: *db_password
  POSTGRES_DB: *db_name
x-env-db-connection: &env_db_connection
  DATABASE_URL: postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-keeperhub}
x-env-aws: &env_aws
  AWS_REGION: *aws_region
  AWS_ACCESS_KEY_ID: *aws_access_key
  AWS_SECRET_ACCESS_KEY: *aws_secret_key
  # Use host.minikube.internal so URLs work from both host and Minikube
  AWS_ENDPOINT_URL: http://host.minikube.internal:4566
x-env-sqs: &env_sqs
  SQS_QUEUE_URL: http://host.minikube.internal:4566/000000000000/keeperhub-workflow-queue
x-env-service-keys: &env_service_keys
  # Shared internal-service API keys — local development values only.
  SCHEDULER_SERVICE_API_KEY: local-scheduler-key-for-dev
  MCP_SERVICE_API_KEY: local-mcp-key-for-dev
  EVENTS_SERVICE_API_KEY: local-events-key-for-dev
# -----------------------------------------------------------------------------
# Services
# -----------------------------------------------------------------------------
services:
  # ===========================================================================
  # Database
  # ===========================================================================
  db:
    image: postgres:16-alpine
    # container_name removed to allow worktrees to share or run independently
    hostname: db
    restart: unless-stopped
    environment:
      <<: *env_db
    ports:
      - "5433:5432" # External 5433 to avoid host port conflicts, internal 5432
    volumes:
      - keeperhub_db_data:/var/lib/postgresql/data
    healthcheck:
      # ${POSTGRES_USER} is interpolated by Compose from the host .env, so the
      # default here matches the default used in the env_db anchor above.
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
      interval: 5s
      timeout: 5s
      retries: 5
    profiles:
      - dev
      - minikube
      - prod
      - profile-workflows
  # ===========================================================================
  # LocalStack (SQS)
  # ===========================================================================
  localstack:
    image: localstack/localstack:latest
    # container_name removed to allow worktrees to share or run independently
    hostname: localstack
    ports:
      - "4566:4566"
    environment:
      - SERVICES=sqs
      - DEBUG=0
      - SQS_ENDPOINT_STRATEGY=path
      # Use host.minikube.internal so queue URLs work from both host and Minikube
      - LOCALSTACK_HOST=host.minikube.internal:4566
      - LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:-}
    volumes:
      # Init hook runs once LocalStack reports ready (creates the SQS queue).
      - ./deploy/local/hybrid/init-localstack.sh:/etc/localstack/init/ready.d/init-aws.sh:ro
      - keeperhub_localstack_data:/var/lib/localstack
    healthcheck:
      test: ["CMD-SHELL", "awslocal sqs list-queues || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 15s
    profiles:
      - dev
      - minikube
  # ===========================================================================
  # KeeperHub App (Next.js)
  # ===========================================================================
  app:
    build:
      context: .
      dockerfile: Dockerfile
      target: runner
    image: keeperhub:latest
    container_name: keeperhub-app
    hostname: keeperhub
    restart: unless-stopped
    ports:
      - "3000:3000"
    environment:
      <<: [*env_db_connection, *env_aws, *env_sqs]
      NODE_ENV: production
      KEEPERHUB_API_KEY: ${KEEPERHUB_API_KEY:-}
    depends_on:
      db:
        condition: service_healthy
    profiles:
      - prod
  # Development app with hot reload
  app-dev:
    image: node:22-alpine
    container_name: keeperhub-app-dev
    # Same hostname as the prod "app" service, so other containers can reach
    # the app at http://keeperhub:3000 in either profile.
    hostname: keeperhub
    working_dir: /app
    ports:
      - "3000:3000"
    volumes:
      - .:/app
      # Named volume shadows the host node_modules so native deps stay Linux-built.
      - keeperhub_node_modules:/app/node_modules
    env_file:
      # NOTE(review): Compose fails to start if .env is missing — confirm a
      # .env always exists, or mark the entry optional (required: false).
      - .env
    environment:
      <<: [*env_db_connection, *env_aws, *env_sqs, *env_service_keys]
      HOSTNAME: "0.0.0.0"
      KEEPERHUB_API_KEY: ${KEEPERHUB_API_KEY:-some-random-api-key}
      NODE_OPTIONS: "--max-old-space-size=3072"
      METRICS_COLLECTOR: prometheus
    deploy:
      resources:
        limits:
          memory: 4g
          cpus: "2.0"
    depends_on:
      db:
        condition: service_healthy
      localstack:
        condition: service_healthy
    command: sh -c "npm install -g pnpm && CI=true pnpm install && pnpm dev --hostname 0.0.0.0"
    profiles:
      - dev
      - minikube
  # ===========================================================================
  # Sandbox
  # Standalone sandbox HTTP service for the Code workflow node. Main app
  # dispatches via SANDBOX_BACKEND=remote + SANDBOX_URL=http://localhost:8787.
  # Default local dev keeps SANDBOX_BACKEND unset (=local) so the in-pod
  # child_process path runs without this container.
  # ===========================================================================
  sandbox:
    build:
      context: .
      dockerfile: sandbox/Dockerfile
    image: keeperhub-sandbox:latest
    container_name: keeperhub-sandbox
    hostname: sandbox
    restart: unless-stopped
    ports:
      - "8787:8787"
    environment:
      NODE_ENV: production
      # Quoted so the value stays a string rather than a YAML integer.
      SANDBOX_PORT: "8787"
    healthcheck:
      test: ["CMD-SHELL", "wget -q --spider http://localhost:8787/healthz || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
    profiles:
      - dev
      - minikube
      - prod
  # ===========================================================================
  # Schedule Dispatcher
  # Runs every minute to check for due schedules and send messages to SQS
  # ===========================================================================
  dispatcher:
    build:
      context: .
      dockerfile: Dockerfile
      target: schedule-dispatcher
    image: keeperhub-dispatcher:latest
    container_name: keeperhub-dispatcher
    restart: unless-stopped
    working_dir: /app
    environment:
      <<: *env_db_connection
      # Container-internal service-DNS endpoints, unlike the host-facing
      # host.minikube.internal URLs defined in the shared env groups.
      AWS_REGION: us-east-1
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: test
      AWS_ENDPOINT_URL: http://localstack:4566
      SQS_QUEUE_URL: http://localstack:4566/000000000000/keeperhub-workflow-queue
      KEEPERHUB_API_URL: http://app-dev:3000
      KEEPERHUB_API_KEY: ${KEEPERHUB_API_KEY:-local-scheduler-key-for-dev}
    depends_on:
      db:
        condition: service_healthy
      localstack:
        condition: service_healthy
      app-dev:
        condition: service_started
    # Run dispatcher every minute using a simple loop
    # (Docker doesn't have native cron, so we simulate it)
    # "$$" escapes "$" from Compose interpolation so $(date) runs in the shell.
    command: |
      sh -c 'while true; do
        echo "[Dispatcher] Running at $$(date)"
        tsx schedule-dispatcher/index.ts
        echo "[Dispatcher] Sleeping 60s..."
        sleep 60
      done'
    profiles:
      - dev
      - minikube
  # ===========================================================================
  # Unified Executor
  # Polls SQS for all trigger types and executes workflows
  # (in-process for web2/read-only, K8s Jobs for web3 writes)
  # ===========================================================================
  executor:
    build:
      context: .
      dockerfile: Dockerfile
      target: executor
    image: keeperhub-executor:latest
    container_name: keeperhub-executor
    restart: unless-stopped
    working_dir: /app
    environment:
      <<: *env_db_connection
      AWS_REGION: us-east-1
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: test
      AWS_ENDPOINT_URL: http://localstack:4566
      SQS_QUEUE_URL: http://localstack:4566/000000000000/keeperhub-workflow-queue
      # All-zero key is a dev-only placeholder; set a real key via .env.
      INTEGRATION_ENCRYPTION_KEY: ${INTEGRATION_ENCRYPTION_KEY:-0000000000000000000000000000000000000000000000000000000000000000}
      HEALTH_PORT: "3080"
    depends_on:
      db:
        condition: service_healthy
      localstack:
        condition: service_healthy
      app-dev:
        condition: service_started
    profiles:
      - dev
  # ===========================================================================
  # Reaper (stale execution cleanup)
  # Runs every 10 minutes to mark stuck workflow executions as timed out
  # ===========================================================================
  reaper:
    image: curlimages/curl:latest
    container_name: keeperhub-reaper
    restart: unless-stopped
    environment:
      SCHEDULER_SERVICE_API_KEY: ${KEEPERHUB_API_KEY:-local-scheduler-key-for-dev}
    depends_on:
      app-dev:
        condition: service_started
    # "$$" escapes "$" from Compose interpolation; $(date) and
    # $SCHEDULER_SERVICE_API_KEY are expanded by the container shell.
    command: |
      sh -c 'while true; do
        echo "[Reaper] Running at $$(date)"
        curl -sS -f -H "X-Service-Key: $$SCHEDULER_SERVICE_API_KEY" \
          http://app-dev:3000/api/internal/reaper || echo "[Reaper] Failed"
        echo ""
        echo "[Reaper] Sleeping 600s..."
        sleep 600
      done'
    profiles:
      - dev
      - minikube
# ===========================================================================
# Event Tracker (keeperhub-events)
# Monitors blockchain events and routes to SQS for unified executor
# ===========================================================================
event-tracker:
build:
context: ./keeperhub-events
dockerfile: event-tracker/Dockerfile
container_name: keeperhub-event-tracker
restart: unless-stopped
environment:
WATCH_MODE: "true"
KEEPERHUB_API_URL: http://keeperhub:3000
KEEPERHUB_API_KEY: ${KEEPERHUB_API_KEY:-local-events-key-for-dev}
REDIS_HOST: redis
REDIS_PORT: 6379
JWT_TOKEN_USERNAME: ${JWT_TOKEN_USERNAME:-}
JWT_TOKEN_PASSWORD: ${JWT_TOKEN_PASSWORD:-}
ETHERSCAN_API_KEY: ${ETHERSCAN_API_KEY:-}
SQS_QUEUE_URL: http://localstack:4566/000000000000/keeperhub-workflow-queue
AWS_REGION: us-east-1
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: test
AWS_ENDPOINT_URL: http://localstack:4566
NODE_ENV: development
depends_on:
redis:
condition: service_started
localstack:
condition: service_healthy
app-dev:
condition: service_started
profiles:
- dev
- minikube
  # ===========================================================================
  # Block Dispatcher (keeperhub-scheduler)
  # Monitors blockchain blocks and routes matching workflows to SQS
  # ===========================================================================
  block-dispatcher:
    build:
      context: .
      dockerfile: Dockerfile
      target: block-dispatcher
    image: keeperhub-block:latest
    container_name: keeperhub-block
    restart: unless-stopped
    working_dir: /app
    environment:
      KEEPERHUB_API_URL: http://app-dev:3000
      KEEPERHUB_API_KEY: ${KEEPERHUB_API_KEY:-local-scheduler-key-for-dev}
      AWS_REGION: us-east-1
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: test
      AWS_ENDPOINT_URL: http://localstack:4566
      SQS_QUEUE_URL: http://localstack:4566/000000000000/keeperhub-workflow-queue
      RECONCILE_INTERVAL_MS: "30000"
      # Container-internal health port; no host publish, so no clash with
      # the app services that publish host port 3000.
      HEALTH_PORT: "3000"
    depends_on:
      localstack:
        condition: service_healthy
      app-dev:
        condition: service_started
    profiles:
      - dev
      - minikube
  # ===========================================================================
  # Database Migrator (one-shot)
  # Not in any runtime profile; run explicitly with --profile migrator.
  # ===========================================================================
  migrator:
    build:
      context: .
      dockerfile: Dockerfile
      target: migrator
    image: keeperhub-migrator:latest
    container_name: keeperhub-migrator
    environment:
      <<: *env_db_connection
    depends_on:
      db:
        condition: service_healthy
    command: pnpm db:migrate
    profiles:
      - migrator
  # ===========================================================================
  # Workflow Profiling Tools
  # ===========================================================================
  # JS Sandbox for WASM fuel calibration
  # Uses Spidermonkey compiled to WASM for deterministic instruction counting
  # See: scripts/runtime/workflow_runtime_analysis/calibrate-wasm-fuel.ts
  js-sandbox:
    image: forbeslindesay/secure-js-sandbox:latest
    container_name: keeperhub-js-sandbox
    hostname: js-sandbox
    ports:
      - "3001:3000" # External 3001 to avoid conflict with app on 3000
    healthcheck:
      test: ["CMD-SHELL", "wget -q --spider http://localhost:3000/health || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
    profiles:
      - profile-workflows
# ===========================================================================
# Redis (for caching)
# ===========================================================================
redis:
image: valkey/valkey:latest
container_name: keeperhub-redis
hostname: redis
ports:
- 6379:6379
restart: unless-stopped
profiles:
- dev
- minikube
  # ===========================================================================
  # E2E Test Services
  # Isolated test stack with separate ports, DB, and volumes
  # Usage: docker compose --profile test up -d
  # ===========================================================================
  test-db:
    image: postgres:16-alpine
    container_name: keeperhub-test-db
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      POSTGRES_DB: ${POSTGRES_DB:-workflow_builder_test}
    ports:
      # Host 5434 to avoid clashing with the dev db on host 5433.
      - "5434:5432"
    volumes:
      - keeperhub_test_db_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
      interval: 5s
      timeout: 5s
      retries: 5
    profiles:
      - test
  test-localstack:
    image: localstack/localstack:latest
    container_name: keeperhub-test-localstack
    ports:
      # Host 4567 to avoid clashing with dev localstack on host 4566.
      - "4567:4566"
    environment:
      - SERVICES=sqs
      - DEBUG=0
    volumes:
      - keeperhub_test_localstack_data:/var/lib/localstack
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:4566/_localstack/health"]
      interval: 10s
      timeout: 5s
      retries: 5
    profiles:
      - test
  # One-shot: creates the SQS queue in test-localstack, then exits. Other
  # test services gate on its service_completed_successfully condition.
  test-localstack-init:
    image: amazon/aws-cli:latest
    container_name: keeperhub-test-localstack-init
    depends_on:
      test-localstack:
        condition: service_healthy
    environment:
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: test
      AWS_DEFAULT_REGION: us-east-1
    entrypoint: /bin/sh
    command:
      - -c
      - |
        aws --endpoint-url=http://test-localstack:4566 sqs create-queue \
          --queue-name keeperhub-workflow-queue
        echo "SQS queue created"
    profiles:
      - test
  # Local Ethereum node (Foundry anvil) for web3 E2E tests.
  test-anvil:
    image: ghcr.io/foundry-rs/foundry:latest
    container_name: keeperhub-test-anvil
    ports:
      # Host 8546; anvil's default 8545 stays container-internal.
      - "8546:8545"
    entrypoint: anvil
    command:
      - --host
      - 0.0.0.0
      - --block-time
      - "1"
      - --chain-id
      - "31337"
    healthcheck:
      test: ["CMD-SHELL", "cast block-number --rpc-url http://localhost:8545 > /dev/null 2>&1 || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 10
    profiles:
      - test
  test-redis:
    image: valkey/valkey:latest
    container_name: keeperhub-test-redis
    ports:
      # Host 6380 to avoid clashing with the dev redis on host 6379.
      - "6380:6379"
    healthcheck:
      # NOTE(review): relies on the valkey image providing a redis-cli
      # compatibility shim — confirm, or switch to valkey-cli.
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
    profiles:
      - test
  # Next.js app for the isolated E2E stack (mirrors app-dev, test ports/DB).
  test-app:
    image: node:22-alpine
    container_name: keeperhub-test-app
    working_dir: /app
    ports:
      - "3001:3000"
    volumes:
      - .:/app
      - keeperhub_test_node_modules:/app/node_modules
    environment:
      <<: *env_service_keys
      DATABASE_URL: postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@test-db:5432/${POSTGRES_DB:-workflow_builder_test}
      AWS_ENDPOINT_URL: http://test-localstack:4566
      AWS_REGION: us-east-1
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: test
      SQS_QUEUE_URL: http://test-localstack:4566/000000000000/keeperhub-workflow-queue
      # NOTE(review): other test services use http://test-app:3000 here;
      # localhost only resolves inside this container — confirm intended.
      KEEPERHUB_URL: http://localhost:3000
      HOSTNAME: "0.0.0.0"
    depends_on:
      test-db:
        condition: service_healthy
      test-localstack-init:
        condition: service_completed_successfully
    command: sh -c "npm install -g pnpm && pnpm install && pnpm db:push && pnpm dev --hostname 0.0.0.0"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 60s
    profiles:
      - test
  # Schedule dispatcher against the test stack (runs from the bind mount).
  test-dispatcher:
    image: node:22-alpine
    container_name: keeperhub-test-dispatcher
    working_dir: /app
    volumes:
      - .:/app
      - keeperhub_test_node_modules:/app/node_modules
    environment:
      KEEPERHUB_API_URL: http://test-app:3000
      KEEPERHUB_API_KEY: ${KEEPERHUB_API_KEY:-local-scheduler-key-for-dev}
      AWS_ENDPOINT_URL: http://test-localstack:4566
      AWS_REGION: us-east-1
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: test
      SQS_QUEUE_URL: http://test-localstack:4566/000000000000/keeperhub-workflow-queue
      HEALTH_PORT: "3060"
    depends_on:
      test-app:
        condition: service_healthy
    command: sh -c "npm install -g pnpm && npx tsx keeperhub-scheduler/schedule-dispatcher/index.ts"
    profiles:
      - test
  # Workflow executor against the test stack (runs from the bind mount).
  test-executor:
    image: node:22-alpine
    container_name: keeperhub-test-executor
    working_dir: /app
    volumes:
      - .:/app
      - keeperhub_test_node_modules:/app/node_modules
    environment:
      DATABASE_URL: postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@test-db:5432/${POSTGRES_DB:-workflow_builder_test}
      AWS_ENDPOINT_URL: http://test-localstack:4566
      AWS_REGION: us-east-1
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: test
      SQS_QUEUE_URL: http://test-localstack:4566/000000000000/keeperhub-workflow-queue
      KEEPERHUB_URL: http://test-app:3000
    depends_on:
      test-app:
        condition: service_healthy
    command: sh -c "npm install -g pnpm && npx tsx keeperhub-executor/index.ts"
    profiles:
      - test
  # Runs the E2E suite once the full test stack is up, then exits.
  test-runner:
    image: node:22-alpine
    container_name: keeperhub-test-runner
    working_dir: /app
    volumes:
      - .:/app
      - keeperhub_test_node_modules:/app/node_modules
    environment:
      DATABASE_URL: postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@test-db:5432/${POSTGRES_DB:-workflow_builder_test}
      AWS_ENDPOINT_URL: http://test-localstack:4566
      AWS_REGION: us-east-1
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: test
      SQS_QUEUE_URL: http://test-localstack:4566/000000000000/keeperhub-workflow-queue
      KEEPERHUB_URL: http://test-app:3000
      FULL_E2E: "true"
    depends_on:
      test-app:
        condition: service_healthy
      test-dispatcher:
        condition: service_started
      test-executor:
        condition: service_started
    command: sh -c "npm install -g pnpm && pnpm install && pnpm test:e2e:schedule:full"
    profiles:
      - test
# =============================================================================
# Volumes
# External volumes allow worktrees to share the same Docker infrastructure.
# External resources must be created up front (docker volume create <name>);
# Compose will not create them and errors if they are missing.
# =============================================================================
volumes:
  keeperhub_db_data:
    name: keeperhub_db_data
    external: true
  keeperhub_node_modules:
    name: keeperhub_node_modules
    external: true
  keeperhub_localstack_data:
    name: keeperhub_localstack_data
    external: true
  # Test volumes are non-external so the test stack is fully disposable.
  keeperhub_test_db_data:
    name: keeperhub_test_db_data
  keeperhub_test_localstack_data:
    name: keeperhub_test_localstack_data
  keeperhub_test_node_modules:
    name: keeperhub_test_node_modules
# =============================================================================
# Networks
# External network allows worktrees to share the same Docker network
# =============================================================================
networks:
  default:
    name: keeperhub-network
    external: true