# Dream Server Configuration
# Copy this file to .env and edit values before starting:
# cp .env.example .env
#
# The installer (install-core.sh) generates .env automatically with
# secure random secrets. This file documents all available variables.
# ═══════════════════════════════════════════════════════════════════
# REQUIRED — these must be set or docker compose will refuse to start
# ═══════════════════════════════════════════════════════════════════
# Session signing for Open WebUI (generate: openssl rand -hex 32)
WEBUI_SECRET=CHANGEME
# n8n workflow automation credentials
N8N_USER=admin
N8N_PASS=CHANGEME
# LiteLLM API gateway key (generate: echo "sk-dream-$(openssl rand -hex 16)")
LITELLM_KEY=CHANGEME
# OpenClaw agent framework token (generate: openssl rand -hex 24)
OPENCLAW_TOKEN=CHANGEME
# OpenCode web UI password (generate: openssl rand -base64 16)
OPENCODE_SERVER_PASSWORD=CHANGEME
# ═══════════════════════════════════════════════════════════════════
# LLM Backend Mode
# ═══════════════════════════════════════════════════════════════════
# local = llama-server (default, requires GPU or CPU inference)
# cloud = LiteLLM -> cloud APIs (no local GPU needed)
# hybrid = local primary, cloud fallback
DREAM_MODE=local
# Inference backend engine: llama-server (NVIDIA/CPU), lemonade (AMD), litellm (cloud)
# AMD hardware auto-selects lemonade for NPU/Vulkan/ROCm acceleration
LLM_BACKEND=llama-server
# API base path: /v1 for llama-server, /api/v1 for Lemonade (set by installer)
LLM_API_BASE_PATH=/v1
LLM_API_URL=http://llama-server:8080
# ═══════════════════════════════════════════════════════════════════
# Cloud API Keys (only needed for cloud/hybrid modes)
# ═══════════════════════════════════════════════════════════════════
ANTHROPIC_API_KEY=
OPENAI_API_KEY=
TOGETHER_API_KEY=
# ═══════════════════════════════════════════════════════════════════
# LLM Settings (llama-server)
# ═══════════════════════════════════════════════════════════════════
# Model GGUF filename (must exist in data/models/)
GGUF_FILE=Qwen3-8B-Q4_K_M.gguf
# Context window size (tokens)
CTX_SIZE=16384
# GPU backend: nvidia or amd
GPU_BACKEND=nvidia
# Model name (used by OpenClaw and dashboard)
LLM_MODEL=qwen3-8b
# llama-server inference tuning (advanced)
# LLAMA_BATCH_SIZE=2048 # Batch size for prompt processing (higher = faster prefill)
# LLAMA_THREADS=4 # CPU threads for non-GPU work
# LLAMA_PARALLEL=1 # Concurrent request slots (increase for multi-user)
# ═══════════════════════════════════════════════════════════════════
# Ports — all overridable, defaults shown
# ═══════════════════════════════════════════════════════════════════
# llama-server API (external → internal 8080)
OLLAMA_PORT=11434
# Open WebUI (external → internal 8080)
WEBUI_PORT=3000
# SEARXNG_PORT=8888 # SearXNG metasearch (external → internal 8080)
# PERPLEXICA_PORT=3004 # Perplexica deep research (external → internal 3000)
# WHISPER_PORT=9000 # Whisper STT (external → internal 8000)
# TTS_PORT=8880 # Kokoro TTS (external → internal 8880)
# N8N_PORT=5678 # n8n workflows (external → internal 5678)
# QDRANT_PORT=6333 # Qdrant vector DB (external → internal 6333)
# QDRANT_GRPC_PORT=6334 # Qdrant gRPC (external → internal 6334)
# EMBEDDINGS_PORT=8090 # Text embeddings (external → internal 80)
# LITELLM_PORT=4000 # LiteLLM gateway (external → internal 4000)
# OPENCLAW_PORT=7860 # OpenClaw agent (external → internal 18789)
# SHIELD_PORT=8085 # Privacy Shield (external → internal 8085)
# DASHBOARD_API_PORT=3002 # Dashboard API (external → internal 3002)
# DASHBOARD_PORT=3001 # Dashboard UI (external → internal 3001)
# COMFYUI_PORT=8188 # ComfyUI image gen (external → internal 8188)
# TOKEN_SPY_PORT=3005 # Token Spy usage monitor (external → internal 8080)
# OPENCODE_PORT=3003 # OpenCode IDE web UI (host service)
# ═══════════════════════════════════════════════════════════════════
# Optional Security
# ═══════════════════════════════════════════════════════════════════
# Dashboard API key (generate: openssl rand -hex 32)
# DASHBOARD_API_KEY=
# Qdrant API key (generate: openssl rand -hex 32)
# QDRANT_API_KEY=
# Open WebUI authentication (true/false)
# WEBUI_AUTH=true
# ═══════════════════════════════════════════════════════════════════
# Langfuse (LLM Observability) — optional, disabled by default
# ═══════════════════════════════════════════════════════════════════
LANGFUSE_PORT=3006
LANGFUSE_ENABLED=false
# Remaining LANGFUSE_* secrets are auto-generated during install
# ═══════════════════════════════════════════════════════════════════
# Multi-GPU Settings (auto-populated by installer for NVIDIA multi-GPU)
# ═══════════════════════════════════════════════════════════════════
# GPU_ASSIGNMENT_JSON_B64= # Base64-encoded GPU assignment JSON
# LLAMA_SERVER_GPU_UUIDS= # GPU UUIDs for llama-server (comma-separated)
# LLAMA_ARG_SPLIT_MODE=none # none | layer (pipeline) | row (tensor/hybrid)
# LLAMA_ARG_TENSOR_SPLIT= # Proportional VRAM weights (e.g. 3,1)
# COMFYUI_GPU_UUID= # GPU UUID for ComfyUI
# WHISPER_GPU_UUID= # GPU UUID for Whisper
# EMBEDDINGS_GPU_UUID= # GPU UUID for embeddings
# LLM_MODEL_SIZE_MB= # Approximate model size in MB
# ═══════════════════════════════════════════════════════════════════
# Optional — Voice, Web UI, n8n
# ═══════════════════════════════════════════════════════════════════
# Whisper model (tiny, base, small, medium, large-v3-turbo)
# WHISPER_MODEL=base
# System timezone (used by Open WebUI and n8n)
# TIMEZONE=UTC
# n8n settings
# N8N_AUTH=true # Enable n8n basic auth
# N8N_HOST=localhost # n8n hostname
# N8N_WEBHOOK_URL=http://localhost:5678 # n8n webhook URL (for external access)
# Embedding model for RAG
# EMBEDDING_MODEL=BAAI/bge-base-en-v1.5
# ═══════════════════════════════════════════════════════════════════
# AMD-specific (only needed with GPU_BACKEND=amd)
# ═══════════════════════════════════════════════════════════════════
# VIDEO_GID=44 # `getent group video | cut -d: -f3`
# RENDER_GID=992 # `getent group render | cut -d: -f3`
# ═══════════════════════════════════════════════════════════════════
# Advanced
# ═══════════════════════════════════════════════════════════════════
# Container user/group IDs
# UID=1000
# GID=1000
# Privacy Shield settings
# PII_CACHE_ENABLED=true
# PII_CACHE_SIZE=1000
# PII_CACHE_TTL=300
# LOG_LEVEL=info
# OpenClaw bootstrap model (small model for instant startup)
# BOOTSTRAP_MODEL=qwen3:8b-q4_K_M
# Dashboard API internal URLs (usually Docker-internal, not user-facing)
# KOKORO_URL=http://tts:8880
# N8N_URL=http://n8n:5678
# llama-server memory limit (Docker)
# LLAMA_SERVER_MEMORY_LIMIT=64G
# Image generation (ComfyUI + SDXL Lightning)
# ENABLE_IMAGE_GENERATION=true