Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
145 changes: 145 additions & 0 deletions dream-server/config/openclaw/inject-token.js
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ const path = require('path');
// Gateway auth token; when empty, UI token injection is skipped (see Part 2 warnings).
const token = process.env.OPENCLAW_GATEWAY_TOKEN || '';
// Host-published port, used only for the clickable URL banner logged at startup.
const EXTERNAL_PORT = process.env.OPENCLAW_EXTERNAL_PORT || '7860';
// NOTE(review): LLM_MODEL is read here but not referenced in the visible hunks — confirm usage elsewhere.
const LLM_MODEL = process.env.LLM_MODEL || '';
// When set, every provider baseUrl in the runtime config is rewritten to this
// URL so LLM traffic flows through the Token Spy monitoring proxy.
const OPENCLAW_LLM_URL = process.env.OPENCLAW_LLM_URL || '';
// Runtime config file patched in place by this script.
const CONFIG_PATH = path.join(process.env.HOME || '/home/node', '.openclaw', 'openclaw.json');
// Control UI assets that receive the token injection (Part 2).
const HTML_PATH = '/app/dist/control-ui/index.html';
const JS_PATH = '/app/dist/control-ui/auto-token.js';
Expand Down Expand Up @@ -93,8 +94,36 @@ try {
}
}

// Override LLM baseUrl for Token Spy monitoring (if OPENCLAW_LLM_URL is set).
// Providers may live under models.providers or top-level providers; only
// providers that already define a baseUrl are rewritten.
const providers = config.models?.providers || config.providers || {};
if (OPENCLAW_LLM_URL && Object.keys(providers).length > 0) {
  for (const [name, provider] of Object.entries(providers)) {
    if (provider.baseUrl) {
      const oldUrl = provider.baseUrl;
      provider.baseUrl = OPENCLAW_LLM_URL;
      console.log(`[inject-token] monitoring: provider ${name} baseUrl: ${oldUrl} -> ${OPENCLAW_LLM_URL}`);
    }
  }
}

// Enable OpenAI-compatible HTTP API (opt-in via OPENCLAW_HTTP_API=true)
if (process.env.OPENCLAW_HTTP_API === 'true') {
  // Guard every level — a config without a `gateway` object would otherwise
  // throw here and abort the whole patch (same defensive pattern as Part 3).
  if (!config.gateway) config.gateway = {};
  if (!config.gateway.http) config.gateway.http = {};
  if (!config.gateway.http.endpoints) config.gateway.http.endpoints = {};
  config.gateway.http.endpoints.chatCompletions = { enabled: true };
  console.log('[inject-token] enabled HTTP /v1/chat/completions endpoint');
}

// Persist the patched runtime config.
fs.writeFileSync(CONFIG_PATH, JSON.stringify(config, null, 2), 'utf8');
console.log('[inject-token] patched runtime config:', CONFIG_PATH);

// Log the browser-accessible URL with token for Docker users
if (token) {
  console.log(`[inject-token] ┌─────────────────────────────────────────────┐`);
  console.log(`[inject-token] │ OpenClaw Control UI: │`);
  console.log(`[inject-token] │ http://localhost:${EXTERNAL_PORT}/#token=${token}`);
  console.log(`[inject-token] └─────────────────────────────────────────────┘`);
}
} catch (err) {
console.error('[inject-token] config patch warning:', err.message);
}
Expand Down Expand Up @@ -133,3 +162,119 @@ if (token && fs.existsSync(HTML_PATH)) {
if (!token) console.warn('[inject-token] no OPENCLAW_GATEWAY_TOKEN set, skipping UI injection');
if (!fs.existsSync(HTML_PATH)) console.warn('[inject-token] Control UI HTML not found at', HTML_PATH);
}

// ── Part 3: Create merged config ─────────────────────────────────────────────
// Reads the read-only template at OPENCLAW_CONFIG (default /config/openclaw.json),
// applies the same env-driven overrides as the runtime patch above, and writes
// the result to /tmp so the gateway can run from a writable, merged config.

try {
  const primaryConfigPath = process.env.OPENCLAW_CONFIG || '/config/openclaw.json';
  if (fs.existsSync(primaryConfigPath)) {
    const primary = JSON.parse(fs.readFileSync(primaryConfigPath, 'utf8'));

    // Enable HTTP API in merged config (opt-in via OPENCLAW_HTTP_API=true)
    if (process.env.OPENCLAW_HTTP_API === 'true') {
      if (!primary.gateway) primary.gateway = {};
      if (!primary.gateway.http) primary.gateway.http = {};
      if (!primary.gateway.http.endpoints) primary.gateway.http.endpoints = {};
      primary.gateway.http.endpoints.chatCompletions = { enabled: true };
    }

    // Fix provider baseUrl to match the actual LLM endpoint (OLLAMA_URL env).
    // The static config template uses "http://llama-server:8080/v1" which only
    // resolves when llama-server runs in Docker. On macOS it runs natively on
    // the host, so OLLAMA_URL is set to "http://host.docker.internal:8080".
    const ollamaUrl = process.env.OLLAMA_URL || '';
    if (ollamaUrl) {
      // Check both possible provider locations, matching the lookup used for
      // the runtime config above (some schemas nest providers under `models`).
      const provs = primary.models?.providers || primary.providers || {};
      for (const [name, prov] of Object.entries(provs)) {
        if (prov.baseUrl) {
          const oldUrl = prov.baseUrl;
          // Normalize: strip a single trailing slash, then append the /v1 API root.
          prov.baseUrl = ollamaUrl.replace(/\/$/, '') + '/v1';
          if (oldUrl !== prov.baseUrl) {
            console.log(`[inject-token] merged config: provider ${name} baseUrl: ${oldUrl} -> ${prov.baseUrl}`);
          }
        }
      }
    }

    const mergedPath = '/tmp/openclaw-config.json';
    fs.writeFileSync(mergedPath, JSON.stringify(primary, null, 2), 'utf8');
    console.log('[inject-token] created merged config at', mergedPath);
  }
} catch (err) {
  // Best-effort: a broken template must not prevent the gateway from starting.
  console.error('[inject-token] merged config warning:', err.message);
}

// ── Part 4: OpenAI-compat shim (opt-in via OPENCLAW_HTTP_API=true) ──────────
// OpenClaw serves /v1/chat/completions but not /v1/models.
// Open WebUI needs /v1/models to discover available models.
// This shim runs on port 18790, serves /v1/models, and proxies everything
// else to the gateway on 18789.
//
// Crash handling (all three layers):
// 1. Healthcheck: compose healthcheck hits :18790 — shim death → unhealthy
// 2. Restart loop: shim self-restarts up to 5 times with backoff
// 3. Logging: uncaughtException and server errors are logged to stderr

if (process.env.OPENCLAW_HTTP_API === 'true') {
  try {
    // The shim source is generated here, written to /tmp, and run as a
    // detached child so it outlives this init script. This is a template
    // literal: ${Math.floor(Date.now() / 1000)} is evaluated NOW and baked
    // into the generated file as a constant "created" timestamp; everything
    // else inside deliberately uses string concatenation to avoid interpolation.
    const shimScript = `
const http = require('http');
const GATEWAY_PORT = 18789;
const MODELS = JSON.stringify({
  object: 'list',
  data: [{ id: 'openclaw', object: 'model', created: ${Math.floor(Date.now() / 1000)}, owned_by: 'openclaw-gateway' }],
});

let restarts = 0;
function startServer() {
  const server = http.createServer((req, res) => {
    if (req.url === '/v1/models') {
      res.writeHead(200, { 'Content-Type': 'application/json' });
      return res.end(MODELS);
    }
    // Proxy everything else verbatim to the gateway.
    const proxy = http.request({ hostname: '127.0.0.1', port: GATEWAY_PORT, path: req.url, method: req.method, headers: req.headers }, (up) => {
      res.writeHead(up.statusCode, up.headers);
      up.pipe(res);
    });
    proxy.on('error', () => {
      // If the upstream dies mid-response the headers are already out and
      // writeHead would throw ERR_HTTP_HEADERS_SENT; abort the socket instead.
      if (res.headersSent) { res.destroy(); return; }
      res.writeHead(502);
      res.end('gateway unavailable');
    });
    req.pipe(proxy);
  });
  server.on('error', (err) => {
    console.error('[openai-shim] server error: ' + err.message);
    if (restarts < 5) {
      restarts++;
      const delay = restarts * 2000;
      console.error('[openai-shim] restarting in ' + delay + 'ms (attempt ' + restarts + '/5)');
      setTimeout(startServer, delay);
    } else {
      console.error('[openai-shim] too many failures, giving up — healthcheck will mark container unhealthy');
    }
  });
  server.listen(18790, '0.0.0.0', () => {
    restarts = 0;
    console.log('[openai-shim] /v1/models + proxy on :18790');
  });
}
startServer();

process.on('uncaughtException', (err) => {
  console.error('[openai-shim] uncaught exception: ' + err.message);
});
process.on('SIGTERM', () => {
  console.error('[openai-shim] received SIGTERM, shutting down');
  process.exit(0);
});
`;
    fs.writeFileSync('/tmp/openai-shim.js', shimScript);

    // Detach + unref so the shim keeps running after this script exits and
    // does not keep the parent's event loop alive.
    const { spawn } = require('child_process');
    const child = spawn('node', ['/tmp/openai-shim.js'], {
      detached: true,
      stdio: 'inherit',
    });
    child.unref();
    console.log('[inject-token] started openai-shim (pid %d)', child.pid);
  } catch (err) {
    // Best-effort: a shim failure must not block gateway startup.
    console.error('[inject-token] shim warning:', err.message);
  }
}
14 changes: 12 additions & 2 deletions dream-server/extensions/services/openclaw/compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,11 @@ services:
- LLM_MODEL=${LLM_MODEL:-qwen3:30b-a3b}
- BOOTSTRAP_MODEL=${BOOTSTRAP_MODEL:-qwen3:8b-q4_K_M}
- OLLAMA_URL=${LLM_API_URL:-http://llama-server:8080}
- OPENCLAW_LLM_URL=${OPENCLAW_LLM_URL:-}
# To enable monitoring: OPENCLAW_LLM_URL=http://token-spy:8083/v1
- OPENCLAW_HTTP_API=${OPENCLAW_HTTP_API:-}
- SEARXNG_BASE_URL=http://searxng:8080
entrypoint: ["/bin/sh", "-c", "node /config/inject-token.js; exec docker-entrypoint.sh node openclaw.mjs gateway --allow-unconfigured --bind lan"]
entrypoint: ["/bin/sh", "-c", "node /config/inject-token.js; export OPENCLAW_CONFIG=/tmp/openclaw-config.json; exec docker-entrypoint.sh node openclaw.mjs gateway --allow-unconfigured --bind lan"]
volumes:
- ./config/openclaw:/config:ro
- ./data/openclaw:/data
Expand All @@ -33,8 +36,15 @@ services:
searxng:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "wget -qO- http://localhost:18789/ || exit 1"]
test: ["CMD-SHELL", "if [ \"$$OPENCLAW_HTTP_API\" = 'true' ]; then wget -qO- http://localhost:18790/v1/models || exit 1; else wget -qO- http://localhost:18789/ || exit 1; fi"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s

# When OpenClaw is enabled, register it as a second model backend in Open WebUI.
# Port 18790 is the OpenAI-compat shim (serves /v1/models + proxies /v1/chat/completions).
open-webui:
environment:
- OPENAI_API_BASE_URLS=${LLM_API_URL:-http://llama-server:8080}/v1;http://openclaw:18790/v1
- OPENAI_API_KEYS=;${OPENCLAW_TOKEN:-}
Loading