-
Notifications
You must be signed in to change notification settings - Fork 99
Expand file tree
/
Copy pathconfig.example.toml
More file actions
111 lines (98 loc) · 4.49 KB
/
config.example.toml
File metadata and controls
111 lines (98 loc) · 4.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
# g3 Configuration Example
#
# Most settings have sensible defaults. A minimal config only needs:
#
#   [providers]
#   default_provider = "anthropic.default"
#
#   [providers.anthropic.default]
#   api_key = "your-api-key"
#   model = "claude-sonnet-4-5"
#
# Everything else below is optional.

[providers]
default_provider = "anthropic.default"

# Optional: specify different providers for each mode.
# If not specified, these fall back to default_provider.
# planner = "anthropic.planner"   # Provider for planning mode
# coach = "anthropic.default"     # Provider for coach in autonomous mode
# player = "anthropic.default"    # Provider for player in autonomous mode

[providers.anthropic.default]
api_key = "your-anthropic-api-key"
model = "claude-sonnet-4-5"
# max_tokens = 64000              # Optional (default: provider's max)
# temperature = 0.3               # Optional
# cache_config = "ephemeral"      # Optional: Enable prompt caching
# enable_1m_context = true        # Optional: Enable 1M context (costs extra)
# thinking_budget_tokens = 10000  # Optional: Enable extended thinking mode

# Example: A separate config for planning mode with a more capable model
# [providers.anthropic.planner]
# api_key = "your-anthropic-api-key"
# model = "claude-opus-4-5"
# thinking_budget_tokens = 16000

# Databricks provider example
# [providers.databricks.default]
# host = "https://your-workspace.cloud.databricks.com"
# model = "databricks-claude-sonnet-4"
# use_oauth = true

# OpenAI provider example
# [providers.openai.default]
# api_key = "your-openai-api-key"
# model = "gpt-4-turbo"

# OpenAI-compatible providers (OpenRouter, Groq, etc.)
# [providers.openai_compatible.openrouter]
# api_key = "your-openrouter-api-key"
# model = "anthropic/claude-3.5-sonnet"
# base_url = "https://openrouter.ai/api/v1"

# =============================================================================
# Embedded providers (local models via llama.cpp with Metal acceleration)
# =============================================================================
# Download models from Hugging Face:
#   huggingface-cli download bartowski/THUDM_GLM-4-32B-0414-GGUF \
#     --include "THUDM_GLM-4-32B-0414-Q6_K_L.gguf" --local-dir ~/.g3/models/

# GLM-4 32B - Top-tier local model for coding/reasoning
# (context_length auto-detected from GGUF)
# [providers.embedded.glm4]
# model_path = "~/.g3/models/THUDM_GLM-4-32B-0414-Q6_K_L.gguf"
# model_type = "glm4"      # Required: glm4, qwen, mistral, llama, codellama
# context_length = 32768   # Optional: auto-detected from GGUF (GLM-4 = 32K)
# max_tokens = 4096        # Optional: defaults to min(4096, context/4)
# temperature = 0.1
# gpu_layers = 99          # Use all GPU layers on Apple Silicon
# threads = 8

# GLM-4 9B - Smaller but very capable (minimal config - most settings auto-detected)
# [providers.embedded.glm4-9b]
# model_path = "~/.g3/models/THUDM_GLM-4-9B-0414-Q8_0.gguf"
# model_type = "glm4"
# gpu_layers = 99          # Optional but recommended for Apple Silicon

# Qwen3 4B - Small but powerful, good for ensemble usage (minimal config)
# [providers.embedded.qwen3]
# model_path = "~/.g3/models/qwen3-4b-q4_k_m.gguf"
# model_type = "qwen"
# gpu_layers = 99          # Optional but recommended for Apple Silicon

# =============================================================================
# Agent settings (all optional - these are the defaults)
# =============================================================================
# [agent]
# fallback_default_max_tokens = 8192
# enable_streaming = true
# timeout_seconds = 120
# auto_compact = true
# max_retry_attempts = 3
# autonomous_max_retry_attempts = 6
# max_context_length = 200000      # Override context window size

# =============================================================================
# Computer control (all optional - enabled by default)
# =============================================================================
# [computer_control]
# enabled = true                   # Requires OS accessibility permissions
# require_confirmation = true
# max_actions_per_second = 5

# =============================================================================
# WebDriver browser automation (all optional)
# =============================================================================
# [webdriver]
# enabled = true
# browser = "chrome-headless"              # Default. Alternative: "safari"
# chrome_binary = "/path/to/chrome"        # Optional: custom Chrome path
# chromedriver_binary = "/path/to/driver"  # Optional: custom ChromeDriver path