# config.toml — ChemGraph configuration file.
# (Scraped page chrome and line-number gutter removed; content below is the
# original 94-line TOML document.)
# Core agent behavior.
[general]
model = "gemini-2.5-flash"  # default LLM model identifier
workflow = "single_agent"
output = "state"
structured = true   # presumably requests structured (schema-validated) output — confirm
report = true
thread = 1          # NOTE(review): looks like a thread count — confirm semantics
recursion_limit = 20
verbose = false

# Application logging.
[logging]
level = "INFO"
file = "./chemgraph.log"
console = true  # also mirror log records to the console
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# Optional feature toggles.
[features]
enable_experimental = false
enable_cache = true
cache_dir = "./cache"
cache_expiry = 24  # presumably hours — confirm against the consumer

# API-key validation and request throttling.
[security]
validate_keys = true
rate_limit = true
max_requests_per_minute = 60
# Per-provider API endpoints. `timeout` values are presumably seconds — confirm.
[api.openai]
base_url = "https://api.openai.com/v1"
timeout = 30

[api.groq]
base_url = "https://api.groq.com/openai/v1"
timeout = 30

[api.anthropic]
base_url = "https://api.anthropic.com"
timeout = 30

[api.google]
base_url = "https://generativelanguage.googleapis.com/v1beta"
timeout = 30

# Local model server (port 11434 is Ollama's default); longer timeout for
# local inference.
[api.local]
base_url = "http://localhost:11434"
timeout = 60
# Geometry-optimization defaults (method/fmax/steps match ASE optimizer
# parameter names — confirm consumer).
[chemistry.optimization]
method = "BFGS"
fmax = 0.05  # force-convergence threshold; presumably eV/Å — confirm
steps = 200  # maximum optimizer iterations

# Vibrational-frequency calculation settings.
[chemistry.frequencies]
displacement = 0.01  # finite-difference displacement; units not stated here
nprocs = 1

# Calculator selection: try `default`, fall back to `fallback`.
[chemistry.calculators]
default = "mace_mp"
fallback = "emt"
# Output file locations and naming.
[output.files]
directory = "./chemgraph_output"
pattern = "{timestamp}_{query_hash}"  # filename template; placeholders filled by the app
formats = ["xyz", "json", "html"]

# Structure-visualization options.
[output.visualization]
enable_3d = true
viewer = "py3dmol"
dpi = 300  # raster-image resolution
# Advanced agent tuning.
[advanced.agent]
custom_system_prompt = ""  # empty string: use the built-in system prompt — confirm
max_memory_tokens = 8000
enable_function_calling = true

# Parallel execution (off by default).
[advanced.parallel]
enable_parallel = false
num_workers = 2  # only relevant when enable_parallel = true
# Per-environment overrides. Keys here shadow the [general]/[features]/[security]
# defaults above — presumably selected by the app at startup; confirm mechanism.
[environments.development]
model = "gpt-4o-mini"
verbose = true
enable_cache = false

[environments.production]
model = "gpt-4o"
verbose = false
enable_cache = true
rate_limit = true

[environments.testing]
model = "gpt-4o-mini"
verbose = true
enable_cache = false