# config.toml.example
# zh: 配置文件示例。复制为 config.toml 并按需填写后使用。
# en: Example configuration. Copy to config.toml and fill in before use.
# zh: 机器人小号配置。
# en: Bot account settings.
[core]
# zh: 机器人QQ号。
# en: Bot QQ number.
bot_qq = 0
# zh: 超级管理员QQ号(唯一,可添加其他管理员)。
# en: Superadmin QQ number (unique; additional admins can be added later).
superadmin_qq = 0
# zh: 初始管理员QQ号列表(TOML 数组,可选,例如 [123, 456])。
# en: Initial admin QQ list (TOML array, optional, e.g. [123, 456]).
admin_qq = []
# zh: 音频转发代理QQ号(用于群聊音频转发,可选)。
# en: Audio forward proxy QQ number (for group audio forwarding, optional).
forward_proxy_qq = 0
# zh: 是否处理每条群消息。关闭后仅处理 @机器人、私聊和拍一拍(推荐默认开启)。
# en: Process every group message. If disabled, only @mentions, private chats, and pokes are handled (recommended: enabled).
process_every_message = true
# zh: 是否处理私聊消息。关闭后将只记录私聊历史,不触发 AI 回复。
# en: Process private messages. If disabled, private chat history is still recorded but AI replies are skipped.
process_private_message = true
# zh: 是否响应拍一拍。关闭后拍一拍事件将被忽略。
# en: Respond to poke events. If disabled, poke events are ignored.
process_poke_message = true
# zh: 注入给模型的最近历史消息条数上限(0-200,0 表示不注入历史)。
# en: Max number of recent messages injected as model context (0-200; 0 disables history injection).
context_recent_messages_limit = 20
# zh: 单次 LLM 请求失败时最大静默重试次数(0=不重试,默认2)。失败的请求会回到原队列的第 2 个位置。
# en: Max silent retries for a single failed LLM request (0=no retry, default 2). Failed requests are reinserted at position 2 of their original queue lane.
ai_request_max_retries = 2
# zh: 访问控制。mode 可选:off(不启用)/ blacklist(按黑名单)/ allowlist(按白名单)。
# en: Access control. mode options: off (disabled) / blacklist / allowlist.
[access]
# zh: 访问控制模式:off / blacklist / allowlist。
# en: Access control mode: off / blacklist / allowlist.
mode = "off"
# zh: 允许处理/发送的群号白名单(留空 = 不限制)。
# en: Allowed group IDs. Empty = no restriction.
allowed_group_ids = []
# zh: 禁止处理/发送的群号黑名单(优先级高于 allowed_group_ids)。
# en: Blocked group IDs. Higher priority than allowed_group_ids.
blocked_group_ids = []
# zh: 允许处理/发送的私聊 QQ 白名单(留空 = 不限制)。
# en: Allowed private-chat user IDs. Empty = no restriction.
allowed_private_ids = []
# zh: 禁止处理/发送的私聊 QQ 黑名单(blacklist 模式生效)。
# en: Blocked private-chat user IDs (effective in blacklist mode).
blocked_private_ids = []
# zh: 超级管理员是否可在私聊中绕过 "allowed_private_ids"(仅影响私聊收发;群聊仍严格按 "allowed_group_ids")。
# en: Allow superadmin to bypass "allowed_private_ids" for private chats only (group chats still strictly follow "allowed_group_ids").
superadmin_bypass_allowlist = true
# zh: 超级管理员是否可在私聊中绕过 "blocked_private_ids"(仅影响私聊收发)。
# en: Allow superadmin to bypass "blocked_private_ids" for private chats only.
superadmin_bypass_private_blacklist = false
# zh: OneBot WebSocket 配置。
# en: OneBot WebSocket settings.
[onebot]
# zh: NapCat WebSocket地址。
# en: NapCat WebSocket URL.
ws_url = "ws://127.0.0.1:3001"
# zh: Access Token(可选)。
# en: Access token (optional).
token = ""
[models]
# zh: 对话模型配置(主模型,处理每一条消息)。
# en: Chat model config (the main model, processing each message).
[models.chat]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(legacy "/chat/completions" 已弃用但仍兼容)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1. Note: legacy "/chat/completions" is deprecated but still supported.
api_url = ""
# zh: Chat 模型 API Key。
# en: Chat model API key.
api_key = ""
# zh: Chat 模型名称。
# en: Chat model name.
model_name = ""
# zh: 最大生成 tokens。
# en: Max generation tokens.
max_tokens = 8192
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 1.0
# zh: API 模式:传统 chat.completions 或新版 responses。
# en: API mode: classic chat.completions or the newer responses API.
api_mode = "chat_completions"
# zh: 是否启用 reasoning.effort。
# en: Enable reasoning.effort.
reasoning_enabled = false
# zh: reasoning effort 档位。
# en: reasoning effort level.
reasoning_effort = "medium"
# zh: 是否启用 thinking(思维链)。
# en: Enable thinking (reasoning).
thinking_enabled = false
# zh: thinking 预算 tokens。
# en: Thinking-budget tokens.
thinking_budget_tokens = 20000
# zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。
# en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget).
thinking_include_budget = true
# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。
# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort).
reasoning_effort_style = "openai"
# zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。
# en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models.
thinking_tool_call_compat = true
# zh: Responses API 的 tool_choice 兼容模式:仅在关闭时请求仍返回 500、怀疑上游不兼容对象型 tool_choice 时再尝试开启;开启后上报为 "required" 并只保留目标工具。当前已在 new-api v0.11.4-alpha.3 发现该问题。默认关闭。
# en: Responses API tool_choice compatibility mode: only try enabling this when requests still return 500 with the default setting and you suspect the upstream does not support object-style tool_choice; it sends "required" and keeps only the selected tool. This issue is currently observed on new-api v0.11.4-alpha.3. Disabled by default.
responses_tool_choice_compat = false
# zh: Responses API 续轮强制降级:启用后,多轮工具调用将始终跳过 previous_response_id,直接使用完整消息重放(stateless replay)。仅在上游不兼容 responses 状态续轮时使用。默认关闭。
# en: Responses API force stateless replay: when enabled, multi-turn tool follow-ups always skip previous_response_id and replay the full message history instead. Use only when the upstream does not handle stateful responses follow-ups correctly. Disabled by default.
responses_force_stateless_replay = false
# zh: 是否启用自动 prompt_cache_key(建议开启,以提高相似请求的缓存命中率)。
# en: Enable automatic prompt_cache_key generation (recommended).
prompt_cache_enabled = true
# zh: 额外请求体参数(可选),可用于 temperature 或供应商私有参数。
# en: Extra request-body params (optional), e.g. temperature or vendor-specific fields.
[models.chat.request_params]
# zh: 模型池配置(可选,支持多模型轮询/随机/用户指定)。
# en: Model pool configuration (optional, supports round-robin/random/user-specified).
[models.chat.pool]
# zh: 是否启用模型池。
# en: Enable model pool.
enabled = false
# zh: 分配策略:default(用户指定)/ round_robin(轮询)/ random(随机)。
# en: Strategy: default (user-specified) / round_robin / random.
strategy = "default"
# zh: 模型池列表(每项需填 model_name、api_url、api_key,其余字段可选,缺省继承主模型)。
# en: Model pool entries (model_name, api_url, api_key required; others optional, inherit from primary).
models = []
# zh: 视觉模型配置(用于图片描述和 OCR)。
# en: Vision model config (image description and OCR).
[models.vision]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(legacy "/chat/completions" 已弃用但仍兼容)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1. Note: legacy "/chat/completions" is deprecated but still supported.
api_url = ""
# zh: Vision 模型 API Key。
# en: Vision model API key.
api_key = ""
# zh: Vision 模型名称。
# en: Vision model name.
model_name = ""
# zh: Vision 模型最大输出 tokens。启用 thinking 时建议设大(如 8192),确保思维链消耗后仍有余量输出工具调用。
# en: Vision model max output tokens. When thinking is enabled, use a larger value (e.g. 8192) so there is still room for tool-call output after thinking.
max_tokens = 8192
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 1.0
# zh: API 模式:传统 chat.completions 或新版 responses。
# en: API mode: classic chat.completions or the newer responses API.
api_mode = "chat_completions"
# zh: 是否启用 reasoning.effort。
# en: Enable reasoning.effort.
reasoning_enabled = false
# zh: reasoning effort 档位。
# en: reasoning effort level.
reasoning_effort = "medium"
# zh: 是否启用 thinking(思维链)。
# en: Enable thinking (reasoning).
thinking_enabled = false
# zh: thinking 预算 tokens。
# en: Thinking-budget tokens.
thinking_budget_tokens = 20000
# zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。
# en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget).
thinking_include_budget = true
# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。
# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort).
reasoning_effort_style = "openai"
# zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。
# en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models.
thinking_tool_call_compat = true
# zh: Responses API 的 tool_choice 兼容模式:仅在关闭时请求仍返回 500、怀疑上游不兼容对象型 tool_choice 时再尝试开启;开启后上报为 "required" 并只保留目标工具。当前已在 new-api v0.11.4-alpha.3 发现该问题。默认关闭。
# en: Responses API tool_choice compatibility mode: only try enabling this when requests still return 500 with the default setting and you suspect the upstream does not support object-style tool_choice; it sends "required" and keeps only the selected tool. This issue is currently observed on new-api v0.11.4-alpha.3. Disabled by default.
responses_tool_choice_compat = false
# zh: Responses API 续轮强制降级:启用后,多轮工具调用将始终跳过 previous_response_id,直接使用完整消息重放(stateless replay)。仅在上游不兼容 responses 状态续轮时使用。默认关闭。
# en: Responses API force stateless replay: when enabled, multi-turn tool follow-ups always skip previous_response_id and replay the full message history instead. Use only when the upstream does not handle stateful responses follow-ups correctly. Disabled by default.
responses_force_stateless_replay = false
# zh: 是否启用自动 prompt_cache_key(建议开启,以提高相似请求的缓存命中率)。
# en: Enable automatic prompt_cache_key generation (recommended).
prompt_cache_enabled = true
# zh: 额外请求体参数(可选),可用于 temperature 或供应商私有参数。
# en: Extra request-body params (optional), e.g. temperature or vendor-specific fields.
[models.vision.request_params]
# zh: 安全模型配置(用于防注入检测和注入后回复生成)。
# en: Security model config (injection detection and post-injection responses).
[models.security]
# zh: 是否启用安全模型检测与注入响应(推荐开启)。
# en: Enable security-model detection and injection response (recommended).
enabled = true
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(legacy "/chat/completions" 已弃用但仍兼容)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1. Note: legacy "/chat/completions" is deprecated but still supported.
api_url = ""
# zh: Security 模型 API Key。
# en: Security model API key.
api_key = ""
# zh: Security 模型名称。
# en: Security model name.
model_name = ""
# zh: 可选限制:最大生成 tokens。
# en: Optional limit: max generation tokens.
max_tokens = 100
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 1.0
# zh: API 模式:传统 chat.completions 或新版 responses。
# en: API mode: classic chat.completions or the newer responses API.
api_mode = "chat_completions"
# zh: 是否启用 reasoning.effort。
# en: Enable reasoning.effort.
reasoning_enabled = false
# zh: reasoning effort 档位。
# en: reasoning effort level.
reasoning_effort = "medium"
# zh: 是否启用 thinking(思维链)。
# en: Enable thinking (reasoning).
thinking_enabled = false
# zh: thinking 预算 tokens。
# en: Thinking-budget tokens.
thinking_budget_tokens = 0
# zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。
# en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget).
thinking_include_budget = true
# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。
# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort).
reasoning_effort_style = "openai"
# zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。
# en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models.
thinking_tool_call_compat = true
# zh: Responses API 的 tool_choice 兼容模式:仅在关闭时请求仍返回 500、怀疑上游不兼容对象型 tool_choice 时再尝试开启;开启后上报为 "required" 并只保留目标工具。当前已在 new-api v0.11.4-alpha.3 发现该问题。默认关闭。
# en: Responses API tool_choice compatibility mode: only try enabling this when requests still return 500 with the default setting and you suspect the upstream does not support object-style tool_choice; it sends "required" and keeps only the selected tool. This issue is currently observed on new-api v0.11.4-alpha.3. Disabled by default.
responses_tool_choice_compat = false
# zh: Responses API 续轮强制降级:启用后,多轮工具调用将始终跳过 previous_response_id,直接使用完整消息重放(stateless replay)。仅在上游不兼容 responses 状态续轮时使用。默认关闭。
# en: Responses API force stateless replay: when enabled, multi-turn tool follow-ups always skip previous_response_id and replay the full message history instead. Use only when the upstream does not handle stateful responses follow-ups correctly. Disabled by default.
responses_force_stateless_replay = false
# zh: 是否启用自动 prompt_cache_key(建议开启,以提高相似请求的缓存命中率)。
# en: Enable automatic prompt_cache_key generation (recommended).
prompt_cache_enabled = true
# zh: 额外请求体参数(可选),可用于 temperature 或供应商私有参数。
# en: Extra request-body params (optional), e.g. temperature or vendor-specific fields.
[models.security.request_params]
# zh: Naga 外发消息审核模型配置(仅用于 Naga 通过 Runtime API 发消息前的内容审核)。
# zh: 若无必要可不填写;未配置时回退到 [models.security]。
# en: Naga outbound moderation model config (used only before Naga sends messages through the Runtime API).
# en: Optional; falls back to [models.security] when omitted.
[models.naga]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(legacy "/chat/completions" 已弃用但仍兼容)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1. Note: legacy "/chat/completions" is deprecated but still supported.
api_url = ""
# zh: Naga 审核模型 API Key。
# en: Naga moderation model API key.
api_key = ""
# zh: Naga 审核模型名称。
# en: Naga moderation model name.
model_name = ""
# zh: 可选限制:最大生成 tokens。
# en: Optional limit: max generation tokens.
max_tokens = 160
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 1.0
# zh: API 模式:传统 chat.completions 或新版 responses。
# en: API mode: classic chat.completions or the newer responses API.
api_mode = "chat_completions"
# zh: 是否启用 reasoning.effort。
# en: Enable reasoning.effort.
reasoning_enabled = false
# zh: reasoning effort 档位。
# en: reasoning effort level.
reasoning_effort = "medium"
# zh: 是否启用 thinking(思维链)。
# en: Enable thinking (reasoning).
thinking_enabled = false
# zh: thinking 预算 tokens。
# en: Thinking-budget tokens.
thinking_budget_tokens = 0
# zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。
# en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget).
thinking_include_budget = true
# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。
# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort).
reasoning_effort_style = "openai"
# zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。
# en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models.
thinking_tool_call_compat = true
# zh: Responses API 的 tool_choice 兼容模式。
# en: Responses API tool_choice compatibility mode.
responses_tool_choice_compat = false
# zh: Responses API 续轮强制降级。
# en: Responses API force stateless replay.
responses_force_stateless_replay = false
# zh: 额外请求体参数(可选),可用于 temperature 或供应商私有参数。
# en: Extra request-body params (optional), e.g. temperature or vendor-specific fields.
[models.naga.request_params]
# zh: Agent 模型配置(用于执行 agents)。
# en: Agent model config (used to run agents).
[models.agent]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(legacy "/chat/completions" 已弃用但仍兼容)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1. Note: legacy "/chat/completions" is deprecated but still supported.
api_url = ""
# zh: Agent 模型 API Key。
# en: Agent model API key.
api_key = ""
# zh: Agent 模型名称。
# en: Agent model name.
model_name = ""
# zh: 可选限制:最大生成 tokens。
# en: Optional limit: max generation tokens.
max_tokens = 4096
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 1.0
# zh: API 模式:传统 chat.completions 或新版 responses。
# en: API mode: classic chat.completions or the newer responses API.
api_mode = "chat_completions"
# zh: 是否启用 reasoning.effort。
# en: Enable reasoning.effort.
reasoning_enabled = false
# zh: reasoning effort 档位。
# en: reasoning effort level.
reasoning_effort = "medium"
# zh: 是否启用 thinking(思维链)。
# en: Enable thinking (reasoning).
thinking_enabled = false
# zh: thinking 预算 tokens。
# en: Thinking-budget tokens.
thinking_budget_tokens = 0
# zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。
# en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget).
thinking_include_budget = true
# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。
# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort).
reasoning_effort_style = "openai"
# zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。
# en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models.
thinking_tool_call_compat = true
# zh: Responses API 的 tool_choice 兼容模式:仅在关闭时请求仍返回 500、怀疑上游不兼容对象型 tool_choice 时再尝试开启;开启后上报为 "required" 并只保留目标工具。当前已在 new-api v0.11.4-alpha.3 发现该问题。默认关闭。
# en: Responses API tool_choice compatibility mode: only try enabling this when requests still return 500 with the default setting and you suspect the upstream does not support object-style tool_choice; it sends "required" and keeps only the selected tool. This issue is currently observed on new-api v0.11.4-alpha.3. Disabled by default.
responses_tool_choice_compat = false
# zh: Responses API 续轮强制降级:启用后,多轮工具调用将始终跳过 previous_response_id,直接使用完整消息重放(stateless replay)。仅在上游不兼容 responses 状态续轮时使用。默认关闭。
# en: Responses API force stateless replay: when enabled, multi-turn tool follow-ups always skip previous_response_id and replay the full message history instead. Use only when the upstream does not handle stateful responses follow-ups correctly. Disabled by default.
responses_force_stateless_replay = false
# zh: 是否启用自动 prompt_cache_key(建议开启,以提高相似请求的缓存命中率)。
# en: Enable automatic prompt_cache_key generation (recommended).
prompt_cache_enabled = true
# zh: 额外请求体参数(可选),可用于 temperature 或供应商私有参数。
# en: Extra request-body params (optional), e.g. temperature or vendor-specific fields.
[models.agent.request_params]
# zh: Agent 模型池配置(可选,支持多模型轮询/随机/用户指定)。
# en: Agent model pool configuration (optional, supports round-robin/random/user-specified).
[models.agent.pool]
# zh: 是否启用 Agent 模型池。
# en: Enable agent model pool.
enabled = false
# zh: 分配策略:default(用户指定)/ round_robin(轮询)/ random(随机)。
# en: Strategy: default (user-specified) / round_robin / random.
strategy = "default"
# zh: Agent 模型池列表(每项需填 model_name、api_url、api_key,其余字段可选,缺省继承主模型)。
# en: Agent model pool entries (model_name, api_url, api_key required; others optional, inherit from primary).
models = []
# zh: 史官模型配置(认知记忆后台改写用,未配置时回退到 agent 模型)。
# en: Historian model config (for cognitive memory rewriting, falls back to agent model if unset).
[models.historian]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(legacy "/chat/completions" 已弃用但仍兼容)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1. Note: legacy "/chat/completions" is deprecated but still supported.
api_url = ""
# zh: 史官模型 API Key。
# en: Historian model API key.
api_key = ""
# zh: 史官模型名称。
# en: Historian model name.
model_name = ""
# zh: 可选限制:最大生成 tokens。
# en: Optional limit: max generation tokens.
max_tokens = 4096
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 1.0
# zh: API 模式:传统 chat.completions 或新版 responses。
# en: API mode: classic chat.completions or the newer responses API.
api_mode = "chat_completions"
# zh: 是否启用 reasoning.effort。
# en: Enable reasoning.effort.
reasoning_enabled = false
# zh: reasoning effort 档位。
# en: reasoning effort level.
reasoning_effort = "medium"
# zh: 是否启用 thinking(思维链)。
# en: Enable thinking (reasoning).
thinking_enabled = false
# zh: thinking 预算 tokens。
# en: Thinking-budget tokens.
thinking_budget_tokens = 0
# zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。
# en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget).
thinking_include_budget = true
# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。
# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort).
reasoning_effort_style = "openai"
# zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。
# en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models.
thinking_tool_call_compat = true
# zh: Responses API 的 tool_choice 兼容模式:仅在关闭时请求仍返回 500、怀疑上游不兼容对象型 tool_choice 时再尝试开启;开启后上报为 "required" 并只保留目标工具。当前已在 new-api v0.11.4-alpha.3 发现该问题。默认关闭。
# en: Responses API tool_choice compatibility mode: only try enabling this when requests still return 500 with the default setting and you suspect the upstream does not support object-style tool_choice; it sends "required" and keeps only the selected tool. This issue is currently observed on new-api v0.11.4-alpha.3. Disabled by default.
responses_tool_choice_compat = false
# zh: Responses API 续轮强制降级:启用后,多轮工具调用将始终跳过 previous_response_id,直接使用完整消息重放(stateless replay)。仅在上游不兼容 responses 状态续轮时使用。默认关闭。
# en: Responses API force stateless replay: when enabled, multi-turn tool follow-ups always skip previous_response_id and replay the full message history instead. Use only when the upstream does not handle stateful responses follow-ups correctly. Disabled by default.
responses_force_stateless_replay = false
# zh: 是否启用自动 prompt_cache_key(建议开启,以提高相似请求的缓存命中率)。
# en: Enable automatic prompt_cache_key generation (recommended).
prompt_cache_enabled = true
# zh: 额外请求体参数(可选),可用于 temperature 或供应商私有参数。
# en: Extra request-body params (optional), e.g. temperature or vendor-specific fields.
[models.historian.request_params]
# zh: 消息总结模型配置(/summary /sum 专用;未配置时回退到 agent 模型)。
# en: Message-summary model config (used by /summary and /sum; falls back to the agent model when unset).
[models.summary]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(legacy "/chat/completions" 已弃用但仍兼容)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1. Note: legacy "/chat/completions" is deprecated but still supported.
api_url = ""
# zh: 消息总结模型 API Key。
# en: Message-summary model API key.
api_key = ""
# zh: 消息总结模型名称。
# en: Message-summary model name.
model_name = ""
# zh: 可选限制:最大生成 tokens。
# en: Optional limit: max generation tokens.
max_tokens = 4096
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 1.0
# zh: API 模式:传统 chat.completions 或新版 responses。
# en: API mode: classic chat.completions or the newer responses API.
api_mode = "chat_completions"
# zh: 是否启用 reasoning.effort。
# en: Enable reasoning.effort.
reasoning_enabled = false
# zh: reasoning effort 档位。
# en: reasoning effort level.
reasoning_effort = "medium"
# zh: 是否启用 thinking(思维链)。
# en: Enable thinking (reasoning).
thinking_enabled = false
# zh: thinking 预算 tokens。
# en: Thinking-budget tokens.
thinking_budget_tokens = 0
# zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。
# en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget).
thinking_include_budget = true
# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。
# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort).
reasoning_effort_style = "openai"
# zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。
# en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models.
thinking_tool_call_compat = true
# zh: Responses API 的 tool_choice 兼容模式:仅在关闭时请求仍返回 500、怀疑上游不兼容对象型 tool_choice 时再尝试开启;开启后上报为 "required" 并只保留目标工具。默认关闭。
# en: Responses API tool_choice compatibility mode: only try enabling this when requests still return 500 with the default setting and you suspect the upstream does not support object-style tool_choice; it sends "required" and keeps only the selected tool. Disabled by default.
responses_tool_choice_compat = false
# zh: Responses API 续轮强制降级:启用后,多轮工具调用将始终跳过 previous_response_id,直接使用完整消息重放(stateless replay)。仅在上游不兼容 responses 状态续轮时使用。默认关闭。
# en: Responses API force stateless replay: when enabled, multi-turn tool follow-ups always skip previous_response_id and replay the full message history instead. Use only when the upstream does not handle stateful responses follow-ups correctly. Disabled by default.
responses_force_stateless_replay = false
# zh: 是否启用自动 prompt_cache_key(建议开启,以提高相似请求的缓存命中率)。
# en: Enable automatic prompt_cache_key generation (recommended).
prompt_cache_enabled = true
# zh: 额外请求体参数(可选),可用于 temperature 或供应商私有参数。
# en: Extra request-body params (optional), e.g. temperature or vendor-specific fields.
[models.summary.request_params]
# zh: Grok 搜索模型配置(仅供 web_agent 内的 grok_search 使用;固定走 chat.completions,不支持 tool call 兼容字段)。
# en: Grok search model config (used only by grok_search inside web_agent; always uses chat completions and does not expose tool-call compatibility fields).
[models.grok]
# zh: OpenAI-compatible 基址 URL,例如 https://api.example.com/v1。
# en: OpenAI-compatible base URL, e.g. https://api.example.com/v1.
api_url = ""
# zh: Grok 搜索模型 API Key。
# en: Grok search model API key.
api_key = ""
# zh: Grok 搜索模型名称。
# en: Grok search model name.
model_name = ""
# zh: 可选限制:最大生成 tokens。
# en: Optional limit: max generation tokens.
max_tokens = 8192
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 1.0
# zh: 是否启用 reasoning.effort。
# en: Enable reasoning.effort.
reasoning_enabled = false
# zh: reasoning effort 档位。
# en: reasoning effort level.
reasoning_effort = "medium"
# zh: 是否启用 thinking(思维链)。
# en: Enable thinking (reasoning).
thinking_enabled = false
# zh: thinking 预算 tokens。
# en: Thinking-budget tokens.
thinking_budget_tokens = 20000
# zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。
# en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget).
thinking_include_budget = true
# zh: reasoning effort 传参风格:openai(reasoning_effort)/ anthropic(output_config.effort)。
# en: Reasoning effort wire format: openai (reasoning_effort) / anthropic (output_config.effort).
reasoning_effort_style = "openai"
# zh: 是否启用自动 prompt_cache_key(建议开启,以提高相似请求的缓存命中率)。
# en: Enable automatic prompt_cache_key generation (recommended).
prompt_cache_enabled = true
# zh: 额外请求体参数(可选),可用于 temperature 或供应商私有参数。
# en: Extra request-body params (optional), e.g. temperature or vendor-specific fields.
[models.grok.request_params]
# zh: 嵌入模型配置(知识库语义检索使用)。
# en: Embedding model config (used by knowledge semantic retrieval).
[models.embedding]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1.
api_url = ""
# zh: Embedding 模型 API Key。
# en: Embedding model API key.
api_key = ""
# zh: Embedding 模型名称。
# en: Embedding model name.
model_name = ""
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 0.0
# zh: 向量维度(可选)。0 或留空表示使用模型默认维度。
# en: Embedding dimensions (optional). Use 0/empty to use model defaults.
dimensions = 0
# zh: 查询端指令前缀(可选,Qwen3/BGE 等模型常用)。
# en: Query instruction prefix (optional, common for Qwen3/BGE-style models).
query_instruction = ""
# zh: 文档端指令前缀(可选,E5 等模型常用,例如 "passage: ")。
# en: Document instruction prefix (optional, common for E5-style models, e.g. "passage: ").
document_instruction = ""
# zh: 额外请求体参数(可选),用于 embedding 供应商的扩展字段。
# en: Extra request-body params (optional) for embedding-provider-specific fields.
[models.embedding.request_params]
# zh: 重排模型配置(知识库二阶段检索使用)。
# en: Rerank model config (used in second-stage knowledge retrieval).
[models.rerank]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1.
api_url = ""
# zh: Rerank 模型 API Key。
# en: Rerank model API key.
api_key = ""
# zh: Rerank 模型名称。
# en: Rerank model name.
model_name = ""
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
queue_interval_seconds = 0.0
# zh: 查询端指令前缀(可选,部分 rerank 模型需要,如 "Instruct: ...\\nQuery: ")。
# en: Query instruction prefix (optional, required by some rerank models, e.g. "Instruct: ...\\nQuery: ").
query_instruction = ""
# zh: 额外请求体参数(可选),用于 rerank 供应商的扩展字段。
# en: Extra request-body params (optional) for rerank-provider-specific fields.
[models.rerank.request_params]
# zh: 生图模型配置(用于 image_gen.provider="models" 时调用 OpenAI 兼容的图片生成接口)。
# en: Image generation model config (used when image_gen.provider="models" to call OpenAI-compatible image generation API).
[models.image_gen]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(最终请求路径为 /v1/images/generations)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1 (final request path is /v1/images/generations).
api_url = ""
# zh: API Key。
# en: API key.
api_key = ""
# zh: 模型名称(如 dall-e-3、minimax-image-01 等),空则使用上游默认。
# en: Model name (e.g. dall-e-3, minimax-image-01, etc.), empty uses provider default.
model_name = ""
# zh: 额外请求体参数(可选)。
# en: Extra request-body params (optional).
[models.image_gen.request_params]
# zh: 参考图生图模型配置(用于 ai_draw_one 传入 reference_image_uids 时调用 OpenAI 兼容的图片编辑接口)。
# en: Reference-image generation model config (used when ai_draw_one receives reference_image_uids and calls the OpenAI-compatible image editing API).
[models.image_edit]
# zh: OpenAI-compatible 基址 URL,例如 https://api.openai.com/v1(最终请求路径为 /v1/images/edits)。
# en: OpenAI-compatible base URL, e.g. https://api.openai.com/v1 (final request path is /v1/images/edits).
api_url = ""
# zh: API Key。
# en: API key.
api_key = ""
# zh: 模型名称,空则回退到 [models.image_gen] 的 model_name。
# en: Model name, empty falls back to [models.image_gen].model_name.
model_name = ""
# zh: 额外请求体参数(可选)。
# en: Extra request-body params (optional).
[models.image_edit.request_params]
# zh: 本地知识库配置。
# en: Local knowledge base settings.
[knowledge]
# zh: 是否启用本地知识库。
# en: Enable local knowledge base.
enabled = false
# zh: 知识库根目录。
# en: Knowledge base root directory.
base_dir = "knowledge"
# zh: 是否定期扫描文本变更。
# en: Periodically scan text changes.
auto_scan = false
# zh: 是否自动执行嵌入。
# en: Automatically run embedding.
auto_embed = false
# zh: 扫描间隔(秒)。
# en: Scan interval (seconds).
scan_interval = 60.0
# zh: 每批嵌入块数。
# en: Embedding batch size.
embed_batch_size = 64
# zh: 每个向量块包含的行数(滑动窗口大小)。
# en: Number of lines per chunk (sliding window size).
chunk_size = 10
# zh: 相邻块重叠的行数。
# en: Overlap lines between adjacent chunks.
chunk_overlap = 2
# zh: 语义搜索默认召回数。
# en: Default semantic retrieval top-k.
default_top_k = 5
# zh: 是否启用语义检索后的重排。
# en: Enable reranking after semantic retrieval.
enable_rerank = false
# zh: 重排后返回条数(必须小于 default_top_k)。
# en: Number of results returned after rerank (must be less than default_top_k).
rerank_top_k = 3
# zh: 日志配置。
# en: Logging settings.
[logging]
# zh: 日志级别(DEBUG/INFO/WARNING/ERROR)。
# en: Log level (DEBUG/INFO/WARNING/ERROR).
level = "INFO"
# zh: 日志文件路径。
# en: Log file path.
# zh: 在 Windows 上建议使用正斜杠,如 D:/logs/bot.log,或使用单引号 'D:\logs\bot.log',或在双引号中写双反斜杠 "D:\\logs\\bot.log"。
# en: On Windows, prefer "D:/logs/bot.log", or "'D:\logs\bot.log'", or "D:\\logs\\bot.log".
file_path = "logs/bot.log"
# zh: 单个日志文件最大大小(MB)。
# en: Max log file size (MB).
max_size_mb = 10
# zh: 保留日志文件数量。
# en: Log backup count.
backup_count = 5
# zh: 是否输出到终端 TTY(默认关闭,避免后台运行时终端阻塞)。
# en: Enable logging to terminal TTY (default: off, prevents blocking in background runs).
tty_enabled = false
# zh: 是否在日志中输出思维链(默认开启)。
# en: Log thinking output (default: on).
log_thinking = true
# zh: Tools 兼容性(可选)。
# en: Tools compatibility (optional).
# zh: 部分 OpenAI-compatible 网关会对 tools schema 做更严格的校验(尤其是 tools[].function.name / description),可能触发 400。
# en: Some OpenAI-compatible gateways strictly validate tools schema (especially tools[].function.name/description) and may return 400.
[tools]
# zh: 工具名分隔符:当工具原始名称包含 '.'(例如 scheduler.create_schedule_task / mcp.server.tool)时,发送给模型前会把 '.' 映射为该分隔符。
# en: Tool-name delimiter: map '.' to this delimiter before sending tools to the model.
dot_delimiter = "-_-"
# zh: 是否启用 tools.description 截断。关闭时不会按长度截断,仅做清理/规范化。
# en: Enable truncation for tools.description. If disabled, no length-based truncation is applied.
description_truncate_enabled = false
# zh: description 最大长度(仅在开启 description_truncate_enabled 时生效)。
# en: Description max length (used only when description_truncate_enabled is enabled).
description_max_len = 1024
# zh: 是否输出逐条清洗详情日志(INFO)。
# en: Log per-item sanitization details (INFO).
sanitize_verbose = false
# zh: 日志中 description 预览长度。
# en: Description preview length in logs.
description_preview_len = 160
# zh: 功能开关。
# en: Feature flags.
[features]
# zh: 是否启用 NagaAgent 模式:
# zh: - true: 使用 `res/prompts/undefined_nagaagent.xml`,并向模型暴露/允许调用相关 Agent
# zh: - false: 使用 `res/prompts/undefined.xml`,并隐藏/禁用相关 Agent(避免无关提及)
# en: Enable NagaAgent mode:
# en: - true: use `res/prompts/undefined_nagaagent.xml` and expose related agents
# en: - false: use `res/prompts/undefined.xml` and hide/disable related agents
nagaagent_mode_enabled = false
# zh: 多模型池全局开关(关闭后所有多模型功能禁用,行为与原版一致)。无特殊需求不建议启用。
# en: Global model pool switch. When disabled, all multi-model features are disabled. Not recommended unless you have specific needs.
pool_enabled = false
# zh: 彩蛋功能(可选)。
# en: Easter egg features (optional).
[easter_egg]
# zh: 彩蛋提示发送模式。模式:"none"(关闭)/"agent"(主 AI 调用 Agent 时发送)/"tools"(主 AI 或 Agent 调用 Tool 时发送)/"clean"(过滤噪声;对自动预取的工具如 "get_current_time"、"send_message"、"end" 不予提示)/"all"(包括 Agent 内部调用其子工具即 "agent_tool" 的场景也发送)。默认:"none"。
# en: Easter-egg announcement mode. Modes: "none" (off) / "agent" (send when the main AI calls an Agent) / "tools" (send when the main AI or an Agent calls a Tool) / "clean" (filter noise; automatically prefetched tools such as "get_current_time", "send_message", and "end" are not announced) / "all" (also send when an Agent internally calls its sub-tools, i.e. "agent_tool"). Default: "none".
agent_call_message_enabled = "none"
# zh: 是否启用群聊关键词("心理委员")自动回复。
# en: Enable keyword auto-replies ("心理委员") in group chats.
keyword_reply_enabled = false
# zh: 是否启用群聊复读功能(连续 N 条相同消息时复读,N 由 repeat_threshold 控制;若期间有 bot 自身发言则重置链)。
# en: Enable repeat feature in group chats (repeat when N consecutive identical messages arrive, N set by repeat_threshold; resets if bot itself sent the same text in between).
repeat_enabled = false
# zh: 复读触发所需的连续相同消息条数(来自不同发送者),范围 2–20,默认 3。
# en: Number of consecutive identical messages (from different senders) required to trigger repeat, range 2–20, default 3.
repeat_threshold = 3
# zh: 复读冷却时间(分钟)。同一内容被复读后,在冷却时间内不再重复复读。0 = 无冷却。问号类消息(?/?)视为等价。
# en: Repeat cooldown (minutes). After repeating the same content, won't repeat it again within this cooldown. 0 = no cooldown. Half-width and full-width question marks (?/?) are treated as equivalent.
repeat_cooldown_minutes = 60
# zh: 是否启用倒问号(复读触发时,若消息为问号则发送倒问号 ¿)。
# en: Enable inverted question mark (when repeat triggers on "?" messages, send "¿" instead).
inverted_question_enabled = false
# zh: 历史记录配置。
# en: History settings.
[history]
# zh: 每个会话最多保留的消息条数(0 = 无限制,注意内存占用)。
# en: Max messages to keep per conversation (0 = unlimited, mind memory usage).
max_records = 10000
# zh: 工具过滤查询返回的最大消息条数。
# en: Max messages returned by tool filtered queries.
filtered_result_limit = 200
# zh: 工具过滤搜索时扫描的最大消息条数。
# en: Max messages to scan when tools perform filtered searches.
search_scan_limit = 10000
# zh: 总结 agent 单次拉取的最大消息条数。
# en: Max messages the summary agent can fetch per call.
summary_fetch_limit = 1000
# zh: 总结 agent 按时间范围查询时的最大拉取条数。
# en: Max messages the summary agent fetches for time-range queries.
summary_time_fetch_limit = 5000
# zh: OneBot API 回退获取的最大消息条数。
# en: Max messages to fetch via OneBot API fallback.
onebot_fetch_limit = 10000
# zh: 群分析工具的消息/成员返回上限。
# en: Max messages/members returned by group analysis tools.
group_analysis_limit = 500
# zh: Skills 热重载配置(可选)。
# en: Skills hot reload settings (optional).
[skills]
# zh: 是否开启 skills 热重载。
# en: Enable skills hot reload.
hot_reload = true
# zh: 扫描间隔(秒)。
# en: Scan interval (seconds).
hot_reload_interval = 2.0
# zh: 去抖时间(秒)。
# en: Debounce time (seconds).
hot_reload_debounce = 0.5
# zh: Agent intro 自动生成(推荐开启)。
# en: Agent intro auto-generation (recommended).
intro_autogen_enabled = true
# zh: 队列发车间隔(秒,0 表示立即发车)。
# en: Queue interval (seconds; 0 dispatches immediately).
intro_autogen_queue_interval = 1.0
# zh: 单次生成最大 token。
# en: Max tokens per generation.
intro_autogen_max_tokens = 8192
# zh: Hash 缓存路径。
# en: Hash cache path.
intro_hash_path = ".cache/agent_intro_hashes.json"
# zh: 预先执行的工具列表(结果注入 system 消息)。
# en: Prefetch tool list (results injected into system messages).
prefetch_tools = ["get_current_time"]
# zh: 隐藏已预取的工具声明。
# en: Hide prefetched tools from the model's tool list.
prefetch_tools_hide = true
# zh: 搜索服务配置。
# en: Search service config.
[search]
# zh: SearxNG 搜索服务地址,例如 http://127.0.0.1:8849。
# en: SearxNG service URL, e.g. http://127.0.0.1:8849.
searxng_url = ""
# zh: 是否在 web_agent 中启用 grok_search。启用后该工具会优先于 web_search 暴露给模型。
# en: Enable grok_search in web_agent. When enabled, this tool is exposed with higher priority than web_search.
grok_search_enabled = false
# zh: 代理设置(可选)。
# en: Proxy settings (optional).
[proxy]
# zh: 是否使用代理。
# en: Whether to use proxy.
use_proxy = true
# zh: 例如 http://127.0.0.1:7890(也可使用环境变量 "HTTP_PROXY")。
# en: e.g. http://127.0.0.1:7890 (or use the "HTTP_PROXY" environment variable).
http_proxy = ""
# zh: 例如 http://127.0.0.1:7890(也可使用环境变量 "HTTPS_PROXY")。
# en: e.g. http://127.0.0.1:7890 (or use the "HTTPS_PROXY" environment variable).
https_proxy = ""
# zh: 网络请求配置(全局默认)。
# en: Network request settings (global defaults).
[network]
# zh: 默认请求超时(秒),用于多数工具的 HTTP 请求。
# en: Default request timeout in seconds for most HTTP tool calls.
request_timeout_seconds = 30.0
# zh: 默认重试次数(0-5)。
# en: Default retry count (0-5).
request_retries = 0
# zh: 第三方 API 基础地址(便于自定义镜像或私有网关)。
# en: Third-party API base URLs (for mirrors or private gateways).
[api_endpoints]
# zh: XXAPI 基础地址。
# en: XXAPI base URL.
xxapi_base_url = "https://v2.xxapi.cn"
# zh: 星之阁 API 基础地址。
# en: Xingzhige API base URL.
xingzhige_base_url = "https://api.xingzhige.com"
# zh: 生图工具配置。
# en: Image generation tool config.
[image_gen]
# zh: 生图 provider:"xingzhige"(免费星之阁 API)或 "models"(使用 [models.image_gen] 配置的 OpenAI 兼容接口)。
# en: Image generation provider: "xingzhige" (free Xingzhige API) or "models" (OpenAI-compatible via [models.image_gen]).
provider = "xingzhige"
# zh: 星之阁模式默认图片比例(仅 provider="xingzhige" 生效)。
# en: Default image size for Xingzhige mode (only effective when provider="xingzhige").
xingzhige_size = "1:1"
# zh: OpenAI 模式图片尺寸(仅 provider="models" 生效;空表示不传,使用上游默认,如 dall-e-3 默认 1024x1024)。
# en: Image size for OpenAI mode (only effective when provider="models"; empty means not sent, using provider default, e.g. dall-e-3 default 1024x1024).
openai_size = ""
# zh: OpenAI 模式图片质量(仅 provider="models" 生效;空表示不传;dall-e-3 支持 standard/hd)。
# en: Image quality for OpenAI mode (only effective when provider="models"; empty means not sent; dall-e-3 supports standard/hd).
openai_quality = ""
# zh: OpenAI 模式图片风格(仅 provider="models" 生效;空表示不传;dall-e-3 支持 vivid/natural)。
# en: Image style for OpenAI mode (only effective when provider="models"; empty means not sent; dall-e-3 supports vivid/natural).
openai_style = ""
# zh: OpenAI 模式请求超时(秒)。
# en: Request timeout for OpenAI mode (seconds).
openai_timeout = 120.0
# zh: XXAPI API 配置。
# en: XXAPI config.
[xxapi]
# zh: XXAPI Token,前往 https://xxapi.cn/ 获取。
# en: XXAPI token; obtain from https://xxapi.cn/.
api_token = ""
# zh: Token 统计归档配置。
# en: Token usage archive settings.
[token_usage]
# zh: 超过该大小压缩归档(MB),<= 0 禁用。
# en: Archive when size exceeds (MB); <= 0 disables.
max_size_mb = 5
# zh: 最多保留归档数量,<= 0 不限制。
# en: Max archive count; <= 0 means unlimited.
max_archives = 30
# zh: 归档总大小上限(MB),<= 0 禁用。
# en: Max total archive size (MB); <= 0 disables.
max_total_mb = 0
# zh: 归档清理模式:"delete"(删除最旧,默认)/"merge"(合并最旧,尽量无损)/"none"(不清理,可能无限增长)。
# en: Prune mode: "delete" (default) / "merge" (attempt lossless merge) / "none" (no cleanup).
archive_prune_mode = "delete"
# zh: MCP(Model Context Protocol)配置。
# en: MCP (Model Context Protocol) settings.
[mcp]
# zh: MCP 配置文件路径(相对于工作目录)。
# en: MCP config file path (relative to the working directory).
config_path = "config/mcp.json"
# zh: 消息工具配置。
# en: Message tool settings.
[messages]
# zh: messages.send_text_file 单文件文本发送大小上限(KB)。默认 512KB(0.5MB)。
# en: Size limit for messages.send_text_file single-text-file uploads (KB). Default 512KB (0.5MB).
send_text_file_max_size_kb = 512
# zh: messages.send_url_file URL 文件发送大小上限(MB)。默认 100MB。
# en: Size limit for messages.send_url_file URL-file uploads (MB). Default 100MB.
send_url_file_max_size_mb = 100
# zh: Bilibili 视频自动提取配置。
# en: Bilibili video auto-extraction settings.
[bilibili]
# zh: 是否启用自动提取(检测到B站视频链接/BV号时自动下载并发送)。
# en: Enable auto-extraction (auto-download and send when Bilibili video links/BV IDs are detected).
auto_extract_enabled = false
# zh: B站账号完整 Cookie 字符串(必须粘贴浏览器中的完整内容,不要只填 SESSDATA;建议至少包含 SESSDATA + buvid3 + buvid4,用于通过风控与 WBI 搜索)。
# en: Full Bilibili Cookie string (paste the complete browser cookie; do not provide SESSDATA only. Recommended to include at least SESSDATA + buvid3 + buvid4 for anti-bot checks and WBI search).
cookie = ""
# zh: 首选清晰度: 80=1080P, 64=720P, 32=480P。
# en: Preferred quality: 80=1080P, 64=720P, 32=480P.
prefer_quality = 80
# zh: 最大视频时长(秒),超过则不下载,仅发送信息卡片。0=不限。
# en: Max video duration (seconds); exceeding this sends info card only. 0=unlimited.
max_duration = 600
# zh: 最大文件大小(MB),超过则触发 oversize_strategy。0=不限。
# en: Max file size (MB); triggers oversize_strategy if exceeded. 0=unlimited.
max_file_size = 100
# zh: 超限策略: "downgrade"=降低清晰度重试, "info"=发送封面+标题+简介。
# en: Oversize strategy: "downgrade"=retry at lower quality, "info"=send cover+title+description.
oversize_strategy = "downgrade"
# zh: 自动提取功能的群聊白名单(空=跟随全局 access.allowed_group_ids)。
# en: Group allowlist for auto-extraction (empty = follow global access.allowed_group_ids).
auto_extract_group_ids = []
# zh: 自动提取功能的私聊白名单(空=跟随全局 access.allowed_private_ids)。
# en: Private chat allowlist for auto-extraction (empty = follow global access.allowed_private_ids).
auto_extract_private_ids = []
# zh: arXiv 论文自动提取配置。
# en: arXiv paper auto-extraction settings.
[arxiv]
# zh: 是否启用自动提取(检测到 arXiv 链接 / arXiv:ID / 带 arxiv 关键词的新式编号时自动发送论文信息并尽力附带 PDF)。
# en: Enable auto-extraction (auto-send paper info and best-effort PDF when arXiv links / arXiv:ID / keyword-scoped new-style IDs are detected).
auto_extract_enabled = false
# zh: 最大 PDF 文件大小(MB),超过则不上传 PDF,仅发送论文信息。0=不限。
# en: Max PDF file size (MB); exceeding this skips PDF upload and sends paper info only. 0=unlimited.
max_file_size = 100
# zh: 自动提取功能的群聊白名单(空=跟随全局 access.allowed_group_ids)。
# en: Group allowlist for auto-extraction (empty = follow global access.allowed_group_ids).
auto_extract_group_ids = []
# zh: 自动提取功能的私聊白名单(空=跟随全局 access.allowed_private_ids)。
# en: Private chat allowlist for auto-extraction (empty = follow global access.allowed_private_ids).
auto_extract_private_ids = []
# zh: 单条消息最多自动处理几篇 arXiv 论文。<=0 回退 5。
# en: Max number of arXiv papers to auto-process from one message. <=0 falls back to 5.
auto_extract_max_items = 5
# zh: 论文信息里最多展示多少位作者。<=0 回退 20。
# en: Max number of authors shown in paper info. <=0 falls back to 20.
author_preview_limit = 20
# zh: 论文信息里摘要预览的最大字符数。<=0 回退 1000。
# en: Max summary preview characters in paper info. <=0 falls back to 1000.
summary_preview_chars = 1000
# zh: Code Delivery Agent 配置(代码交付 Agent,在 Docker 容器中编写代码并打包上传)。
# en: Code Delivery Agent settings (writes code in Docker containers and delivers packaged results).
[code_delivery]
# zh: 是否启用 Code Delivery Agent。
# en: Enable Code Delivery Agent.
enabled = true
# zh: 任务根目录(相对于工作目录)。
# en: Task root directory (relative to working directory).
task_root = "data/code_delivery"
# zh: Docker 镜像名称。
# en: Docker image name.
docker_image = "ubuntu:24.04"
# zh: 容器名前缀。
# en: Container name prefix.
container_name_prefix = "code_delivery_"
# zh: 容器名后缀。
# en: Container name suffix.
container_name_suffix = "_runner"
# zh: 命令默认超时时间(秒),0 表示不限时。
# en: Default command timeout (seconds), 0 means no limit.
default_command_timeout_seconds = 0
# zh: 命令输出最大字符数。
# en: Max command output characters.
max_command_output_chars = 20000
# zh: 默认归档格式(zip 或 tar.gz)。
# en: Default archive format (zip or tar.gz).
default_archive_format = "zip"
# zh: 归档文件最大大小(MB)。
# en: Max archive file size (MB).
max_archive_size_mb = 200
# zh: 任务完成后是否清理工作区和容器。
# en: Clean up workspace and container after task completion.
cleanup_on_finish = true
# zh: 启动前是否清理残留工作区和容器。
# en: Clean up leftover workspaces and containers on startup.
cleanup_on_start = true
# zh: 单次 LLM 请求最大连续失败次数(达到后发送失败通知并终止任务)。
# en: Max consecutive LLM failures per request (sends failure notification and terminates task when reached).
llm_max_retries_per_request = 5
# zh: LLM 连续失败时是否向目标发送通知。
# en: Send notification to target on consecutive LLM failures.
notify_on_llm_failure = true
# zh: 容器内存限制(如 "2g", "512m"),留空表示不限制。
# en: Container memory limit (e.g., "2g", "512m"), empty means no limit.
container_memory_limit = ""
# zh: 容器 CPU 限制(如 "2.0", "0.5"),留空表示不限制。
# en: Container CPU limit (e.g., "2.0", "0.5"), empty means no limit.
container_cpu_limit = ""
# zh: 命令黑名单(禁止执行的命令模式列表,支持通配符)。
# en: Command blacklist (list of forbidden command patterns, supports wildcards).
command_blacklist = ["rm -rf /", ":(){ :|:& };:", "mkfs.*", "dd if=/dev/zero"]
# zh: WebUI 设置。仅在使用 `Undefined-webui` 启动 WebUI 时生效;直接运行 `Undefined` 可忽略本段。
# en: WebUI settings. Only used when starting WebUI via `Undefined-webui`; ignore this section if you run `Undefined` directly.
[webui]
# zh: WebUI 监听地址。