diff --git a/config.toml.example b/config.toml.example index b186b773..c6705d68 100644 --- a/config.toml.example +++ b/config.toml.example @@ -95,8 +95,8 @@ api_mode = "chat_completions" # zh: 是否启用 reasoning.effort。 # en: Enable reasoning.effort. reasoning_enabled = false -# zh: reasoning.effort 档位:none / minimal / low / medium / high / xhigh。 -# en: reasoning.effort level: none / minimal / low / medium / high / xhigh. +# zh: reasoning effort 档位。 +# en: reasoning effort level. reasoning_effort = "medium" # zh: 是否启用 thinking(思维链)。 # en: Enable thinking (reasoning). @@ -107,6 +107,9 @@ thinking_budget_tokens = 20000 # zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。 # en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget). thinking_include_budget = true +# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。 +# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort). +reasoning_effort_style = "openai" # zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。 # en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models. thinking_tool_call_compat = true @@ -155,8 +158,8 @@ api_mode = "chat_completions" # zh: 是否启用 reasoning.effort。 # en: Enable reasoning.effort. reasoning_enabled = false -# zh: reasoning.effort 档位:none / minimal / low / medium / high / xhigh。 -# en: reasoning.effort level: none / minimal / low / medium / high / xhigh. +# zh: reasoning effort 档位。 +# en: reasoning effort level. reasoning_effort = "medium" # zh: 是否启用 thinking(思维链)。 # en: Enable thinking (reasoning). @@ -167,6 +170,9 @@ thinking_budget_tokens = 20000 # zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。 # en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget). 
thinking_include_budget = true +# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。 +# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort). +reasoning_effort_style = "openai" # zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。 # en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models. thinking_tool_call_compat = true @@ -208,8 +214,8 @@ api_mode = "chat_completions" # zh: 是否启用 reasoning.effort。 # en: Enable reasoning.effort. reasoning_enabled = false -# zh: reasoning.effort 档位:none / minimal / low / medium / high / xhigh。 -# en: reasoning.effort level: none / minimal / low / medium / high / xhigh. +# zh: reasoning effort 档位。 +# en: reasoning effort level. reasoning_effort = "medium" # zh: 是否启用 thinking(思维链)。 # en: Enable thinking (reasoning). @@ -220,6 +226,9 @@ thinking_budget_tokens = 0 # zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。 # en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget). thinking_include_budget = true +# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。 +# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort). +reasoning_effort_style = "openai" # zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。 # en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models. thinking_tool_call_compat = true @@ -258,8 +267,8 @@ api_mode = "chat_completions" # zh: 是否启用 reasoning.effort。 # en: Enable reasoning.effort. reasoning_enabled = false -# zh: reasoning.effort 档位:none / minimal / low / medium / high / xhigh。 -# en: reasoning.effort level: none / minimal / low / medium / high / xhigh. +# zh: reasoning effort 档位。 +# en: reasoning effort level. 
reasoning_effort = "medium" # zh: 是否启用 thinking(思维链)。 # en: Enable thinking (reasoning). @@ -270,6 +279,9 @@ thinking_budget_tokens = 0 # zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。 # en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget). thinking_include_budget = true +# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。 +# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort). +reasoning_effort_style = "openai" # zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。 # en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models. thinking_tool_call_compat = true @@ -321,8 +333,8 @@ api_mode = "chat_completions" # zh: 是否启用 reasoning.effort。 # en: Enable reasoning.effort. reasoning_enabled = false -# zh: reasoning.effort 档位:none / minimal / low / medium / high / xhigh。 -# en: reasoning.effort level: none / minimal / low / medium / high / xhigh. +# zh: reasoning effort 档位。 +# en: reasoning effort level. reasoning_effort = "medium" # zh: 是否启用 thinking(思维链)。 # en: Enable thinking (reasoning). @@ -333,6 +345,9 @@ thinking_budget_tokens = 0 # zh: 是否在请求中发送 budget_tokens(关闭后由提供商决定思维预算)。 # en: Whether to include budget_tokens in the request (if disabled, the provider decides the thinking budget). thinking_include_budget = true +# zh: reasoning effort 传参风格:openai(reasoning.effort)/ anthropic(output_config.effort)。 +# en: Reasoning effort wire format: openai (reasoning.effort) / anthropic (output_config.effort). +reasoning_effort_style = "openai" # zh: 思维链工具调用兼容:启用后在多轮工具调用中回传 reasoning_content,避免部分模型返回 400。 # en: Thinking tool-call compatibility: pass back reasoning_content in multi-turn tool calls to avoid 400 errors from some models. 
thinking_tool_call_compat = true diff --git a/scripts/sync_config_template.py b/scripts/sync_config_template.py index 93cff1bb..e4320012 100755 --- a/scripts/sync_config_template.py +++ b/scripts/sync_config_template.py @@ -39,16 +39,49 @@ def build_parser() -> argparse.ArgumentParser: action="store_true", help="将同步后的完整 TOML 输出到标准输出。", ) + parser.add_argument( + "--prune", + action="store_true", + help="删除存在于 config.toml 但不存在于 config.toml.example 中的配置项(危险操作,需二次确认)。", + ) return parser +def _confirm_prune(removed_paths: list[str]) -> bool: + """显示即将删除的路径并请求用户二次确认。""" + print( + "\n\033[1;31m[sync-config] ⚠ 危险操作:以下配置项不存在于模板中,将被永久删除:\033[0m" + ) + for path in removed_paths: + print(f" \033[31m- {path}\033[0m") + print() + try: + answer = input( + "\033[1;33m确认删除以上配置项?此操作不可撤销。输入 yes 确认: \033[0m" + ) + except (EOFError, KeyboardInterrupt): + print() + return False + return answer.strip().lower() == "yes" + + +def _initial_action_label(*, dry_run: bool, prune: bool) -> str: + if dry_run: + return "预览完成" + if prune: + return "分析完成" + return "同步完成" + + def main() -> int: args = build_parser().parse_args() + + # 第一轮:不带 prune 的常规同步(或 dry-run 预览) try: result = sync_config_file( config_path=args.config, example_path=args.example, - write=not args.dry_run, + write=not args.dry_run and not args.prune, ) except FileNotFoundError as exc: print(f"[sync-config] 未找到示例配置:{exc}", file=sys.stderr) @@ -57,12 +90,53 @@ def main() -> int: print(f"[sync-config] 配置解析失败:{exc}", file=sys.stderr) return 1 - action = "预览完成" if args.dry_run else "同步完成" + action = _initial_action_label(dry_run=args.dry_run, prune=args.prune) print(f"[sync-config] {action}: {args.config}") print(f"[sync-config] 新增路径数量: {len(result.added_paths)}") for path in result.added_paths: print(f" + {path}") + if result.removed_paths: + print(f"[sync-config] 多余路径数量: {len(result.removed_paths)}") + for path in result.removed_paths: + print(f" - {path}") + + # --prune 流程:确认后带 prune 重新同步 + if args.prune and result.removed_paths: 
+ if args.dry_run: + print("\n[sync-config] --dry-run 模式,跳过删除。") + elif _confirm_prune(result.removed_paths): + result = sync_config_file( + config_path=args.config, + example_path=args.example, + write=True, + prune=True, + ) + print( + f"\033[1;32m[sync-config] 已删除 {len(result.removed_paths)} 个多余配置项并写回文件。\033[0m" + ) + else: + # 用户取消 prune,仍执行不带 prune 的常规同步 + sync_config_file( + config_path=args.config, + example_path=args.example, + write=True, + prune=False, + ) + print("[sync-config] 已取消删除,仅执行常规同步。") + elif args.prune and not result.removed_paths: + if not args.dry_run: + # 无多余项但仍需写入常规同步结果 + sync_config_file( + config_path=args.config, + example_path=args.example, + write=True, + prune=False, + ) + print("[sync-config] 无多余配置项需要删除,已完成常规同步。") + else: + print("[sync-config] 无多余配置项需要删除。") + if args.stdout: print("\n--- merged config.toml ---\n") print(result.content, end="") diff --git a/src/Undefined/ai/llm.py b/src/Undefined/ai/llm.py index 41d93b61..931f13bc 100644 --- a/src/Undefined/ai/llm.py +++ b/src/Undefined/ai/llm.py @@ -25,7 +25,9 @@ API_MODE_RESPONSES, build_responses_request_body, get_api_mode, - get_reasoning_payload, + get_effort_payload, + get_effort_style, + get_thinking_payload, normalize_responses_result, ) from Undefined.ai.retrieval import RetrievalRequester @@ -117,8 +119,10 @@ "tool_choice", "stream", "stream_options", + "thinking", "reasoning", "reasoning_effort", + "output_config", } ) | _SDK_REQUEST_OPTION_FIELDS @@ -139,6 +143,7 @@ "thinking", "reasoning", "reasoning_effort", + "output_config", } ) | _SDK_REQUEST_OPTION_FIELDS @@ -870,6 +875,8 @@ def _build_effective_request_kwargs( getattr(model_config, "request_params", {}), overrides, ) + thinking_override = overrides["thinking"] if "thinking" in overrides else None + has_thinking_override = "thinking" in overrides reserved_fields = ( _RESPONSES_RESERVED_FIELDS if get_api_mode(model_config) == API_MODE_RESPONSES @@ -879,11 +886,15 @@ def _build_effective_request_kwargs( merged, 
reserved_fields, ) + if has_thinking_override: + ignored.pop("thinking", None) _warn_ignored_request_params( call_type=call_type, model_name=model_config.model_name, ignored=ignored, ) + if has_thinking_override: + allowed["thinking"] = thinking_override return allowed @@ -1447,12 +1458,20 @@ def build_request_body( """构建 API 请求体。""" api_mode = get_api_mode(model_config) extra_kwargs: dict[str, Any] = dict(kwargs) - reasoning_payload = get_reasoning_payload(model_config) + + if "thinking" in extra_kwargs: + normalized = _normalize_thinking_override( + extra_kwargs.get("thinking"), model_config + ) + if normalized is None: + extra_kwargs.pop("thinking", None) + else: + extra_kwargs["thinking"] = normalized if api_mode == API_MODE_RESPONSES: - extra_kwargs.pop("thinking", None) extra_kwargs.pop("reasoning", None) extra_kwargs.pop("reasoning_effort", None) + extra_kwargs.pop("output_config", None) return build_responses_request_body( model_config, messages, @@ -1470,28 +1489,21 @@ def build_request_body( "max_tokens": max_tokens, } - if "thinking" in extra_kwargs: - normalized = _normalize_thinking_override( - extra_kwargs.get("thinking"), model_config - ) - if normalized is None: - extra_kwargs.pop("thinking", None) - else: - extra_kwargs["thinking"] = normalized - extra_kwargs.pop("reasoning", None) extra_kwargs.pop("reasoning_effort", None) + extra_kwargs.pop("output_config", None) - if getattr(model_config, "thinking_enabled", False): - thinking_param: dict[str, Any] = {"type": "enabled"} - if getattr(model_config, "thinking_include_budget", True): - thinking_param["budget_tokens"] = getattr( - model_config, "thinking_budget_tokens", 0 - ) - body["thinking"] = thinking_param + thinking = get_thinking_payload(model_config) + if thinking is not None: + body["thinking"] = thinking - if reasoning_payload is not None: - body["reasoning"] = reasoning_payload + effort_payload = get_effort_payload(model_config) + if effort_payload is not None: + style = 
get_effort_style(model_config) + if style == "anthropic": + body["output_config"] = effort_payload + else: + body["reasoning"] = effort_payload if tools: body["tools"] = tools diff --git a/src/Undefined/ai/transports/__init__.py b/src/Undefined/ai/transports/__init__.py index 9f33ccf8..74f527fc 100644 --- a/src/Undefined/ai/transports/__init__.py +++ b/src/Undefined/ai/transports/__init__.py @@ -5,7 +5,10 @@ API_MODE_RESPONSES, build_responses_request_body, get_api_mode, + get_effort_payload, + get_effort_style, get_reasoning_payload, + get_thinking_payload, normalize_api_mode, normalize_reasoning_effort, normalize_responses_result, @@ -16,7 +19,10 @@ "API_MODE_RESPONSES", "build_responses_request_body", "get_api_mode", + "get_effort_payload", + "get_effort_style", "get_reasoning_payload", + "get_thinking_payload", "normalize_api_mode", "normalize_reasoning_effort", "normalize_responses_result", diff --git a/src/Undefined/ai/transports/openai_transport.py b/src/Undefined/ai/transports/openai_transport.py index 5c6d7e7e..c5ab32a2 100644 --- a/src/Undefined/ai/transports/openai_transport.py +++ b/src/Undefined/ai/transports/openai_transport.py @@ -6,7 +6,6 @@ API_MODE_CHAT_COMPLETIONS = "chat_completions" API_MODE_RESPONSES = "responses" _VALID_API_MODES = {API_MODE_CHAT_COMPLETIONS, API_MODE_RESPONSES} -_VALID_REASONING_EFFORTS = {"none", "minimal", "low", "medium", "high", "xhigh"} def normalize_api_mode(value: Any, default: str = API_MODE_CHAT_COMPLETIONS) -> str: @@ -23,13 +22,28 @@ def get_api_mode(model_config: Any) -> str: def normalize_reasoning_effort(value: Any, default: str = "medium") -> str: - text = str(value or default).strip().lower() - if text not in _VALID_REASONING_EFFORTS: - return default - return text + return str(value or default).strip().lower() def get_reasoning_payload(model_config: Any) -> dict[str, Any] | None: + return get_effort_payload(model_config) + + +_VALID_REASONING_EFFORT_STYLES = {"openai", "anthropic"} + + +def 
get_thinking_payload(model_config: Any) -> dict[str, Any] | None: + """构建 thinking 请求参数,仅由 thinking_* 配置控制。""" + if not bool(getattr(model_config, "thinking_enabled", False)): + return None + param: dict[str, Any] = {"type": "enabled"} + if bool(getattr(model_config, "thinking_include_budget", True)): + param["budget_tokens"] = int(getattr(model_config, "thinking_budget_tokens", 0)) + return param + + +def get_effort_payload(model_config: Any) -> dict[str, Any] | None: + """构建 effort 请求参数(仅在 reasoning_enabled 启用时生效)。""" if not bool(getattr(model_config, "reasoning_enabled", False)): return None return { @@ -39,6 +53,15 @@ def get_reasoning_payload(model_config: Any) -> dict[str, Any] | None: } +def get_effort_style(model_config: Any) -> str: + style = ( + str(getattr(model_config, "reasoning_effort_style", "openai") or "openai") + .strip() + .lower() + ) + return style if style in _VALID_REASONING_EFFORT_STYLES else "openai" + + def _stringify_content(value: Any) -> str: if value is None: return "" @@ -334,9 +357,16 @@ def build_responses_request_body( "model": getattr(model_config, "model_name"), "max_output_tokens": max_tokens, } - reasoning = get_reasoning_payload(model_config) - if reasoning is not None: - body["reasoning"] = reasoning + thinking = get_thinking_payload(model_config) + effort_payload = get_effort_payload(model_config) + if effort_payload is not None: + style = get_effort_style(model_config) + if style == "anthropic": + body["output_config"] = effort_payload + else: + body["reasoning"] = effort_payload + if thinking is not None: + body["thinking"] = thinking if tools: normalized_tools = _normalize_responses_tools(tools, internal_to_api) normalized_tool_choice, selected_tool_name = _normalize_responses_tool_choice( diff --git a/src/Undefined/config/loader.py b/src/Undefined/config/loader.py index 2d36a049..7ff8f5f2 100644 --- a/src/Undefined/config/loader.py +++ b/src/Undefined/config/loader.py @@ -257,7 +257,14 @@ def _get_value( _VALID_API_MODES 
= {"chat_completions", "responses"} -_VALID_REASONING_EFFORTS = {"none", "minimal", "low", "medium", "high", "xhigh"} +_VALID_REASONING_EFFORT_STYLES = {"openai", "anthropic"} + + +def _resolve_reasoning_effort_style(value: Any, default: str = "openai") -> str: + style = _coerce_str(value, default).strip().lower() + if style not in _VALID_REASONING_EFFORT_STYLES: + return default + return style def _resolve_thinking_compat_flags( @@ -311,10 +318,7 @@ def _resolve_api_mode( def _resolve_reasoning_effort(value: Any, default: str = "medium") -> str: - effort = _coerce_str(value, default).strip().lower() - if effort not in _VALID_REASONING_EFFORTS: - return default - return effort + return _coerce_str(value, default).strip().lower() def _resolve_responses_tool_choice_compat( @@ -1599,6 +1603,10 @@ def _parse_model_pool( item.get("thinking_include_budget"), primary_config.thinking_include_budget, ), + reasoning_effort_style=_resolve_reasoning_effort_style( + item.get("reasoning_effort_style"), + primary_config.reasoning_effort_style, + ), thinking_tool_call_compat=_coerce_bool( item.get("thinking_tool_call_compat"), primary_config.thinking_tool_call_compat, @@ -1785,6 +1793,13 @@ def _parse_chat_model_config(data: dict[str, Any]) -> ChatModelConfig: 20000, ), thinking_include_budget=thinking_include_budget, + reasoning_effort_style=_resolve_reasoning_effort_style( + _get_value( + data, + ("models", "chat", "reasoning_effort_style"), + "CHAT_MODEL_REASONING_EFFORT_STYLE", + ), + ), thinking_tool_call_compat=thinking_tool_call_compat, responses_tool_choice_compat=responses_tool_choice_compat, responses_force_stateless_replay=responses_force_stateless_replay, @@ -1877,6 +1892,13 @@ def _parse_vision_model_config(data: dict[str, Any]) -> VisionModelConfig: 20000, ), thinking_include_budget=thinking_include_budget, + reasoning_effort_style=_resolve_reasoning_effort_style( + _get_value( + data, + ("models", "vision", "reasoning_effort_style"), + 
"VISION_MODEL_REASONING_EFFORT_STYLE", + ), + ), thinking_tool_call_compat=thinking_tool_call_compat, responses_tool_choice_compat=responses_tool_choice_compat, responses_force_stateless_replay=responses_force_stateless_replay, @@ -1983,6 +2005,13 @@ def _parse_security_model_config( 0, ), thinking_include_budget=thinking_include_budget, + reasoning_effort_style=_resolve_reasoning_effort_style( + _get_value( + data, + ("models", "security", "reasoning_effort_style"), + "SECURITY_MODEL_REASONING_EFFORT_STYLE", + ), + ), thinking_tool_call_compat=thinking_tool_call_compat, responses_tool_choice_compat=responses_tool_choice_compat, responses_force_stateless_replay=responses_force_stateless_replay, @@ -2002,6 +2031,7 @@ def _parse_security_model_config( thinking_enabled=False, thinking_budget_tokens=0, thinking_include_budget=True, + reasoning_effort_style="openai", thinking_tool_call_compat=chat_model.thinking_tool_call_compat, responses_tool_choice_compat=chat_model.responses_tool_choice_compat, responses_force_stateless_replay=chat_model.responses_force_stateless_replay, @@ -2092,6 +2122,13 @@ def _parse_agent_model_config(data: dict[str, Any]) -> AgentModelConfig: 0, ), thinking_include_budget=thinking_include_budget, + reasoning_effort_style=_resolve_reasoning_effort_style( + _get_value( + data, + ("models", "agent", "reasoning_effort_style"), + "AGENT_MODEL_REASONING_EFFORT_STYLE", + ), + ), thinking_tool_call_compat=thinking_tool_call_compat, responses_tool_choice_compat=responses_tool_choice_compat, responses_force_stateless_replay=responses_force_stateless_replay, @@ -2268,6 +2305,14 @@ def _parse_historian_model_config( h.get("thinking_budget_tokens"), fallback.thinking_budget_tokens ), thinking_include_budget=thinking_include_budget, + reasoning_effort_style=_resolve_reasoning_effort_style( + _get_value( + {"models": {"historian": h}}, + ("models", "historian", "reasoning_effort_style"), + "HISTORIAN_MODEL_REASONING_EFFORT_STYLE", + ), + 
fallback.reasoning_effort_style, + ), thinking_tool_call_compat=thinking_tool_call_compat, responses_tool_choice_compat=responses_tool_choice_compat, responses_force_stateless_replay=responses_force_stateless_replay, diff --git a/src/Undefined/config/models.py b/src/Undefined/config/models.py index 9454a9b9..3b073d11 100644 --- a/src/Undefined/config/models.py +++ b/src/Undefined/config/models.py @@ -38,6 +38,7 @@ class ModelPoolEntry: thinking_enabled: bool = False thinking_budget_tokens: int = 0 thinking_include_budget: bool = True + reasoning_effort_style: str = "openai" # effort 传参风格:openai / anthropic thinking_tool_call_compat: bool = True responses_tool_choice_compat: bool = False responses_force_stateless_replay: bool = False @@ -68,6 +69,7 @@ class ChatModelConfig: thinking_enabled: bool = False # 是否启用 thinking thinking_budget_tokens: int = 20000 # 思维预算 token 数量 thinking_include_budget: bool = True # 是否在请求中发送 budget_tokens + reasoning_effort_style: str = "openai" # effort 传参风格:openai / anthropic thinking_tool_call_compat: bool = ( True # 思维链 + 工具调用兼容(回传 reasoning_content) ) @@ -93,6 +95,7 @@ class VisionModelConfig: thinking_enabled: bool = False # 是否启用 thinking thinking_budget_tokens: int = 20000 # 思维预算 token 数量 thinking_include_budget: bool = True # 是否在请求中发送 budget_tokens + reasoning_effort_style: str = "openai" # effort 传参风格:openai / anthropic thinking_tool_call_compat: bool = ( True # 思维链 + 工具调用兼容(回传 reasoning_content) ) @@ -118,6 +121,7 @@ class SecurityModelConfig: thinking_enabled: bool = False # 是否启用 thinking thinking_budget_tokens: int = 0 # 思维预算 token 数量 thinking_include_budget: bool = True # 是否在请求中发送 budget_tokens + reasoning_effort_style: str = "openai" # effort 传参风格:openai / anthropic thinking_tool_call_compat: bool = ( True # 思维链 + 工具调用兼容(回传 reasoning_content) ) @@ -169,6 +173,7 @@ class AgentModelConfig: thinking_enabled: bool = False # 是否启用 thinking thinking_budget_tokens: int = 0 # 思维预算 token 数量 thinking_include_budget: bool = True # 
是否在请求中发送 budget_tokens + reasoning_effort_style: str = "openai" # effort 传参风格:openai / anthropic thinking_tool_call_compat: bool = ( True # 思维链 + 工具调用兼容(回传 reasoning_content) ) diff --git a/src/Undefined/skills/agents/README.md b/src/Undefined/skills/agents/README.md index 8f46a53a..81f7d794 100644 --- a/src/Undefined/skills/agents/README.md +++ b/src/Undefined/skills/agents/README.md @@ -44,8 +44,8 @@ responses_force_stateless_replay = false ``` 说明: -- `api_mode = "chat_completions"` 时,旧 `thinking_*` 仍按原逻辑生效;若开启 `reasoning_enabled`,也会额外发送 `reasoning.effort`。 -- `api_mode = "responses"` 时,Agent 的多轮工具调用默认使用 `previous_response_id + function_call_output` 续轮;若开启 `responses_force_stateless_replay`,则会始终改为完整消息重放;旧 `thinking_*` 不会发到 `responses`。 +- `api_mode = "chat_completions"` 时,`thinking_*` 仍按原逻辑生效;若开启 `reasoning_enabled`,会额外发送 `reasoning.effort`。 +- `api_mode = "responses"` 时,`thinking_*` 与 `reasoning_*` 分别独立控制 `thinking` 和 `reasoning.effort` / `output_config.effort`;Agent 的多轮工具调用默认使用 `previous_response_id + function_call_output` 续轮;若开启 `responses_force_stateless_replay`,则会始终改为完整消息重放。 - `thinking_tool_call_compat` 默认 `true`,会把 `reasoning_content` 回填到本地消息历史,便于日志、回放和兼容读取。 兼容的环境变量(会覆盖 `config.toml`): diff --git a/src/Undefined/skills/commands/naga/README.md b/src/Undefined/skills/commands/naga/README.md new file mode 100644 index 00000000..a7c285a5 --- /dev/null +++ b/src/Undefined/skills/commands/naga/README.md @@ -0,0 +1,66 @@ +# /naga 命令说明 + +## 这是什么? + +NagaAgent 是一个可以接入 Undefined 的外部 AI 助手。 +通过 `/naga` 命令,你可以把自己的 NagaAgent 绑定到 QQ 群,绑定之后,NagaAgent 里的特定功能就可以向你发送消息。 + +## 普通用户 + +普通用户只需要用到一个子命令:`bind`(绑定)。 + +### 如何绑定? + +1. 在**群聊**中发送:`/naga bind <你的naga_id>` +2. 系统会提示"申请已提交,等待超管审核" +3. 超级管理员审核通过后,你会收到私聊通知 +4. 
绑定完成!你的 NagaAgent 即可开始使用 + +### 注意事项 + +- `naga_id` 是你在 NagaAgent 中设置的标识,不是 QQ 号 +- 每个 `naga_id` 只能绑定一次,不能重复申请 +- 如果已在审核队列中,无需重复提交 + +## 管理员命令(仅超级管理员) + +以下命令仅超级管理员可使用,用于管理所有绑定: + +| 子命令 | 用法 | 说明 | +|--------|------|------| +| approve | `/naga approve <naga_id>` | 通过绑定申请,系统会自动生成 Token 并通知申请人 | +| reject | `/naga reject <naga_id>` | 拒绝绑定申请,申请人会收到私聊通知 | +| revoke | `/naga revoke <naga_id>` | 吊销已有绑定,该 NagaAgent 将无法继续使用 | +| list | `/naga list` | 查看所有活跃的绑定(含使用次数) | +| pending | `/naga pending` | 查看等待审核的申请列表 | +| info | `/naga info <naga_id>` | 查看指定绑定的详细信息(Token、使用次数、创建时间等) | + +## 完整示例 + +``` +# 普通用户:在群聊中提交绑定申请 +/naga bind my-naga-001 + +# 超级管理员:查看待审核列表 +/naga pending + +# 超级管理员:通过申请 +/naga approve my-naga-001 + +# 超级管理员:查看绑定详情 +/naga info my-naga-001 + +# 超级管理员:吊销绑定 +/naga revoke my-naga-001 +``` + +## 常见问题 + +**Q: 提示"Naga 集成未启用"?** +A: 请联系管理员开启相关配置开关。 + +**Q: 在群里发了 /naga bind 没有任何反应?** +A: 该群可能不在白名单中,请联系管理员添加。 + +**Q: 绑定通过后 NagaAgent 怎么用?** +A: 请参考 NagaAgent 相关文档,填入 QQ 号以及其他几个参数即可完成对接。 \ No newline at end of file diff --git a/src/Undefined/skills/commands/naga/config.json b/src/Undefined/skills/commands/naga/config.json index 5086e70f..2f840a79 100644 --- a/src/Undefined/skills/commands/naga/config.json +++ b/src/Undefined/skills/commands/naga/config.json @@ -1,7 +1,8 @@ { "name": "naga", - "description": "NagaAgent 集成管理", - "usage": "/naga [参数]", + "description": "NagaAgent 联动命令", + "usage": "/naga <子命令> [参数]", + "example": "/naga bind my-naga", "permission": "public", "allow_in_private": true, "show_in_help": true, diff --git a/src/Undefined/webui/routes/_config.py b/src/Undefined/webui/routes/_config.py index a284d807..0afeaa01 100644 --- a/src/Undefined/webui/routes/_config.py +++ b/src/Undefined/webui/routes/_config.py @@ -1,3 +1,4 @@ +import asyncio import tomllib from pathlib import Path from tempfile import NamedTemporaryFile @@ -151,16 +152,26 @@ async def config_patch_handler(request: web.Request) -> Response: async def sync_config_template_handler(request: web.Request) -> Response: if not
check_auth(request): return web.json_response({"error": "Unauthorized"}, status=401) + prune = request.query.get("prune") == "true" + write = request.query.get("write", "true").lower() != "false" try: - result = sync_config_file() - get_config_manager().reload() - validation_ok, validation_msg = validate_required_config() + result = await asyncio.to_thread(sync_config_file, prune=prune, write=write) + validation_ok = True + validation_msg: str | None = None + if write: + await asyncio.to_thread(get_config_manager().reload) + validation_ok, validation_msg = await asyncio.to_thread( + validate_required_config + ) return web.json_response( { "success": True, "message": "Synced", + "preview": not write, "added_paths": result.added_paths, "added_count": len(result.added_paths), + "removed_paths": result.removed_paths, + "removed_count": len(result.removed_paths), "warning": None if validation_ok else validation_msg, } ) diff --git a/src/Undefined/webui/static/js/config-form.js b/src/Undefined/webui/static/js/config-form.js index 328b6cf6..aafb0048 100644 --- a/src/Undefined/webui/static/js/config-form.js +++ b/src/Undefined/webui/static/js/config-form.js @@ -251,7 +251,7 @@ function isLongText(value) { const FIELD_SELECT_OPTIONS = { api_mode: ["chat_completions", "responses"], - reasoning_effort: ["none", "minimal", "low", "medium", "high", "xhigh"], + reasoning_effort_style: ["openai", "anthropic"], }; function getFieldSelectOptions(path) { @@ -905,6 +905,14 @@ function buildAotTemplate(path, arr) { ) { template.reasoning_effort = "medium"; } + if ( + !Object.prototype.hasOwnProperty.call( + template, + "reasoning_effort_style", + ) + ) { + template.reasoning_effort_style = "openai"; + } } return template; } @@ -914,6 +922,7 @@ function buildAotTemplate(path, arr) { api_key: "", api_mode: "chat_completions", thinking_tool_call_compat: true, + reasoning_effort_style: "openai", responses_tool_choice_compat: false, responses_force_stateless_replay: false, 
reasoning_enabled: false, @@ -1058,30 +1067,74 @@ async function syncConfigTemplate(button) { setButtonLoading(button, true); showSaveStatus("saving", t("config.syncing")); try { - const res = await api("/api/config/sync-template", { method: "POST" }); - const data = await res.json(); - if (!data.success) { + const previewRes = await api("/api/config/sync-template?write=false", { + method: "POST", + }); + const previewData = await previewRes.json(); + if (!previewData.success) { showSaveStatus("error", t("config.save_error")); showToast( - `${t("common.error")}: ${data.error || t("config.sync_error")}`, + `${t("common.error")}: ${previewData.error || t("config.sync_error")}`, "error", 5000, ); return; } + + let shouldPrune = false; + if ( + Array.isArray(previewData.removed_paths) && + previewData.removed_paths.length > 0 + ) { + const listing = previewData.removed_paths + .map((p) => ` - ${p}`) + .join("\n"); + shouldPrune = confirm( + `${t("config.prune_confirm")}\n\n${listing}\n\n${t("config.prune_confirm_action")}`, + ); + } + + const finalUrl = shouldPrune + ? "/api/config/sync-template?prune=true" + : "/api/config/sync-template"; + const finalRes = await api(finalUrl, { method: "POST" }); + const finalData = await finalRes.json(); + if (!finalData.success) { + showSaveStatus("error", t("config.save_error")); + showToast( + `${t("common.error")}: ${finalData.error || t("config.sync_error")}`, + "error", + 5000, + ); + return; + } + await loadConfig(); showSaveStatus("saved", t("config.saved")); - if (data.warning) { + if (finalData.warning) { showToast( - `${t("common.warning")}: ${data.warning}`, + `${t("common.warning")}: ${finalData.warning}`, "warning", 5000, ); } - const suffix = Number.isFinite(data.added_count) - ? ` (+${data.added_count})` - : ""; - showToast(`${t("config.sync_success")}${suffix}`, "info", 4000); + if (shouldPrune) { + const removedCount = Number.isFinite(finalData.removed_count) + ? 
finalData.removed_count + : Array.isArray(finalData.removed_paths) + ? finalData.removed_paths.length + : 0; + showToast( + `${t("config.prune_success")} (-${removedCount})`, + "info", + 4000, + ); + } else { + const suffix = Number.isFinite(finalData.added_count) + ? ` (+${finalData.added_count})` + : ""; + showToast(`${t("config.sync_success")}${suffix}`, "info", 4000); + } } catch (e) { showSaveStatus("error", t("config.sync_error")); showToast(`${t("common.error")}: ${e.message}`, "error", 5000); diff --git a/src/Undefined/webui/static/js/i18n.js b/src/Undefined/webui/static/js/i18n.js index 2959eec8..b6027322 100644 --- a/src/Undefined/webui/static/js/i18n.js +++ b/src/Undefined/webui/static/js/i18n.js @@ -79,6 +79,9 @@ const I18N = { "config.syncing": "同步模板中...", "config.sync_success": "模板同步完成", "config.sync_error": "模板同步失败", + "config.prune_confirm": "以下配置项不存在于模板中,是否删除?", + "config.prune_confirm_action": "点击「确定」将永久删除以上配置项。", + "config.prune_success": "已清理多余配置项", "config.search_placeholder": "搜索配置...", "config.clear_search": "清除搜索", "config.expand_all": "全部展开", @@ -307,6 +310,11 @@ const I18N = { "config.syncing": "Syncing template...", "config.sync_success": "Template sync completed", "config.sync_error": "Template sync failed", + "config.prune_confirm": + "The following config keys do not exist in the template. 
Delete them?", + "config.prune_confirm_action": + "Click OK to permanently remove the listed keys.", + "config.prune_success": "Removed obsolete config keys", "config.search_placeholder": "Search config...", "config.clear_search": "Clear search", "config.expand_all": "Expand all", diff --git a/src/Undefined/webui/utils/config_sync.py b/src/Undefined/webui/utils/config_sync.py index 61a88626..46c16eef 100644 --- a/src/Undefined/webui/utils/config_sync.py +++ b/src/Undefined/webui/utils/config_sync.py @@ -12,11 +12,14 @@ from .config_io import CONFIG_EXAMPLE_PATH, _resolve_config_example_path from .toml_render import TomlData, merge_defaults, render_toml +_PASSTHROUGH_KEYS = {"request_params"} + @dataclass(frozen=True) class ConfigTemplateSyncResult: content: str added_paths: list[str] + removed_paths: list[str] comments: CommentMap @@ -64,6 +67,73 @@ def _collect_added_paths( return added +def _collect_removed_paths( + defaults: TomlData, current: TomlData, prefix: str = "" +) -> list[str]: + """收集存在于 current 但不在 defaults 中的路径(与 _collect_added_paths 互逆)。""" + removed: list[str] = [] + for key, current_value in current.items(): + path = f"{prefix}.{key}" if prefix else key + if key not in defaults: + removed.append(path) + continue + default_value = defaults[key] + if isinstance(current_value, dict) and isinstance(default_value, dict): + if _should_skip_passthrough_recursion(key, default_value): + continue + removed.extend(_collect_removed_paths(default_value, current_value, path)) + elif _is_array_of_tables(current_value) and _is_array_of_tables(default_value): + template_item = default_value[0] + for index, current_item in enumerate(current_value): + default_item = ( + default_value[index] + if index < len(default_value) + else template_item + ) + removed.extend( + _collect_removed_paths( + default_item, + current_item, + f"{path}[{index}]", + ) + ) + return removed + + +def _prune_to_template( + data: TomlData, template: TomlData, prefix: str = "" +) -> TomlData: 
+ """递归移除 data 中不存在于 template 的键。""" + pruned: TomlData = {} + for key, value in data.items(): + path = f"{prefix}.{key}" if prefix else key + if key not in template: + continue + template_value = template[key] + if isinstance(value, dict) and isinstance(template_value, dict): + if _should_skip_passthrough_recursion(key, template_value): + pruned[key] = value + else: + pruned[key] = _prune_to_template(value, template_value, path) + elif _is_array_of_tables(value) and _is_array_of_tables(template_value): + tpl_item = template_value[0] + pruned[key] = [ + _prune_to_template( + item, + template_value[idx] if idx < len(template_value) else tpl_item, + f"{path}[{idx}]", + ) + for idx, item in enumerate(value) + ] + else: + pruned[key] = value + return pruned + + +def _should_skip_passthrough_recursion(key: str, template_value: TomlData) -> bool: + return not template_value and key in _PASSTHROUGH_KEYS + + def _is_array_of_tables(value: Any) -> bool: return ( isinstance(value, list) @@ -163,12 +233,20 @@ def _merge_comment_maps(current: CommentMap, example: CommentMap) -> CommentMap: return merged -def sync_config_text(current_text: str, example_text: str) -> ConfigTemplateSyncResult: +def sync_config_text( + current_text: str, + example_text: str, + *, + prune: bool = False, +) -> ConfigTemplateSyncResult: current_data = _parse_toml_text(current_text, label="current config") example_data = _parse_toml_text(example_text, label="config example") prepared_example_data = _prepare_pool_model_templates(example_data, current_data) added_paths = _collect_added_paths(prepared_example_data, current_data) + removed_paths = _collect_removed_paths(prepared_example_data, current_data) merged = merge_defaults(prepared_example_data, current_data) + if prune and removed_paths: + merged = _prune_to_template(merged, prepared_example_data) example_comments = parse_comment_map_text(example_text) comments = _merge_comment_maps( parse_comment_map_text(current_text), @@ -178,6 +256,7 @@ def 
sync_config_text(current_text: str, example_text: str) -> ConfigTemplateSync return ConfigTemplateSyncResult( content=content, added_paths=added_paths, + removed_paths=removed_paths, comments=comments, ) @@ -187,6 +266,7 @@ def sync_config_file( example_path: Path = CONFIG_EXAMPLE_PATH, *, write: bool = True, + prune: bool = False, ) -> ConfigTemplateSyncResult: resolved_example = _resolve_config_example_path(example_path) if resolved_example is None or not resolved_example.exists(): @@ -196,7 +276,7 @@ def sync_config_file( config_path.read_text(encoding="utf-8") if config_path.exists() else "" ) example_text = resolved_example.read_text(encoding="utf-8") - result = sync_config_text(current_text, example_text) + result = sync_config_text(current_text, example_text, prune=prune) if write: config_path.write_text(result.content, encoding="utf-8") return result diff --git a/tests/test_config_template_sync.py b/tests/test_config_template_sync.py index e57c2189..c26e107b 100644 --- a/tests/test_config_template_sync.py +++ b/tests/test_config_template_sync.py @@ -115,3 +115,74 @@ def test_sync_config_text_merges_new_fields_into_existing_pool_model_entries() - assert model["request_params"]["temperature"] == 0.2 assert "models.chat.pool.models[0].api_mode" in result.added_paths assert "models.chat.pool.models[0].request_params" in result.added_paths + + +def test_sync_config_text_prune_preserves_passthrough_request_params() -> None: + current = """ +[models.chat] +api_url = "https://primary.example/v1" +api_key = "primary-key" +model_name = "primary-model" + +[models.chat.request_params] +temperature = 0.2 + +[models.chat.request_params.metadata] +tier = "gold" + +[[models.chat.request_params.tags]] +name = "alpha" + +[models.chat.extra] +flag = true +""" + example = """ +[models.chat] +api_url = "" +api_key = "" +model_name = "" + +[models.chat.request_params] +""" + + result = sync_config_text(current, example, prune=True) + parsed = tomllib.loads(result.content) + 
request_params = parsed["models"]["chat"]["request_params"] + + assert result.removed_paths == ["models.chat.extra"] + assert request_params["temperature"] == 0.2 + assert request_params["metadata"]["tier"] == "gold" + assert request_params["tags"][0]["name"] == "alpha" + assert "extra" not in parsed["models"]["chat"] + + +def test_sync_config_text_prune_only_preserves_exact_passthrough_keys() -> None: + current = """ +[models.chat] +api_url = "https://primary.example/v1" +api_key = "primary-key" +model_name = "primary-model" + +[models.chat.request_params] +temperature = 0.2 + +[models.chat.custom_request_params] +temperature = 0.8 +""" + example = """ +[models.chat] +api_url = "" +api_key = "" +model_name = "" + +[models.chat.request_params] + +[models.chat.custom_request_params] +""" + + result = sync_config_text(current, example, prune=True) + parsed = tomllib.loads(result.content) + + assert result.removed_paths == ["models.chat.custom_request_params.temperature"] + assert parsed["models"]["chat"]["request_params"]["temperature"] == 0.2 + assert parsed["models"]["chat"]["custom_request_params"] == {} diff --git a/tests/test_llm_request_params.py b/tests/test_llm_request_params.py index 3c2cb7b5..2d8e68ec 100644 --- a/tests/test_llm_request_params.py +++ b/tests/test_llm_request_params.py @@ -193,7 +193,6 @@ async def test_responses_request_normalizes_tool_calls_and_usage() -> None: } ], tool_choice=cast(Any, {"type": "function", "function": {"name": "lookup"}}), - thinking={"enabled": False, "budget_tokens": 0}, ) assert fake_client.responses.last_kwargs is not None @@ -235,6 +234,61 @@ async def test_responses_request_normalizes_tool_calls_and_usage() -> None: "tool_result_start_index": 2, } + await requester._http_client.aclose() + + +@pytest.mark.asyncio +async def test_responses_request_respects_explicit_thinking_override() -> None: + requester = ModelRequester( + http_client=httpx.AsyncClient(), + token_usage_storage=cast(TokenUsageStorage, 
_FakeUsageStorage()), + ) + fake_client = _FakeClient( + responses=[ + { + "id": "resp_1", + "output": [ + { + "type": "message", + "role": "assistant", + "content": [{"type": "output_text", "text": "hi"}], + }, + ], + "usage": {"input_tokens": 5, "output_tokens": 3, "total_tokens": 8}, + } + ] + ) + setattr( + requester, + "_get_openai_client_for_model", + lambda _cfg: cast(AsyncOpenAI, fake_client), + ) + cfg = ChatModelConfig( + api_url="https://api.openai.com/v1", + api_key="sk-test", + model_name="gpt-test", + max_tokens=512, + api_mode="responses", + reasoning_enabled=True, + reasoning_effort="low", + ) + + await requester.request( + model_config=cfg, + messages=[{"role": "user", "content": "hello"}], + max_tokens=128, + call_type="chat", + thinking={"enabled": False, "budget_tokens": 0}, + ) + + assert fake_client.responses.last_kwargs is not None + assert fake_client.responses.last_kwargs["reasoning"] == {"effort": "low"} + assert fake_client.responses.last_kwargs["extra_body"] == { + "thinking": {"budget_tokens": 0, "type": "disabled"}, + } + + await requester._http_client.aclose() + def test_normalize_responses_result_falls_back_to_output_text_and_scalar_content() -> ( None @@ -928,3 +982,136 @@ async def test_responses_tools_and_tool_choice_use_sanitized_api_names() -> None ) await requester._http_client.aclose() + + +@pytest.mark.asyncio +async def test_thinking_effort_anthropic_style_chat_completions() -> None: + """thinking_enabled + anthropic style → legacy thinking + output_config.effort.""" + requester = ModelRequester( + http_client=httpx.AsyncClient(), + token_usage_storage=cast(TokenUsageStorage, _FakeUsageStorage()), + ) + fake_client = _FakeClient() + setattr( + requester, + "_get_openai_client_for_model", + lambda _cfg: cast(AsyncOpenAI, fake_client), + ) + cfg = ChatModelConfig( + api_url="https://api.anthropic.com/v1", + api_key="sk-test", + model_name="claude-test", + max_tokens=4096, + thinking_enabled=True, + thinking_budget_tokens=8000, 
+ reasoning_enabled=True, + reasoning_effort="max", + reasoning_effort_style="anthropic", + ) + + await requester.request( + model_config=cfg, + messages=[{"role": "user", "content": "hello"}], + max_tokens=1024, + call_type="chat", + ) + + kw = fake_client.chat.completions.last_kwargs + assert kw is not None + assert kw["extra_body"]["thinking"] == {"type": "enabled", "budget_tokens": 8000} + assert kw["extra_body"]["output_config"] == {"effort": "max"} + assert "reasoning" not in kw.get("extra_body", {}) + + await requester._http_client.aclose() + + +@pytest.mark.asyncio +async def test_thinking_effort_openai_style_responses() -> None: + """thinking_enabled + openai style → legacy thinking + reasoning.effort.""" + requester = ModelRequester( + http_client=httpx.AsyncClient(), + token_usage_storage=cast(TokenUsageStorage, _FakeUsageStorage()), + ) + fake_client = _FakeClient( + responses=[ + { + "id": "resp_1", + "output": [ + { + "type": "message", + "role": "assistant", + "content": [{"type": "output_text", "text": "hi"}], + }, + ], + "usage": {"input_tokens": 5, "output_tokens": 3, "total_tokens": 8}, + } + ] + ) + setattr( + requester, + "_get_openai_client_for_model", + lambda _cfg: cast(AsyncOpenAI, fake_client), + ) + cfg = ChatModelConfig( + api_url="https://api.openai.com/v1", + api_key="sk-test", + model_name="gpt-test", + max_tokens=4096, + api_mode="responses", + thinking_enabled=True, + thinking_budget_tokens=8000, + reasoning_enabled=True, + reasoning_effort="high", + reasoning_effort_style="openai", + ) + + await requester.request( + model_config=cfg, + messages=[{"role": "user", "content": "hello"}], + max_tokens=1024, + call_type="chat", + ) + + kw = fake_client.responses.last_kwargs + assert kw is not None + assert kw["extra_body"]["thinking"] == {"type": "enabled", "budget_tokens": 8000} + assert kw["reasoning"] == {"effort": "high"} + assert "output_config" not in kw and "output_config" not in kw.get("extra_body", {}) + + await 
requester._http_client.aclose() + + +@pytest.mark.asyncio +async def test_thinking_enabled_legacy_budget_tokens() -> None: + """thinking_enabled=True + no effort → legacy budget_tokens mode.""" + requester = ModelRequester( + http_client=httpx.AsyncClient(), + token_usage_storage=cast(TokenUsageStorage, _FakeUsageStorage()), + ) + fake_client = _FakeClient() + setattr( + requester, + "_get_openai_client_for_model", + lambda _cfg: cast(AsyncOpenAI, fake_client), + ) + cfg = ChatModelConfig( + api_url="https://api.openai.com/v1", + api_key="sk-test", + model_name="gpt-test", + max_tokens=4096, + thinking_enabled=True, + thinking_budget_tokens=8000, + ) + + await requester.request( + model_config=cfg, + messages=[{"role": "user", "content": "hello"}], + max_tokens=1024, + call_type="chat", + ) + + kw = fake_client.chat.completions.last_kwargs + assert kw is not None + assert kw["extra_body"]["thinking"] == {"type": "enabled", "budget_tokens": 8000} + + await requester._http_client.aclose() diff --git a/tests/test_sync_config_template_script.py b/tests/test_sync_config_template_script.py new file mode 100644 index 00000000..3f14aec4 --- /dev/null +++ b/tests/test_sync_config_template_script.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +import importlib.util +import sys +from pathlib import Path +from types import ModuleType, SimpleNamespace + +import pytest + + +def _load_script_module() -> ModuleType: + script_path = ( + Path(__file__).resolve().parent.parent / "scripts" / "sync_config_template.py" + ) + spec = importlib.util.spec_from_file_location( + "sync_config_template_script", script_path + ) + assert spec is not None + assert spec.loader is not None + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def test_prune_mode_reports_analysis_before_write( + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], +) -> None: + module = _load_script_module() + calls: list[tuple[bool, 
bool]] = [] + + def fake_sync_config_file( + *, + config_path: Path, + example_path: Path, + write: bool = True, + prune: bool = False, + ) -> SimpleNamespace: + del config_path, example_path + calls.append((write, prune)) + return SimpleNamespace( + content="", + added_paths=[], + removed_paths=["models.chat.extra"], + comments={}, + ) + + monkeypatch.setattr(module, "sync_config_file", fake_sync_config_file) + monkeypatch.setattr(module, "_confirm_prune", lambda _paths: False) + monkeypatch.setattr(sys, "argv", ["sync_config_template.py", "--prune"]) + + assert module.main() == 0 + + output = capsys.readouterr().out + assert "[sync-config] 分析完成:" in output + assert calls == [(False, False), (True, False)] diff --git a/tests/test_webui_management_api.py b/tests/test_webui_management_api.py index d4caedfa..be44bffe 100644 --- a/tests/test_webui_management_api.py +++ b/tests/test_webui_management_api.py @@ -10,7 +10,7 @@ from Undefined.webui import app as webui_app from Undefined.webui.app import create_app from Undefined.webui.core import SessionStore -from Undefined.webui.routes import _auth, _index, _shared, _system +from Undefined.webui.routes import _auth, _config, _index, _shared, _system from Undefined.webui.routes._shared import ( REDIRECT_TO_CONFIG_ONCE_APP_KEY, SESSION_COOKIE, @@ -156,6 +156,106 @@ async def _fake_runtime() -> tuple[bool, bool, str]: assert payload["advice"] +async def test_sync_config_template_handler_preview_skips_reload( + monkeypatch: Any, +) -> None: + request = _request(query={"write": "false"}) + calls: list[tuple[str, object, object]] = [] + + async def _fake_to_thread(func: Any, *args: Any, **kwargs: Any) -> Any: + return func(*args, **kwargs) + + def _fake_validate_required_config() -> tuple[bool, str]: + calls.append(("validate", None, None)) + return True, "OK" + + def _fake_sync_config_file(*, write: bool = True, prune: bool = False) -> Any: + calls.append(("sync", write, prune)) + return SimpleNamespace( + 
added_paths=["models.chat.api_mode"], + removed_paths=["models.chat.extra"], + ) + + monkeypatch.setattr(_config, "check_auth", lambda _request: True) + monkeypatch.setattr( + cast(Any, getattr(_config, "asyncio")), "to_thread", _fake_to_thread + ) + monkeypatch.setattr(_config, "sync_config_file", _fake_sync_config_file) + monkeypatch.setattr( + _config, + "get_config_manager", + lambda: SimpleNamespace( + reload=lambda: calls.append(("reload", None, None)), + ), + ) + monkeypatch.setattr( + _config, "validate_required_config", _fake_validate_required_config + ) + + response = await _config.sync_config_template_handler( + cast(web.Request, cast(Any, request)) + ) + payload = _json_payload(response) + + assert payload["success"] is True + assert payload["preview"] is True + assert payload["warning"] is None + assert payload["added_count"] == 1 + assert payload["removed_count"] == 1 + assert calls == [("sync", False, False)] + + +async def test_sync_config_template_handler_write_reloads_and_validates( + monkeypatch: Any, +) -> None: + request = _request(query={"prune": "true"}) + calls: list[tuple[str, object, object]] = [] + + async def _fake_to_thread(func: Any, *args: Any, **kwargs: Any) -> Any: + return func(*args, **kwargs) + + def _fake_validate_required_config() -> tuple[bool, str]: + calls.append(("validate", None, None)) + return False, "missing required field" + + def _fake_sync_config_file(*, write: bool = True, prune: bool = False) -> Any: + calls.append(("sync", write, prune)) + return SimpleNamespace( + added_paths=[], + removed_paths=["models.chat.extra"], + ) + + monkeypatch.setattr(_config, "check_auth", lambda _request: True) + monkeypatch.setattr( + cast(Any, getattr(_config, "asyncio")), "to_thread", _fake_to_thread + ) + monkeypatch.setattr(_config, "sync_config_file", _fake_sync_config_file) + monkeypatch.setattr( + _config, + "get_config_manager", + lambda: SimpleNamespace( + reload=lambda: calls.append(("reload", None, None)), + ), + ) + 
monkeypatch.setattr( + _config, "validate_required_config", _fake_validate_required_config + ) + + response = await _config.sync_config_template_handler( + cast(web.Request, cast(Any, request)) + ) + payload = _json_payload(response) + + assert payload["success"] is True + assert payload["preview"] is False + assert payload["warning"] == "missing required field" + assert calls == [ + ("sync", True, True), + ("reload", None, None), + ("validate", None, None), + ] + + def test_create_app_registers_management_routes() -> None: app = create_app() routes = {