From 4af6012f0d7d12320f076133d895a4d398be0bac Mon Sep 17 00:00:00 2001 From: presstab Date: Sun, 21 Sep 2025 14:31:17 -0600 Subject: [PATCH 1/6] refactor: extract input routing logic into InputHandler class - Move router agent orchestration from Application.process_input to new InputHandler.route method - Add lazy import for CommandInterpretationAgent in agents/__init__.py - Improve variable handling in agent_tools web_search and web_scrape_url functions - Add comprehensive tests for InputHandler in tests/test_input_handler.py --- src/jrdev/agents/__init__.py | 10 ++ src/jrdev/agents/agent_tools.py | 10 +- src/jrdev/core/application.py | 93 +++------------- src/jrdev/core/input_handler.py | 191 ++++++++++++++++++++++++++++++++ tests/test_input_handler.py | 139 +++++++++++++++++++++++ 5 files changed, 360 insertions(+), 83 deletions(-) create mode 100644 src/jrdev/core/input_handler.py create mode 100644 tests/test_input_handler.py diff --git a/src/jrdev/agents/__init__.py b/src/jrdev/agents/__init__.py index e69de29..3f3e928 100644 --- a/src/jrdev/agents/__init__.py +++ b/src/jrdev/agents/__init__.py @@ -0,0 +1,10 @@ +from importlib import import_module +from typing import Any + +__all__ = ["CommandInterpretationAgent"] + + +def __getattr__(name: str) -> Any: + if name == "CommandInterpretationAgent": + return import_module("jrdev.agents.router_agent").CommandInterpretationAgent + raise AttributeError(name) diff --git a/src/jrdev/agents/agent_tools.py b/src/jrdev/agents/agent_tools.py index 4174098..8da66e5 100644 --- a/src/jrdev/agents/agent_tools.py +++ b/src/jrdev/agents/agent_tools.py @@ -84,8 +84,11 @@ def terminal(args: List[str]) -> str: def web_search(args: List[str]) -> str: if not args: return "" + query = args[0].strip() + if not query: + return "" service = WebSearchService() - return str(service.search(args[0])) + return str(service.search(query)) async def web_scrape_url(args: List[str]) -> str: @@ -102,8 +105,9 @@ async def web_scrape_url(args: List[str]) -> str: logger.info("web_scrap_url: empty args") return "" - logger.info("web_scrape_url: scraping %s", args[0]) - doc = await WebScrapeService().fetch_and_convert(args[0]) + url = args[0] + logger.info("web_scrape_url: scraping %s", url) + doc = await WebScrapeService().fetch_and_convert(url) if len(args) > 1: file_path = args[1] with open(file_path, "w", encoding="utf-8") as file: diff --git a/src/jrdev/core/application.py b/src/jrdev/core/application.py index a2bef0d..0dd36ed 100644 --- a/src/jrdev/core/application.py +++ b/src/jrdev/core/application.py @@ -2,18 +2,17 @@ import json import os import sys -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from dotenv import load_dotenv -from jrdev.agents import agent_tools from jrdev.agents.router_agent import CommandInterpretationAgent +from jrdev.core.input_handler import InputHandler from jrdev.commands.handle_research import handle_research from jrdev.commands.keys import check_existing_keys, save_keys_to_env from jrdev.core.clients import APIClients from jrdev.core.commands import Command, CommandHandler from jrdev.core.state import AppState -from jrdev.core.tool_call import ToolCall from jrdev.core.user_settings import UserSettings from jrdev.file_operations.file_utils import (JRDEV_DIR, JRDEV_PACKAGE_DIR, add_to_gitignore, get_env_path, @@ -49,6 +48,7 @@ def __init__(self, ui_mode="textual"): # Add the router agent and its dedicated chat thread self.router_agent = None + self.input_handler: Optional[InputHandler] = None 
self.state.router_thread_id = self.state.create_thread(thread_id="", meta_data={"type": "router"}) self.user_settings: UserSettings = UserSettings() @@ -273,8 +273,9 @@ async def initialize_services(self): self.message_service = MessageService(self) self.model_fetch_service = ModelFetchService() - # Initialize the router agent + # Initialize the router agent and input handler self.router_agent = CommandInterpretationAgent(self) + self.input_handler = InputHandler(self) self.logger.info("CommandInterpretationAgent initialized.") if not self.state.model: @@ -527,7 +528,7 @@ async def _schedule_task_monitor(self): await self.task_monitor_callback() async def process_input(self, user_input, worker_id=None): - """Process user input.""" + """Process user input by dispatching commands or invoking the input router.""" await asyncio.sleep(0.01) # Brief yield to event loop if not user_input: @@ -536,84 +537,16 @@ async def process_input(self, user_input, worker_id=None): if user_input.startswith("/"): command = Command(user_input, worker_id) result = await self.handle_command(command) - # Check for special exit code if result == "EXIT": self.logger.info("Exit command received, forcing running state to False") self.state.running = False - else: - # Invoke the CommandInterpretationAgent - self.ui.print_text("Interpreting your request...\n", print_type=PrintType.PROCESSING) - restricted_commands = ["/init", "/migrate", "/keys"] - calls_made = [] - max_iter = self.user_settings.max_router_iterations - i = 0 - while i < max_iter: - i += 1 - tool_call: ToolCall = await self.router_agent.interpret(user_input, worker_id, calls_made) - if not tool_call: - # Agent decided to clarify, chat, summarize, or failed. Stop processing. - break - - # The agent decided on a command, now execute it - command_to_execute = tool_call.formatted_cmd - self.ui.print_text(f"Running command: {command_to_execute}\nCommand Purpose: {tool_call.reasoning}\n", print_type=PrintType.PROCESSING) - if tool_call.action_type == "command": - if tool_call.command in restricted_commands: - self.ui.print_text( - f"Error: Router Agent is restricted from using the {tool_call.command} command.", - PrintType.ERROR - ) - break - # commands print directly to console, therefore we have to capture console output for results - self.ui.start_capture() - command = Command(command_to_execute, worker_id) - await self.handle_command(command) - self.ui.end_capture() - tool_call.result = self.ui.get_capture() - # If the command was /code, we should break out of the router loop - # as /code is a self-contained agentic process. - if command_to_execute.startswith("/code"): - break - elif tool_call.action_type == "tool": - try: - if tool_call.command not in agent_tools.tools_list: - tool_call.result = f"Error: Tool '{tool_call.command}' does not exist." 
- elif tool_call.command == "read_files": - tool_call.result = agent_tools.read_files(tool_call.args) - elif tool_call.command == "get_file_tree": - tool_call.result = agent_tools.get_file_tree() - elif tool_call.command == "write_file": - filename = tool_call.args[0] - content = " ".join(tool_call.args[1:]) - tool_call.result = await agent_tools.write_file(self, filename, content) - elif tool_call.command == "web_search": - tool_call.result = agent_tools.web_search(tool_call.args) - elif tool_call.command == "web_scrape_url": - tool_call.result = await agent_tools.web_scrape_url(tool_call.args) - elif tool_call.command == "get_indexed_files_context": - tool_call.result = agent_tools.get_indexed_files_context(self, tool_call.args) - elif tool_call.command == "terminal": - command_str = " ".join(tool_call.args) - confirmed = await self.ui.prompt_for_command_confirmation(command_str) - if confirmed: - tool_call.result = agent_tools.terminal(tool_call.args) - else: - tool_call.result = "Terminal command request REJECTED by user." - self.ui.print_text("Command execution cancelled.", PrintType.INFO) - except Exception as e: - error_message = f"Error executing tool '{tool_call.command}': {str(e)}" - self.logger.error(f"Tool execution failed: {error_message}", exc_info=True) - tool_call.result = error_message - if not tool_call.has_next: - # This was the final command in the chain. - break - - # This was an info-gathering step, add result to history and loop again. - calls_made.append(tool_call) - if i >= max_iter: - self.ui.print_text( - "My maximum command iterations have been hit for this request. Please reprompt to continue. You can" - " adjust this using the /routeragent command", print_type=PrintType.ERROR) + return + + if not self.input_handler: + self.logger.warning("Input handler not initialized; skipping natural language routing.") + return + + await self.input_handler.route(user_input, worker_id) async def process_chat_input(self, user_input, worker_id=None): # 1) get the active thread diff --git a/src/jrdev/core/input_handler.py b/src/jrdev/core/input_handler.py new file mode 100644 index 0000000..63d4875 --- /dev/null +++ b/src/jrdev/core/input_handler.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +from typing import Any, List, Optional + +from jrdev.agents import agent_tools +from jrdev.core.commands import Command +from jrdev.core.tool_call import ToolCall +from jrdev.ui.ui import PrintType + + +class InputHandler: + """Coordinates the agentic routing loop for free-form user input. + + The handler owns orchestration concerns such as invoking the router agent, + executing resulting tool calls, relaying user feedback through the UI, and + persisting conversational context. Decision making remains the + responsibility of the router agent; this class simply delegates to it and + executes the returned actions against the rest of the application. + """ + + def __init__(self, app: Any) -> None: + self.app = app + self._restricted_commands = {"/init", "/migrate", "/keys"} + + async def route(self, user_input: str, worker_id: Optional[str] = None) -> Optional[str]: + """Interpret and execute user input via the router agent. + + Args: + user_input: Raw input provided by the end user. + worker_id: Optional identifier used for async task correlation. + + Returns: + The last tool or command result surfaced back to the user, or + ``None`` if the agent decided the conversation should end without + executing an action. 
+ + Side Effects: + * Streams progress messages through ``app.ui``. + * Mutates the router agent's conversation thread with action + results, providing context for subsequent iterations. + * May execute application commands or agent tools, depending on the + router's decisions. + """ + + agent = getattr(self.app, "router_agent", None) + if agent is None: + self.app.logger.warning("Router agent not initialized; skipping natural language routing.") + return None + + self.app.ui.print_text( + "Interpreting your request...\n", + print_type=PrintType.PROCESSING, + ) + + calls_made: List[ToolCall] = [] + last_result: Optional[str] = None + max_iterations = max(1, self.app.user_settings.max_router_iterations) + hit_iteration_limit = True + + for _ in range(max_iterations): + tool_call = await agent.interpret(user_input, worker_id, calls_made) + if not tool_call: + hit_iteration_limit = False + break + + try: + self._announce_tool_call(tool_call) + if tool_call.action_type == "command": + await self._execute_command(agent, tool_call, worker_id) + elif tool_call.action_type == "tool": + await self._execute_tool(agent, tool_call) + else: + self.app.logger.error("Unknown tool call action type '%s'.", tool_call.action_type) + hit_iteration_limit = False + break + except Exception: # pragma: no cover - defensive logging path + self.app.logger.error( + "Unhandled error while processing tool call '%s'.", + tool_call.formatted_cmd, + exc_info=True, + ) + self.app.ui.print_text( + "I hit an unexpected error while handling that request. Please try again.", + print_type=PrintType.ERROR, + ) + hit_iteration_limit = False + break + + if tool_call.result: + last_result = tool_call.result + + if not tool_call.has_next: + hit_iteration_limit = False + break + + calls_made.append(tool_call) + + if hit_iteration_limit: + self.app.ui.print_text( + "My maximum command iterations have been hit for this request. Please reprompt to " + "continue. You can adjust this using the /routeragent command", + print_type=PrintType.ERROR, + ) + + return last_result + + def _announce_tool_call(self, tool_call: ToolCall) -> None: + message = ( + f"Running command: {tool_call.formatted_cmd}\n" + f"Command Purpose: {tool_call.reasoning}\n" + ) + self.app.ui.print_text(message, print_type=PrintType.PROCESSING) + + async def _execute_command( + self, + agent: Any, + tool_call: ToolCall, + worker_id: Optional[str], + ) -> None: + command_to_execute = tool_call.formatted_cmd + + if tool_call.command in self._restricted_commands: + error_message = ( + f"Error: Router Agent is restricted from using the {tool_call.command} command." + ) + self.app.ui.print_text(error_message, PrintType.ERROR) + tool_call.result = error_message + self._append_thread_message(agent, error_message) + tool_call.has_next = False + return + + self.app.ui.start_capture() + try: + command = Command(command_to_execute, worker_id) + await self.app.handle_command(command) + finally: + self.app.ui.end_capture() + + tool_call.result = self.app.ui.get_capture() + self._append_thread_message(agent, tool_call.result) + + if command_to_execute.startswith("/code"): + tool_call.has_next = False + + async def _execute_tool(self, agent: Any, tool_call: ToolCall) -> None: + result = await self._run_tool(tool_call) + + tool_call.result = result + self._append_thread_message(agent, result) + + async def _run_tool(self, tool_call: ToolCall) -> str: + try: + if tool_call.command not in agent_tools.tools_list: + return f"Error: Tool '{tool_call.command}' does not exist." 
+ if tool_call.command == "read_files": + return agent_tools.read_files(tool_call.args) + if tool_call.command == "get_file_tree": + return agent_tools.get_file_tree() + if tool_call.command == "write_file": + if not tool_call.args: + return "Error: write_file requires a filename and content." + filename = tool_call.args[0] + content = " ".join(tool_call.args[1:]) + return await agent_tools.write_file(self.app, filename, content) + if tool_call.command == "web_search": + return agent_tools.web_search(tool_call.args) + if tool_call.command == "web_scrape_url": + return await agent_tools.web_scrape_url(tool_call.args) + if tool_call.command == "get_indexed_files_context": + return agent_tools.get_indexed_files_context(self.app, tool_call.args) + if tool_call.command == "terminal": + command_str = " ".join(tool_call.args) + confirmed = await self.app.ui.prompt_for_command_confirmation(command_str) + if confirmed: + return agent_tools.terminal(tool_call.args) + self.app.ui.print_text("Command execution cancelled.", PrintType.INFO) + return "Terminal command request REJECTED by user." + except Exception as exc: # pragma: no cover - defensive logging path + error_message = f"Error executing tool '{tool_call.command}': {exc}" + self.app.logger.error(error_message, exc_info=True) + return error_message + + return f"Tool '{tool_call.command}' is not implemented." + + def _append_thread_message(self, agent: Any, content: Optional[str]) -> None: + if not content or not hasattr(agent, "thread"): + return + thread = agent.thread + if thread is None: + return + thread.messages.append({"role": "assistant", "content": content}) diff --git a/tests/test_input_handler.py b/tests/test_input_handler.py new file mode 100644 index 0000000..c025c15 --- /dev/null +++ b/tests/test_input_handler.py @@ -0,0 +1,139 @@ +import asyncio +from types import SimpleNamespace + +import pytest + +from jrdev.core.input_handler import InputHandler +from jrdev.core.tool_call import ToolCall +from jrdev.ui.ui import PrintType + + +class FakeLogger: + def info(self, *args, **kwargs): # pragma: no cover - logging helper + pass + + def warning(self, *args, **kwargs): # pragma: no cover - logging helper + pass + + def error(self, *args, **kwargs): # pragma: no cover - logging helper + pass + + +class FakeUI: + def __init__(self): + self.messages = [] + self._capturing = False + self._capture_buffer = [] + + def print_text(self, message, print_type=None): + self.messages.append((message, print_type)) + if self._capturing: + self._capture_buffer.append(message) + + def start_capture(self): + self._capturing = True + self._capture_buffer = [] + + def end_capture(self): + self._capturing = False + + def get_capture(self): + return "\n".join(self._capture_buffer) + + async def prompt_for_command_confirmation(self, command_str): + self.messages.append((f"confirm:{command_str}", PrintType.INFO)) + return True + + +class FakeApp: + def __init__(self): + self.logger = FakeLogger() + self.ui = FakeUI() + self.user_settings = SimpleNamespace(max_router_iterations=3) + self.state = SimpleNamespace(running=True) + self.commands_executed = [] + self.router_agent = None + + async def handle_command(self, command): + self.commands_executed.append(command.text) + self.ui.print_text(f"handled {command.text}", PrintType.INFO) + await asyncio.sleep(0) + + +class FakeThread: + def __init__(self): + self.messages = [] + + +class FakeInterpretationAgent: + def __init__(self, responses): + self._responses = list(responses) + self.thread = FakeThread() + + 
async def interpret(self, *_args, **_kwargs): + if not self._responses: + return None + return self._responses.pop(0) + + +@pytest.mark.asyncio +async def test_route_executes_command_and_captures_output(): + tool_call = ToolCall( + action_type="command", + command="/echo", + args=["hello", "world"], + reasoning="echo the greeting", + has_next=False, + ) + app = FakeApp() + agent = FakeInterpretationAgent([tool_call]) + app.router_agent = agent + handler = InputHandler(app) + + result = await handler.route("echo hello") + + assert app.commands_executed == ["/echo hello world"] + assert tool_call.result == "handled /echo hello world" + assert result == tool_call.result + assert agent.thread.messages[-1]["content"] == "handled /echo hello world" + + +@pytest.mark.asyncio +async def test_route_stores_tool_results_in_thread(monkeypatch): + calls = {"count": 0} + + def fake_web_search(args): + calls["count"] += 1 + return f"result {calls['count']} for {args[0]}" + + from jrdev.agents import agent_tools as tools_module + + monkeypatch.setattr(tools_module, "web_search", fake_web_search) + + tool_calls = [ + ToolCall( + action_type="tool", + command="web_search", + args=["python caching"], + reasoning="search the web", + has_next=False, + ), + ToolCall( + action_type="tool", + command="web_search", + args=["python caching"], + reasoning="search the web", + has_next=False, + ), + ] + + app = FakeApp() + agent = FakeInterpretationAgent(tool_calls) + app.router_agent = agent + handler = InputHandler(app) + + result = await handler.route("search python caching") + + assert calls["count"] == 1 + assert result == "result 1 for python caching" + assert agent.thread.messages[-1]["content"] == "result 1 for python caching" From 708547b215f299fc87dbabe46e759e1679388e2b Mon Sep 17 00:00:00 2001 From: presstab Date: Thu, 23 Oct 2025 11:31:07 -0600 Subject: [PATCH 2/6] fix: add retry logic for LLM response parsing in research_agent and guard against None text in TUI - research_agent now retries once if JSON parsing fails, summarising findings and aborting gracefully on repeated failure - textual_ui skips printing when event.text is None to prevent downstream errors --- src/jrdev/agents/research_agent.py | 59 ++++++++++++++++++++++++++---- src/jrdev/ui/tui/textual_ui.py | 5 ++- 2 files changed, 54 insertions(+), 10 deletions(-) diff --git a/src/jrdev/agents/research_agent.py b/src/jrdev/agents/research_agent.py index b2c8ce3..2a955cf 100644 --- a/src/jrdev/agents/research_agent.py +++ b/src/jrdev/agents/research_agent.py @@ -52,15 +52,58 @@ async def interpret( # Use a specific model for this task research_model = self.app.state.model - response_text = await generate_llm_response(self.app, research_model, messages, task_id=worker_id) - - json_content = "" - try: - json_content = cutoff_string(response_text, "```json", "```") - response_json = json.loads(json_content) - except (json.JSONDecodeError, KeyError) as e: + response_json = None + for attempt in range(2): # Retry once + response_text = await generate_llm_response( + self.app, research_model, messages, task_id=worker_id + ) + json_content = "" + try: + json_content = cutoff_string(response_text, "```json", "```") + response_json = json.loads(json_content) + break # Success, exit loop + except (json.JSONDecodeError, KeyError) as e: + self.logger.warning( + f"Attempt {attempt + 1} failed to parse research agent LLM response: {e}" + ) + if attempt == 0: + self.app.ui.print_text( + "Research agent had an issue parsing its response. 
Retrying once...", + print_type=PrintType.WARNING, + ) + else: + self.logger.error( + f"Failed to parse research agent LLM response after 2 attempts. Aborting and summarizing. Response: {response_text}" + ) + self.app.ui.print_text( + "Research agent failed to parse its response after a retry. Summarizing current findings and finishing.", + print_type=PrintType.ERROR, + ) + summary_parts = [ + "I was unable to decide on the next step due to a persistent error." + ] + if previous_tool_calls: + summary_parts.append( + "Here is a summary of the research so far:" + ) + for tc in previous_tool_calls: + summary_parts.append( + f"Action: {tc.formatted_cmd}\nResult: {tc.result}" + ) + else: + summary_parts.append("No research actions were taken.") + + summary = "\n".join(summary_parts) + self.thread.messages.append( + {"role": "assistant", "content": summary} + ) + return {"type": "summary", "data": summary} + + if response_json is None: + # This should not be reached if the logic above is correct, but as a safeguard. self.logger.error( - f"Failed to parse research agent LLM response: {e}\nResponse:\n {response_text}\nRaw:\n{json_content}") + "response_json is None after retry loop, which should not happen. Aborting." + ) self.app.ui.print_text( "Research agent had an issue parsing its own response. This may be a temporary issue. Aborting research task.", print_type=PrintType.ERROR, diff --git a/src/jrdev/ui/tui/textual_ui.py b/src/jrdev/ui/tui/textual_ui.py index f8f476c..7127c72 100644 --- a/src/jrdev/ui/tui/textual_ui.py +++ b/src/jrdev/ui/tui/textual_ui.py @@ -212,7 +212,8 @@ def handle_print_message(self, event: TextualEvents.PrintMessage) -> None: if isinstance(event.text, list): self.terminal_output_widget.append_text("\n".join(event.text) + "\n") else: - self.terminal_output_widget.append_text(f"[PrintType={type_string}]" + event.text + "\n") + if event.text is not None: + self.terminal_output_widget.append_text(f"[PrintType={type_string}]" + event.text + "\n") @on(TextualEvents.StreamChunk) async def handle_stream_chunk(self, event: TextualEvents.StreamChunk) -> None: @@ -439,4 +440,4 @@ def run_textual_ui() -> None: if __name__ == "__main__": - run_textual_ui() \ No newline at end of file + run_textual_ui() From 9496124b10b2927b403905aea490b1eef274e2fa Mon Sep 17 00:00:00 2001 From: presstab Date: Thu, 23 Oct 2025 11:46:56 -0600 Subject: [PATCH 3/6] style: replace emoji with unicode symbols in ui buttons --- src/jrdev/ui/tui/model_listview.py | 4 ++-- src/jrdev/ui/tui/terminal/terminal_output_widget.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/jrdev/ui/tui/model_listview.py b/src/jrdev/ui/tui/model_listview.py index de0ad25..bae6806 100644 --- a/src/jrdev/ui/tui/model_listview.py +++ b/src/jrdev/ui/tui/model_listview.py @@ -70,7 +70,7 @@ def __init__(self, id: str, core_app: Any, model_button: Button, above_button: b self.height = 10 self.models = [] self.search_input = SearchInput(placeholder="Search models...", id="model-search-input") - self.btn_settings = Button("⚙️", id="btn-settings", tooltip="Model & Provider Settings") + self.btn_settings = Button("\u2699", id="btn-settings", tooltip="Model & Provider Settings") self.btn_settings.can_focus = False self.list_view = ListView(id="_listview") self.input_query = None @@ -109,7 +109,7 @@ def update_models(self, query_filter: str = "") -> None: is_first = True for provider, provider_models in grouped_models.items(): - provider_item = ListItem(Label(f"[{star_color.rich_color.name}][bold 
white]✨{provider}✨[/bold white][/{star_color.rich_color.name}]", markup=True), name=provider, disabled=True) + provider_item = ListItem(Label(f"[{star_color.rich_color.name}][bold white]\u2739 {provider} \u2739[/bold white][/{star_color.rich_color.name}]", markup=True), name=provider, disabled=True) self.list_view.append(provider_item) for model in provider_models: model_name = model["name"] diff --git a/src/jrdev/ui/tui/terminal/terminal_output_widget.py b/src/jrdev/ui/tui/terminal/terminal_output_widget.py index fec420c..94abfbc 100644 --- a/src/jrdev/ui/tui/terminal/terminal_output_widget.py +++ b/src/jrdev/ui/tui/terminal/terminal_output_widget.py @@ -104,8 +104,8 @@ def __init__(self, id: Optional[str] = None, output_widget_mode=False, core_app= else: self.model_button = Button(label="Model", id="model_btn_term") self.context_label = Label("Context Use 0%", id="context-label") - self.compact_button = Button(label="🗜️", id="compact-btn", tooltip="Compact conversation. Condenses conversation thread, keeping a summary, but not all details. Reduces Context Use.") - self.clear_button = Button(label="🗑️", id="clear-btn", tooltip="Clear the entire conversation with the router agent. Sets Context Use to 0.") + self.compact_button = Button(label="🗜", id="compact-btn", tooltip="Compact conversation. Condenses conversation thread, keeping a summary, but not all details. Reduces Context Use.") + self.clear_button = Button(label="🗑", id="clear-btn", tooltip="Clear the entire conversation with the router agent. Sets Context Use to 0.") if not core_app: raise Exception("core app reference missing from terminal output widget") self.core_app = core_app From cf42040942bc9284e506f9e450e8e5cbb04f28a3 Mon Sep 17 00:00:00 2001 From: presstab Date: Thu, 23 Oct 2025 12:00:13 -0600 Subject: [PATCH 4/6] chore: update model configurations and replace gpt-4.1 with gpt-5 variants - Removes deprecated Anthropic, OpenAI and OpenRouter models - Adds new Claude Haiku 4.5, Sonnet 4.5 and Opus 4.1 entries - Introduces GPT-5, GPT-5-mini and GPT-5-nano with updated pricing/context - Replaces all profile mappings to use gpt-5-mini as the default model --- src/jrdev/config/model_list.json | 128 ++++++++++------------------- src/jrdev/models/model_profiles.py | 34 ++++---- 2 files changed, 61 insertions(+), 101 deletions(-) diff --git a/src/jrdev/config/model_list.json b/src/jrdev/config/model_list.json index 7e330e9..a29153b 100644 --- a/src/jrdev/config/model_list.json +++ b/src/jrdev/config/model_list.json @@ -1,35 +1,11 @@ { "models": [ { - "name": "claude-3-5-haiku-20241022", - "provider": "anthropic", - "is_think": false, - "input_cost": 8, - "output_cost": 40, - "context_tokens": 200000 - }, - { - "name": "claude-3-7-sonnet-20250219", - "provider": "anthropic", - "is_think": true, - "input_cost": 30, - "output_cost": 150, - "context_tokens": 200000 - }, - { - "name": "claude-opus-4-20250514", - "provider": "anthropic", - "is_think": true, - "input_cost": 150, - "output_cost": 750, - "context_tokens": 200000 - }, - { - "name": "claude-sonnet-4-20250514", + "name": "claude-haiku-4-5-20251001", "provider": "anthropic", "is_think": true, - "input_cost": 30, - "output_cost": 150, + "input_cost": 10, + "output_cost": 50, "context_tokens": 200000 }, { @@ -48,30 +24,6 @@ "output_cost": 11, "context_tokens": 64000 }, - { - "name": "o4-mini-2025-04-16", - "provider": "openai", - "is_think": false, - "input_cost": 11, - "output_cost": 44, - "context_tokens": 1000000 - }, - { - "name": "gpt-4.1-2025-04-14", - "provider": 
"openai", - "is_think": false, - "input_cost": 20, - "output_cost": 80, - "context_tokens": 1000000 - }, - { - "name": "gpt-4.1-mini-2025-04-14", - "provider": "openai", - "is_think": false, - "input_cost": 4, - "output_cost": 16, - "context_tokens": 1000000 - }, { "name": "google/gemini-2.5-pro", "provider": "open_router", @@ -104,14 +56,6 @@ "output_cost": 0, "context_tokens": 262144 }, - { - "name": "deepseek/deepseek-chat-v3-0324:free", - "provider": "open_router", - "is_think": false, - "input_cost": 0, - "output_cost": 0, - "context_tokens": 16384 - }, { "name": "deepseek/deepseek-r1-0528:free", "provider": "open_router", @@ -128,14 +72,6 @@ "output_cost": 150, "context_tokens": 131072 }, - { - "name": "mistralai/devstral-small-2507", - "provider": "open_router", - "is_think": true, - "input_cost": 1, - "output_cost": 3, - "context_tokens": 131072 - }, { "name": "qwen/qwen3-coder:free", "provider": "open_router", @@ -153,36 +89,60 @@ "context_tokens": 33000 }, { - "name": "anthropic/claude-sonnet-4", - "provider": "open_router", - "is_think": false, + "name": "claude-sonnet-4-5-20250929", + "provider": "anthropic", + "is_think": true, "input_cost": 30, "output_cost": 150, - "context_tokens": 200000 + "context_tokens": 1000000 }, { - "name": "anthropic/claude-opus-4", - "provider": "open_router", - "is_think": false, + "name": "claude-opus-4-1-20250805", + "provider": "anthropic", + "is_think": true, "input_cost": 150, "output_cost": 750, "context_tokens": 200000 }, { - "name": "openai/o4-mini-high", + "name": "gpt-5", + "provider": "openai", + "is_think": true, + "input_cost": 12, + "output_cost": 100, + "context_tokens": 400000 + }, + { + "name": "gpt-5-nano", + "provider": "openai", + "is_think": true, + "input_cost": 0, + "output_cost": 4, + "context_tokens": 400000 + }, + { + "name": "gpt-5-mini", + "provider": "openai", + "is_think": true, + "input_cost": 2, + "output_cost": 20, + "context_tokens": 400000 + }, + { + "name": "openai/gpt-5-mini", "provider": "open_router", - "is_think": false, - "input_cost": 11, - "output_cost": 44, - "context_tokens": 200000 + "is_think": true, + "input_cost": 2, + "output_cost": 20, + "context_tokens": 400000 }, { - "name": "openai/gpt-4.1", + "name": "openai/gpt-5-nano", "provider": "open_router", - "is_think": false, - "input_cost": 20, - "output_cost": 80, - "context_tokens": 1047576 + "is_think": true, + "input_cost": 0, + "output_cost": 4, + "context_tokens": 400000 }, { "name": "gemma3:4b", diff --git a/src/jrdev/models/model_profiles.py b/src/jrdev/models/model_profiles.py index 0ab8ad2..1142f21 100644 --- a/src/jrdev/models/model_profiles.py +++ b/src/jrdev/models/model_profiles.py @@ -79,19 +79,19 @@ def _load_profiles(self, remove_fallback=False) -> Dict[str, Any]: # Define the hardcoded fallback configuration hardcoded_fallback_config: Dict[str, Any] = { "profiles": { - "advanced_reasoning": "o4-mini-2025-04-16", - "advanced_coding": "o4-mini-2025-04-16", - "intermediate_reasoning": "gpt-4.1-2025-04-14", - "intermediate_coding": "gpt-4.1-2025-04-14", - "quick_reasoning": "gpt-4.1-mini-2025-04-14", - "intent_router": "gpt-4.1-2025-04-14", - "low_cost_search": "gpt-4.1-2025-04-14" + "advanced_reasoning": "gpt-5-mini", + "advanced_coding": "gpt-5-mini", + "intermediate_reasoning": "gpt-5-mini", + "intermediate_coding": "gpt-5-mini", + "quick_reasoning": "gpt-5-mini", + "intent_router": "gpt-5-mini", + "low_cost_search": "gpt-5-mini" }, "default_profile": "advanced_coding", # chat_model will be derived from default_profile } 
hardcoded_fallback_config["chat_model"] = hardcoded_fallback_config["profiles"].get( - hardcoded_fallback_config["default_profile"], "o4-mini-2025-04-16" # Ultimate fallback for chat_model + hardcoded_fallback_config["default_profile"], "gpt-5-mini" # Ultimate fallback for chat_model ) try: @@ -206,7 +206,7 @@ def get_model(self, profile_type: str) -> str: # Fall back to default profile if requested profile doesn't exist default = str(self.profiles["default_profile"]) logger.warning(f"Profile '{profile_type}' not found, using default: {default}") - return str(self.profiles["profiles"].get(default, "qwen-2.5-coder-32b")) + return str(self.profiles["profiles"].get(default, "gpt-5-mini")) def update_profile(self, profile_type: str, model_name: str, model_list: Optional[ModelList] = None) -> bool: """ @@ -402,18 +402,18 @@ def reload_if_using_fallback(self, active_provider_names) -> bool: self.active_provider_names: List[str] = active_provider_names if active_provider_names is not None else [] hardcoded_fallback_config = { "profiles": { - "advanced_reasoning": "o4-mini-2025-04-16", - "advanced_coding": "o4-mini-2025-04-16", - "intermediate_reasoning": "gpt-4.1-2025-04-14", - "intermediate_coding": "gpt-4.1-2025-04-14", - "quick_reasoning": "gpt-4.1-mini-2025-04-14", - "intent_router": "gpt-4.1-2025-04-14", - "low_cost_search": "gpt-4.1-2025-04-14" + "advanced_reasoning": "gpt-5-mini", + "advanced_coding": "gpt-5-mini", + "intermediate_reasoning": "gpt-5-mini", + "intermediate_coding": "gpt-5-mini", + "quick_reasoning": "gpt-5-mini", + "intent_router": "gpt-5-mini", + "low_cost_search": "gpt-5-mini" }, "default_profile": "advanced_coding", } hardcoded_fallback_config["chat_model"] = hardcoded_fallback_config["profiles"].get( - hardcoded_fallback_config["default_profile"], "o4-mini-2025-04-16" + hardcoded_fallback_config["default_profile"], "gpt-5-mini" ) # Only compare the relevant keys current = self.profiles From 6be408cd8ea57ee61bdc67ebc4fc3ff26aa268ab Mon Sep 17 00:00:00 2001 From: presstab Date: Thu, 23 Oct 2025 12:28:44 -0600 Subject: [PATCH 5/6] tests: update API provider model configurations - Upgrade OpenAI models from o4-mini and gpt-4.1 variants to gpt-5-mini - Update Anthropic models to latest claude-sonnet-4-5 and claude-haiku-4-5 versions - Standardize Qwen models to use qwen3-coder across all profiles - Improve test isolation by configuring git environment to prevent external interference --- src/jrdev/config/api_providers.json | 32 ++++++++++++++--------------- tests/test_git_utils.py | 13 ++++++++++++ 2 files changed, 29 insertions(+), 16 deletions(-) diff --git a/src/jrdev/config/api_providers.json b/src/jrdev/config/api_providers.json index c746d12..5763a76 100644 --- a/src/jrdev/config/api_providers.json +++ b/src/jrdev/config/api_providers.json @@ -7,13 +7,13 @@ "required": false, "default_profiles": { "profiles": { - "advanced_reasoning": "o4-mini-2025-04-16", - "advanced_coding": "o4-mini-2025-04-16", - "intermediate_reasoning": "gpt-4.1-2025-04-14", - "intermediate_coding": "gpt-4.1-2025-04-14", - "quick_reasoning": "gpt-4.1-mini-2025-04-14", - "intent_router": "gpt-4.1-2025-04-14", - "low_cost_search": "gpt-4.1-2025-04-14" + "advanced_reasoning": "gpt-5-mini", + "advanced_coding": "gpt-5-mini", + "intermediate_reasoning": "gpt-5-mini", + "intermediate_coding": "gpt-5-mini", + "quick_reasoning": "gpt-5-mini", + "intent_router": "gpt-5-mini", + "low_cost_search": "gpt-5-mini" }, "default_profile": "advanced_coding" } @@ -25,13 +25,13 @@ "required": false, 
"default_profiles": { "profiles": { - "advanced_reasoning": "claude-3-7-sonnet-20250219", - "advanced_coding": "claude-3-7-sonnet-20250219", - "intermediate_reasoning": "claude-3-5-haiku-20241022", - "intermediate_coding": "claude-3-5-haiku-20241022", - "quick_reasoning": "claude-3-5-haiku-20241022", - "intent_router": "claude-3-5-haiku-20241022", - "low_cost_search": "claude-3-5-haiku-20241022" + "advanced_reasoning": "claude-sonnet-4-5-20250929", + "advanced_coding": "claude-sonnet-4-5-20250929", + "intermediate_reasoning": "claude-haiku-4-5-20251001", + "intermediate_coding": "claude-haiku-4-5-20251001", + "quick_reasoning": "claude-haiku-4-5-20251001", + "intent_router": "claude-haiku-4-5-20251001", + "low_cost_search": "claude-haiku-4-5-20251001" }, "default_profile": "advanced_coding" } @@ -65,9 +65,9 @@ "advanced_coding": "qwen/qwen3-coder:free", "intermediate_reasoning": "qwen/qwen3-coder:free", "intermediate_coding": "qwen/qwen3-coder:free", - "quick_reasoning": "qwen/qwen3-235b-a22b-2507:free", + "quick_reasoning": "qwen/qwen3-coder:free", "intent_router": "qwen/qwen3-coder:free", - "low_cost_search": "qwen/qwen3-235b-a22b-2507:free" + "low_cost_search": "qwen/qwen3-coder:free" }, "default_profile": "advanced_coding" } diff --git a/tests/test_git_utils.py b/tests/test_git_utils.py index 351e273..d2d043c 100644 --- a/tests/test_git_utils.py +++ b/tests/test_git_utils.py @@ -16,9 +16,22 @@ def setUp(self): self.test_dir = os.path.abspath("test_repo") os.makedirs(self.test_dir, exist_ok=True) os.chdir(self.test_dir) + # Ensure this test repo is fully isolated from any user/global git config + # to avoid prompts (e.g., GPG signing) or running user hooks. + os.environ["GIT_CONFIG_GLOBAL"] = os.devnull + # Some environments also read system config; point it at a harmless file. + os.environ["GIT_CONFIG_SYSTEM"] = os.devnull + # Avoid external editor launches if a commit is made without -m by mistake. + os.environ["GIT_EDITOR"] = "true" subprocess.call(["git", "init"]) subprocess.call(["git", "config", "user.email", "test@example.com"]) subprocess.call(["git", "config", "user.name", "Test User"]) + # Explicitly disable commit/tag signing and any hooks for this test repo. + subprocess.call(["git", "config", "commit.gpgsign", "false"]) + subprocess.call(["git", "config", "tag.gpgSign", "false"]) + hooks_dir = os.path.join(os.getcwd(), ".git", "hooks-disabled") + os.makedirs(hooks_dir, exist_ok=True) + subprocess.call(["git", "config", "core.hooksPath", hooks_dir]) def tearDown(self): """Clean up the temporary git repository.""" From 024fe2f89221971e099104a3342e4e08b51607f9 Mon Sep 17 00:00:00 2001 From: presstab Date: Thu, 23 Oct 2025 13:09:37 -0600 Subject: [PATCH 6/6] feat: add minimal async test support via conftest Add pytest configuration to handle async test functions without external plugins. The implementation detects coroutine functions and runs them using asyncio.run, providing basic async testing capabilities similar to pytest-asyncio. --- conftest.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 conftest.py diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000..ea78edf --- /dev/null +++ b/conftest.py @@ -0,0 +1,24 @@ +import asyncio +import inspect + +import pytest + + +def pytest_configure(config): + # Register the asyncio marker so pytest doesn't warn about it. 
+ config.addinivalue_line("markers", "asyncio: mark test as asyncio-based") + + +def pytest_pyfunc_call(pyfuncitem): + """Minimal async test support without external plugins. + + If a test function is defined with ``async def``, run it inside a fresh + event loop using ``asyncio.run``. This mirrors the basic behavior provided + by pytest-asyncio for our simple async tests. + """ + test_func = pyfuncitem.obj + if inspect.iscoroutinefunction(test_func): + asyncio.run(test_func(**pyfuncitem.funcargs)) + return True + return None +
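
With the hook above in place, an async test needs no external plugin to run.
A minimal sketch of the kind of test it executes (illustrative only, not part
of the patches above; the test body is made up):

    import asyncio

    import pytest


    @pytest.mark.asyncio
    async def test_async_addition():
        # pytest_pyfunc_call sees a coroutine function here and runs it to
        # completion with asyncio.run on a fresh event loop per test.
        async def add(a: int, b: int) -> int:
            await asyncio.sleep(0)  # yield to the event loop once
            return a + b

        assert await add(2, 3) == 5

The marker is optional for execution, since the hook keys off
inspect.iscoroutinefunction rather than the marker; pytest_configure registers
it so tests annotated in the pytest-asyncio style, such as those in
tests/test_input_handler.py, run without unknown-marker warnings.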