Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import asyncio
import inspect

import pytest


def pytest_configure(config):
    """Register custom markers with pytest.

    Adds the ``asyncio`` marker to the configuration's marker list so pytest
    does not emit an unknown-marker warning for tests tagged with it.
    """
    marker_line = "asyncio: mark test as asyncio-based"
    config.addinivalue_line("markers", marker_line)


def pytest_pyfunc_call(pyfuncitem):
    """Minimal async test support without external plugins.

    Hook implementation: when the collected test function was declared with
    ``async def``, run it to completion on a fresh event loop using
    ``asyncio.run`` — the basic behavior pytest-asyncio provides. Returning
    ``True`` tells pytest the call was handled here; returning ``None`` lets
    the default synchronous call path proceed.
    """
    func = pyfuncitem.obj
    if not inspect.iscoroutinefunction(func):
        # Plain (sync) test function: defer to pytest's normal invocation.
        return None
    asyncio.run(func(**pyfuncitem.funcargs))
    return True

10 changes: 10 additions & 0 deletions src/jrdev/agents/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
from importlib import import_module
from typing import Any

__all__ = ["CommandInterpretationAgent"]


def __getattr__(name: str) -> Any:
if name == "CommandInterpretationAgent":
return import_module("jrdev.agents.router_agent").CommandInterpretationAgent
raise AttributeError(name)
10 changes: 7 additions & 3 deletions src/jrdev/agents/agent_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,8 +84,11 @@ def terminal(args: List[str]) -> str:
def web_search(args: List[str]) -> str:
    """Run a web search for the first argument and return the result text.

    Returns an empty string when no arguments are given, or when the query
    is blank after stripping whitespace, so no search request is issued for
    empty input. Extra arguments beyond the first are ignored.
    """
    if not args:
        return ""
    query = args[0].strip()
    if not query:
        return ""
    # NOTE(review): WebSearchService is project-defined; assumed to return a
    # result whose str() form is the payload — confirm against callers.
    service = WebSearchService()
    return str(service.search(query))


async def web_scrape_url(args: List[str]) -> str:
Expand All @@ -102,8 +105,9 @@ async def web_scrape_url(args: List[str]) -> str:
logger.info("web_scrap_url: empty args")
return ""

logger.info("web_scrape_url: scraping %s", args[0])
doc = await WebScrapeService().fetch_and_convert(args[0])
url = args[0]
logger.info("web_scrape_url: scraping %s", url)
doc = await WebScrapeService().fetch_and_convert(url)
if len(args) > 1:
file_path = args[1]
with open(file_path, "w", encoding="utf-8") as file:
Expand Down
59 changes: 51 additions & 8 deletions src/jrdev/agents/research_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,15 +52,58 @@ async def interpret(

# Use a specific model for this task
research_model = self.app.state.model
response_text = await generate_llm_response(self.app, research_model, messages, task_id=worker_id)

json_content = ""
try:
json_content = cutoff_string(response_text, "```json", "```")
response_json = json.loads(json_content)
except (json.JSONDecodeError, KeyError) as e:
response_json = None
for attempt in range(2): # Retry once
response_text = await generate_llm_response(
self.app, research_model, messages, task_id=worker_id
)
json_content = ""
try:
json_content = cutoff_string(response_text, "```json", "```")
response_json = json.loads(json_content)
break # Success, exit loop
except (json.JSONDecodeError, KeyError) as e:
self.logger.warning(
f"Attempt {attempt + 1} failed to parse research agent LLM response: {e}"
)
if attempt == 0:
self.app.ui.print_text(
"Research agent had an issue parsing its response. Retrying once...",
print_type=PrintType.WARNING,
)
else:
self.logger.error(
f"Failed to parse research agent LLM response after 2 attempts. Aborting and summarizing. Response: {response_text}"
)
self.app.ui.print_text(
"Research agent failed to parse its response after a retry. Summarizing current findings and finishing.",
print_type=PrintType.ERROR,
)
summary_parts = [
"I was unable to decide on the next step due to a persistent error."
]
if previous_tool_calls:
summary_parts.append(
"Here is a summary of the research so far:"
)
for tc in previous_tool_calls:
summary_parts.append(
f"Action: {tc.formatted_cmd}\nResult: {tc.result}"
)
else:
summary_parts.append("No research actions were taken.")

summary = "\n".join(summary_parts)
self.thread.messages.append(
{"role": "assistant", "content": summary}
)
return {"type": "summary", "data": summary}

if response_json is None:
# This should not be reached if the logic above is correct, but as a safeguard.
self.logger.error(
f"Failed to parse research agent LLM response: {e}\nResponse:\n {response_text}\nRaw:\n{json_content}")
"response_json is None after retry loop, which should not happen. Aborting."
)
self.app.ui.print_text(
"Research agent had an issue parsing its own response. This may be a temporary issue. Aborting research task.",
print_type=PrintType.ERROR,
Expand Down
32 changes: 16 additions & 16 deletions src/jrdev/config/api_providers.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,13 @@
"required": false,
"default_profiles": {
"profiles": {
"advanced_reasoning": "o4-mini-2025-04-16",
"advanced_coding": "o4-mini-2025-04-16",
"intermediate_reasoning": "gpt-4.1-2025-04-14",
"intermediate_coding": "gpt-4.1-2025-04-14",
"quick_reasoning": "gpt-4.1-mini-2025-04-14",
"intent_router": "gpt-4.1-2025-04-14",
"low_cost_search": "gpt-4.1-2025-04-14"
"advanced_reasoning": "gpt-5-mini",
"advanced_coding": "gpt-5-mini",
"intermediate_reasoning": "gpt-5-mini",
"intermediate_coding": "gpt-5-mini",
"quick_reasoning": "gpt-5-mini",
"intent_router": "gpt-5-mini",
"low_cost_search": "gpt-5-mini"
},
"default_profile": "advanced_coding"
}
Expand All @@ -25,13 +25,13 @@
"required": false,
"default_profiles": {
"profiles": {
"advanced_reasoning": "claude-3-7-sonnet-20250219",
"advanced_coding": "claude-3-7-sonnet-20250219",
"intermediate_reasoning": "claude-3-5-haiku-20241022",
"intermediate_coding": "claude-3-5-haiku-20241022",
"quick_reasoning": "claude-3-5-haiku-20241022",
"intent_router": "claude-3-5-haiku-20241022",
"low_cost_search": "claude-3-5-haiku-20241022"
"advanced_reasoning": "claude-sonnet-4-5-20250929",
"advanced_coding": "claude-sonnet-4-5-20250929",
"intermediate_reasoning": "claude-haiku-4-5-20251001",
"intermediate_coding": "claude-haiku-4-5-20251001",
"quick_reasoning": "claude-haiku-4-5-20251001",
"intent_router": "claude-haiku-4-5-20251001",
"low_cost_search": "claude-haiku-4-5-20251001"
},
"default_profile": "advanced_coding"
}
Expand Down Expand Up @@ -65,9 +65,9 @@
"advanced_coding": "qwen/qwen3-coder:free",
"intermediate_reasoning": "qwen/qwen3-coder:free",
"intermediate_coding": "qwen/qwen3-coder:free",
"quick_reasoning": "qwen/qwen3-235b-a22b-2507:free",
"quick_reasoning": "qwen/qwen3-coder:free",
"intent_router": "qwen/qwen3-coder:free",
"low_cost_search": "qwen/qwen3-235b-a22b-2507:free"
"low_cost_search": "qwen/qwen3-coder:free"
},
"default_profile": "advanced_coding"
}
Expand Down
128 changes: 44 additions & 84 deletions src/jrdev/config/model_list.json
Original file line number Diff line number Diff line change
@@ -1,35 +1,11 @@
{
"models": [
{
"name": "claude-3-5-haiku-20241022",
"provider": "anthropic",
"is_think": false,
"input_cost": 8,
"output_cost": 40,
"context_tokens": 200000
},
{
"name": "claude-3-7-sonnet-20250219",
"provider": "anthropic",
"is_think": true,
"input_cost": 30,
"output_cost": 150,
"context_tokens": 200000
},
{
"name": "claude-opus-4-20250514",
"provider": "anthropic",
"is_think": true,
"input_cost": 150,
"output_cost": 750,
"context_tokens": 200000
},
{
"name": "claude-sonnet-4-20250514",
"name": "claude-haiku-4-5-20251001",
"provider": "anthropic",
"is_think": true,
"input_cost": 30,
"output_cost": 150,
"input_cost": 10,
"output_cost": 50,
"context_tokens": 200000
},
{
Expand All @@ -48,30 +24,6 @@
"output_cost": 11,
"context_tokens": 64000
},
{
"name": "o4-mini-2025-04-16",
"provider": "openai",
"is_think": false,
"input_cost": 11,
"output_cost": 44,
"context_tokens": 1000000
},
{
"name": "gpt-4.1-2025-04-14",
"provider": "openai",
"is_think": false,
"input_cost": 20,
"output_cost": 80,
"context_tokens": 1000000
},
{
"name": "gpt-4.1-mini-2025-04-14",
"provider": "openai",
"is_think": false,
"input_cost": 4,
"output_cost": 16,
"context_tokens": 1000000
},
{
"name": "google/gemini-2.5-pro",
"provider": "open_router",
Expand Down Expand Up @@ -104,14 +56,6 @@
"output_cost": 0,
"context_tokens": 262144
},
{
"name": "deepseek/deepseek-chat-v3-0324:free",
"provider": "open_router",
"is_think": false,
"input_cost": 0,
"output_cost": 0,
"context_tokens": 16384
},
{
"name": "deepseek/deepseek-r1-0528:free",
"provider": "open_router",
Expand All @@ -128,14 +72,6 @@
"output_cost": 150,
"context_tokens": 131072
},
{
"name": "mistralai/devstral-small-2507",
"provider": "open_router",
"is_think": true,
"input_cost": 1,
"output_cost": 3,
"context_tokens": 131072
},
{
"name": "qwen/qwen3-coder:free",
"provider": "open_router",
Expand All @@ -153,36 +89,60 @@
"context_tokens": 33000
},
{
"name": "anthropic/claude-sonnet-4",
"provider": "open_router",
"is_think": false,
"name": "claude-sonnet-4-5-20250929",
"provider": "anthropic",
"is_think": true,
"input_cost": 30,
"output_cost": 150,
"context_tokens": 200000
"context_tokens": 1000000
},
{
"name": "anthropic/claude-opus-4",
"provider": "open_router",
"is_think": false,
"name": "claude-opus-4-1-20250805",
"provider": "anthropic",
"is_think": true,
"input_cost": 150,
"output_cost": 750,
"context_tokens": 200000
},
{
"name": "openai/o4-mini-high",
"name": "gpt-5",
"provider": "openai",
"is_think": true,
"input_cost": 12,
"output_cost": 100,
"context_tokens": 400000
},
{
"name": "gpt-5-nano",
"provider": "openai",
"is_think": true,
"input_cost": 0,
"output_cost": 4,
"context_tokens": 400000
},
{
"name": "gpt-5-mini",
"provider": "openai",
"is_think": true,
"input_cost": 2,
"output_cost": 20,
"context_tokens": 400000
},
{
"name": "openai/gpt-5-mini",
"provider": "open_router",
"is_think": false,
"input_cost": 11,
"output_cost": 44,
"context_tokens": 200000
"is_think": true,
"input_cost": 2,
"output_cost": 20,
"context_tokens": 400000
},
{
"name": "openai/gpt-4.1",
"name": "openai/gpt-5-nano",
"provider": "open_router",
"is_think": false,
"input_cost": 20,
"output_cost": 80,
"context_tokens": 1047576
"is_think": true,
"input_cost": 0,
"output_cost": 4,
"context_tokens": 400000
},
{
"name": "gemma3:4b",
Expand Down
Loading