Skip to content

Commit d79c90c

Browse files
committed
adding few new folders
1 parent 4830f19 commit d79c90c

File tree

7 files changed

+673
-11
lines changed

7 files changed

+673
-11
lines changed

adr/README.md

+77
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
# Architecture Decision Records (ADR)

This directory contains Architecture Decision Records (ADRs) documenting significant architectural decisions made in this project.

## What is an ADR?

An Architecture Decision Record is a document that captures an important architectural decision made along with its context and consequences.

## ADR Format

Each ADR follows this format:

```markdown
# ADR {number}: {title}

## Status

[Proposed | Accepted | Deprecated | Superseded]

## Context

What is the issue that we're seeing that is motivating this decision or change?

## Decision

What is the change that we're proposing and/or doing?

## Consequences

What becomes easier or more difficult to do because of this change?
```

## List of ADRs

### Project Structure

- [ADR-0001](./0001-modular-pattern-structure.md): Modular Pattern Structure
- [ADR-0002](./0002-async-first-approach.md): Async-First Approach
- [ADR-0003](./0003-type-hints-and-validation.md): Type Hints and Validation

### Building Blocks

- [ADR-0004](./0004-llm-provider-abstraction.md): LLM Provider Abstraction
- [ADR-0005](./0005-tool-integration-framework.md): Tool Integration Framework
- [ADR-0006](./0006-memory-management.md): Memory Management Strategy

### Workflows

- [ADR-0007](./0007-prompt-chain-design.md): Prompt Chain Design
- [ADR-0008](./0008-routing-strategy.md): Routing Strategy
- [ADR-0009](./0009-parallel-execution.md): Parallel Execution Approach
- [ADR-0010](./0010-orchestration-model.md): Orchestration Model
- [ADR-0011](./0011-optimization-strategies.md): Optimization Strategies

### Agents

- [ADR-0012](./0012-agent-architecture.md): Agent Architecture
- [ADR-0013](./0013-domain-specialization.md): Domain Specialization Approach

### Infrastructure

- [ADR-0014](./0014-error-handling.md): Error Handling Strategy
- [ADR-0015](./0015-logging-approach.md): Logging Approach
- [ADR-0016](./0016-testing-strategy.md): Testing Strategy
- [ADR-0017](./0017-configuration-management.md): Configuration Management

## Creating New ADRs

1. Copy the template from `template.md`
2. Create a new file with the next number in sequence
3. Fill in the sections
4. Add a link to this index
5. Submit for review

## Superseded ADRs

When an ADR is superseded, update its status and add a link to the new ADR that supersedes it.

agents/utils/llm.py

+142
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
"""
LLM Caller Utility

A utility class for making calls to various LLM APIs with consistent interface.
"""

import os
from typing import Dict, List, Optional, Union

import anthropic
from azure.ai.ml import MLClient
from google.cloud import aiplatform
from openai import AsyncOpenAI, OpenAI

class LLMCaller:
    """A utility class for making LLM API calls with a consistent interface.

    Supports the "anthropic", "openai", "google" (Vertex AI) and "azure"
    providers.  When no provider is given, one is auto-detected from the
    credentials present in the environment.
    """

    # Providers in auto-detection priority order, paired with the environment
    # variable whose presence indicates usable credentials.
    _PROVIDER_CREDENTIALS = (
        ("anthropic", "ANTHROPIC_API_KEY"),
        ("openai", "OPENAI_API_KEY"),
        ("google", "GOOGLE_APPLICATION_CREDENTIALS"),
        ("azure", "AZURE_OPENAI_API_KEY"),
    )

    # Built-in default model per provider; overridable at runtime via a
    # {PROVIDER}_DEFAULT_MODEL environment variable.
    _DEFAULT_MODELS = {
        "anthropic": "claude-3-opus-20240229",
        "openai": "gpt-4-turbo-preview",
        "google": "text-bison@002",
        "azure": "gpt-4",
    }

    @staticmethod
    def resolve_provider(provider: Optional[str] = None) -> str:
        """Return *provider* unchanged, or auto-detect one from the environment.

        Args:
            provider: Explicit provider name, or None to auto-detect.

        Returns:
            The provider name to use.

        Raises:
            ValueError: If no provider is given and no credentials are found.
        """
        if provider:
            return provider
        for name, env_var in LLMCaller._PROVIDER_CREDENTIALS:
            if os.getenv(env_var):
                return name
        raise ValueError("No LLM API credentials found in environment")

    @staticmethod
    def get_client(provider: Optional[str] = None):
        """Get the appropriate LLM client based on environment configuration.

        Args:
            provider: Explicit provider name; auto-detected when omitted.

        Returns:
            A provider-specific client object.

        Raises:
            ValueError: For an unknown provider or missing credentials.
        """
        provider = LLMCaller.resolve_provider(provider)

        if provider == "anthropic":
            # Async client: call() awaits the messages API, so the sync
            # anthropic.Anthropic() client cannot be used here.
            return anthropic.AsyncAnthropic()
        if provider == "openai":
            # Async client: call() awaits the chat-completions API.
            return AsyncOpenAI()
        if provider == "google":
            # NOTE(review): aiplatform.init() returns None, not a client
            # object, so the "google" branch of call() cannot work as
            # written -- confirm the intended Vertex AI client API.
            return aiplatform.init()
        if provider == "azure":
            # NOTE(review): MLClient is the Azure ML management client;
            # confirm it exposes the chat-completions surface call() expects.
            return MLClient.from_config()
        raise ValueError(f"Unsupported LLM provider: {provider}")

    @staticmethod
    def get_default_model(provider: str) -> str:
        """Get the default model for a provider.

        A {PROVIDER}_DEFAULT_MODEL environment variable, when set, takes
        precedence over the built-in default.

        Raises:
            ValueError: If *provider* is unknown.  (The original raised a
                bare KeyError here, inconsistent with the rest of the class.)
        """
        try:
            fallback = LLMCaller._DEFAULT_MODELS[provider]
        except KeyError:
            raise ValueError(f"Unsupported LLM provider: {provider}") from None
        return os.getenv(f"{provider.upper()}_DEFAULT_MODEL", fallback)

    @staticmethod
    async def call(
        system: str,
        user: str,
        provider: str = None,
        model: str = None,
        temperature: float = 0.7,
        max_tokens: int = 1000,
        stop: Optional[Union[str, List[str]]] = None
    ) -> str:
        """
        Make a call to an LLM API.

        Args:
            system: System message/prompt
            user: User message/prompt
            provider: LLM provider (anthropic, openai, google, azure)
            model: Model to use (defaults to provider's default)
            temperature: Sampling temperature (0.0 to 1.0)
            max_tokens: Maximum tokens in response
            stop: Optional stop sequence(s)

        Returns:
            The LLM's response text

        Raises:
            ValueError: For an unknown provider or missing credentials.
        """
        # Resolve the provider once, up front.  The original inferred it
        # *after* client creation via an isinstance() chain, which is
        # fragile and breaks as soon as the client classes change.
        provider = LLMCaller.resolve_provider(provider)
        client = LLMCaller.get_client(provider)
        model = model or LLMCaller.get_default_model(provider)

        if provider == "anthropic":
            response = await client.messages.create(
                model=model,
                max_tokens=max_tokens,
                temperature=temperature,
                system=system,
                messages=[{"role": "user", "content": user}],
                stop_sequences=stop
            )
            return response.content[0].text

        if provider == "openai":
            response = await client.chat.completions.create(
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                messages=[
                    {"role": "system", "content": system},
                    {"role": "user", "content": user}
                ],
                stop=stop
            )
            return response.choices[0].message.content

        if provider == "google":
            # NOTE(review): get_client() returns None for "google"; this
            # branch needs a real Vertex AI text-generation client.
            response = await client.predict_text(
                model=model,
                temperature=temperature,
                max_output_tokens=max_tokens,
                prompt=f"{system}\n\n{user}",
                stop_sequences=stop
            )
            return response.text

        if provider == "azure":
            response = await client.chat.completions.create(
                deployment_name=model,
                temperature=temperature,
                max_tokens=max_tokens,
                messages=[
                    {"role": "system", "content": system},
                    {"role": "user", "content": user}
                ],
                stop=stop
            )
            return response.choices[0].message.content

        raise ValueError(f"Unsupported LLM provider: {provider}")

    @staticmethod
    def format_prompt(template: str, **kwargs) -> str:
        """Format a prompt template with keyword variables via str.format."""
        return template.format(**kwargs)

agents/utils/web.py

+98
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
"""
2+
Web Utilities
3+
4+
Utilities for web search and webpage content extraction.
5+
"""
6+
7+
import os
8+
from typing import Dict, List, Optional
9+
import aiohttp
10+
from bs4 import BeautifulSoup
11+
from duckduckgo_search import AsyncDDGS
12+
13+
from .llm import LLMCaller
14+
15+
class WebSearchTool:
    """A tool for performing web searches via DuckDuckGo."""

    def __init__(self, name: str, description: str, parameters: Dict):
        # Tool metadata, typically surfaced to an LLM for tool selection.
        self.name = name
        self.description = description
        self.parameters = parameters

    async def __call__(self, query: str, num_results: int = 3) -> Dict:
        """
        Perform a web search using DuckDuckGo.

        Args:
            query: Search query
            num_results: Number of results to return

        Returns:
            Dict containing search results, each with "title", "snippet"
            and "url" keys.
        """
        async with AsyncDDGS() as ddgs:
            results = []
            async for r in ddgs.text(query, max_results=num_results):
                results.append({
                    "title": r["title"],
                    "snippet": r["body"],
                    # duckduckgo_search result dicts expose the URL under
                    # "href", not "link" -- the original raised KeyError
                    # on every result.
                    "url": r["href"]
                })
            return {"results": results}
43+
44+
class WebpageReaderTool:
    """A tool for reading and extracting content from webpages."""

    def __init__(self, name: str, description: str, parameters: Dict):
        # Tool metadata, typically surfaced to an LLM for tool selection.
        self.name = name
        self.description = description
        self.parameters = parameters

    async def __call__(self, url: str) -> Dict:
        """
        Read and extract content from a webpage.

        Args:
            url: URL of the webpage to read

        Returns:
            Dict with "title", "description", "content" (LLM-cleaned text)
            and "url" keys.

        Raises:
            ValueError: If the page does not respond with HTTP 200.
        """
        # Fetch the page, then release the connection before the (slow)
        # parsing and LLM post-processing steps.
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                if response.status != 200:
                    raise ValueError(f"Failed to fetch URL: {url}")
                html = await response.text()

        soup = BeautifulSoup(html, "html.parser")

        # Remove script and style elements so only visible text remains.
        for element in soup(["script", "style"]):
            element.decompose()

        # Extract text content and collapse all runs of whitespace.
        text = self._normalize_whitespace(soup.get_text())

        # Extract metadata; either piece may be absent from the page.
        # (soup.title.string is None for an empty <title>, so coerce to "".)
        title = (soup.title.string or "") if soup.title else ""
        meta_desc = soup.find("meta", attrs={"name": "description"})
        # .get() avoids a KeyError on a description tag without a
        # content attribute.
        description = meta_desc.get("content", "") if meta_desc else ""

        # Use LLM to clean and structure content (truncated to bound cost).
        cleaned_content = await LLMCaller.call(
            system="You are a helpful assistant that cleans and structures webpage content.",
            user=f"Clean and structure the following webpage content, removing any irrelevant parts like navigation, footers, etc:\n\n{text[:2000]}..."  # Limit content length
        )

        return {
            "title": title,
            "description": description,
            "content": cleaned_content,
            "url": url
        }

    @staticmethod
    def _normalize_whitespace(text: str) -> str:
        """Collapse every run of whitespace in *text* to a single space."""
        return " ".join(text.split())

0 commit comments

Comments
 (0)