Cd/clean up #188


Merged: 11 commits, Apr 25, 2025
Changes from all commits
13 files renamed without changes.
55 changes (37 additions, 18 deletions): langchain-ask-human/app.py → archive/langchain-ask-human/app.py
@@ -1,68 +1,87 @@
 from langchain.chains.llm_math.base import LLMMathChain
 from langchain.agents import initialize_agent, Tool, AgentType, AgentExecutor
-from langchain_community.chat_models import ChatOpenAI
+from langchain_openai import ChatOpenAI
-from typing import *
-from langchain.tools import BaseTool
 
+from langchain_core.runnables import RunnableConfig
 import chainlit as cl
 from chainlit.sync import run_sync
 
+from typing import Optional
+
+from langchain_core.callbacks import (
+    AsyncCallbackManagerForToolRun,
+    CallbackManagerForToolRun,
+)
+from langchain_core.tools import BaseTool
+from langchain_core.tools.base import ArgsSchema
+from pydantic import BaseModel, Field
+from langgraph.prebuilt import create_react_agent
+
+
+class HumanInputSchema(BaseModel):
+    query: str = Field(description="The question to ask the human")
 
 
 class HumanInputChainlit(BaseTool):
     """Tool that adds the capability to ask user for input."""
 
-    name = "human"
-    description = (
+    name: str = "human"
+    description: str = (
         "You can ask a human for guidance when you think you "
         "got stuck or you are not sure what to do next. "
         "The input should be a question for the human."
     )
+    args_schema: Optional[ArgsSchema] = HumanInputSchema
+    return_direct: bool = False
 
     def _run(
         self,
         query: str,
-        run_manager=None,
+        run_manager: Optional[CallbackManagerForToolRun] = None,
     ) -> str:
         """Use the Human input tool."""
         res = run_sync(cl.AskUserMessage(content=query).send())
-        return res["content"]
+        return "test"
 
     async def _arun(
         self,
         query: str,
-        run_manager=None,
+        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
     ) -> str:
         """Use the Human input tool."""
         res = await cl.AskUserMessage(content=query).send()
-        return res["output"]
+        return res["content"]
+        # return "test"
 
 
 @cl.on_chat_start
 def start():
-    llm = ChatOpenAI(temperature=0, streaming=True, model_name="gpt-4-turbo-preview")
+    llm = ChatOpenAI(temperature=0, streaming=True, model_name="gpt-4o")
     llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
 
     tools = [
         HumanInputChainlit(),
         Tool(
             name="Calculator",
-            func=llm_math_chain.run,
+            func=llm_math_chain.invoke,
             description="useful for when you need to answer questions about math",
-            coroutine=llm_math_chain.arun,
+            coroutine=llm_math_chain.ainvoke,
         ),
     ]
-    agent = initialize_agent(
-        tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True
-    )
+
+    agent = create_react_agent(llm, tools=tools)
 
     cl.user_session.set("agent", agent)
 
 
 @cl.on_message
 async def main(message: cl.Message):
     agent = cl.user_session.get("agent")  # type: AgentExecutor
-    res = await agent.arun(
-        message.content, callbacks=[cl.AsyncLangchainCallbackHandler()]
+    config = RunnableConfig(callbacks=[cl.AsyncLangchainCallbackHandler()])
+    inputs = {"messages": [("user", message.content)]}
+
+    res = await agent.ainvoke(
+        inputs, config=config
     )
-    await cl.Message(content=res).send()
+    await cl.Message(content=res['messages'][-1].content).send()
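
Note: the archived example now drives the agent through LangGraph's prebuilt ReAct agent rather than the legacy initialize_agent. For reference, a minimal sketch of the same invocation shape outside Chainlit; the calculator tool here is a hypothetical stand-in for LLMMathChain, and langgraph plus langchain-openai are assumed installed with OPENAI_API_KEY set:

# Sketch only: same message-in/message-out shape as app.py's
# on_message handler, but as a plain script.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent


@tool
def calculator(expression: str) -> str:
    """Evaluate a simple arithmetic expression."""
    # Demo only: never eval untrusted input in real code.
    return str(eval(expression))


llm = ChatOpenAI(temperature=0, model="gpt-4o")
agent = create_react_agent(llm, tools=[calculator])

# The compiled graph takes a message list and returns the full state;
# the final assistant reply is the last message.
result = agent.invoke({"messages": [("user", "What is 12 * 7?")]})
print(result["messages"][-1].content)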
File renamed without changes.
@@ -108,7 +108,7 @@ async def on_message(message: cl.Message):
     stream_resp = None
     send_message = __truncate_conversation(message_history)
     try:
-        stream = openai_client.chat.completions.create(
+        stream = await openai_client.chat.completions.create(
             model="gpt-4",
             messages=send_message,
             stream=True,
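The one-line fix above awaits the completion call, which is required when openai_client is the async client; without the await, stream is a coroutine and iterating it with async for fails. A minimal sketch of the corrected pattern with the openai v1 SDK (assumes OPENAI_API_KEY is set):

# Sketch: async streaming with AsyncOpenAI; each chunk carries a
# content delta, which may be None (e.g. on the final chunk).
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    stream = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Say hello."}],
        stream=True,
    )
    async for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)


asyncio.run(main())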
4 files renamed without changes.
6 changes (3 additions, 3 deletions): llama-index/app.py
@@ -36,9 +36,9 @@ async def start():
     Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
     Settings.context_window = 4096
 
-    service_context = ServiceContext.from_defaults(
-        callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()])
-    )
+    Settings.callback_manager = CallbackManager([cl.LlamaIndexCallbackHandler()])
+    service_context = Settings.callback_manager
+
     query_engine = index.as_query_engine(
         streaming=True, similarity_top_k=2, service_context=service_context
     )
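Context for the change above: ServiceContext was deprecated in LlamaIndex 0.10 in favor of the global Settings singleton, so the callback manager now lives on Settings. Under that assumption the service_context argument can be dropped entirely; a sketch, where index is an assumed prebuilt VectorStoreIndex:

# Sketch: Settings-only configuration (llama-index >= 0.10); `index`
# is assumed to exist already. With the global Settings populated,
# as_query_engine picks up the callback manager on its own.
import chainlit as cl
from llama_index.core import Settings
from llama_index.core.callbacks import CallbackManager

Settings.callback_manager = CallbackManager([cl.LlamaIndexCallbackHandler()])
query_engine = index.as_query_engine(streaming=True, similarity_top_k=2)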
31 changes (4 additions, 27 deletions): loader-animation/app.py
@@ -10,7 +10,7 @@ async def send_animated_message(
     base_msg: str,
     frames: List[str],
     interval: float = 0.8
-) -> None:
+) -> None:
     """Display animated message with minimal resource usage"""
     msg = cl.Message(content=base_msg)
     await msg.send()
@@ -24,39 +24,16 @@ async def send_animated_message(
             current_frame = frames[progress % len(frames)]
             progress_bar = ("▣" * (progress % bar_length)).ljust(bar_length, "▢")
 
-            # Single update operation
-            msg.content = f"{current_frame} {base_msg}\n{progress_bar}"
+            # Single update operation - overwrite entire content
+            new_content = f"{current_frame} {base_msg}\n{progress_bar}"
+            msg.content = new_content
             await msg.update()
 
             progress += 1
             await asyncio.sleep(interval)
     except asyncio.CancelledError:
         msg.content = base_msg
         await msg.update()  # Final static message
-        base_msg: str,
-        frames: List[str],
-        interval: float = 0.8
-) -> None:
-    """Display animated message with minimal resource usage"""
-    msg = cl.Message(content=base_msg)
-    await msg.send()
-
-    progress = 0
-    bar_length = 12  # Optimal length for progress bar
-
-    try:
-        while True:
-            # Efficient progress calculation
-            current_frame = frames[progress % len(frames)]
-            progress_bar = ("▣" * (progress % bar_length)).ljust(bar_length, "▢")
-
-            # Single update operation
-            await msg.update(content=f"{current_frame} {base_msg}\n{progress_bar}")
-
-            progress += 1
-            await asyncio.sleep(interval)
-    except asyncio.CancelledError:
-        await msg.update(content=base_msg)  # Final static message
 
 @cl.on_message
 async def main(message: cl.Message) -> None:
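The cleanup above removes a stale duplicate of the loop body that still used the msg.update(content=...) form, which current Chainlit no longer accepts; the surviving version mutates msg.content first and then calls update() with no arguments. The usual way to drive the helper is as a cancellable background task, which exercises its CancelledError branch; a sketch, assuming send_animated_message as defined above:

# Sketch: run the loader as a background task and cancel it when the
# real work finishes; the helper's CancelledError branch then restores
# the plain base message.
import asyncio
import contextlib

import chainlit as cl


@cl.on_message
async def handle(message: cl.Message) -> None:
    loader = asyncio.create_task(
        send_animated_message("Working on it", ["◐", "◓", "◑", "◒"])
    )
    try:
        await asyncio.sleep(5)  # stand-in for the real work
    finally:
        loader.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            await loader  # let the cleanup update run
    await cl.Message(content="Done!").send()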
File renamed without changes.
25 changes (16 additions, 9 deletions): mcp-linear/app.py
@@ -55,15 +55,22 @@ def flatten(xss):
 @cl.on_mcp_connect
 async def on_mcp(connection, session: ClientSession):
     result = await session.list_tools()
-    tools = [
-        {
-            "name": t.name,
-            "description": t.description,
-            "input_schema": t.inputSchema,
-        }
-        for t in result.tools
-    ]
-
+    tools = [{
+        "name": t.name,
+        "description": t.description,
+        "input_schema": t.inputSchema,
+    } for t in result.tools]
+
+    # Save tools to a JSON file
+    import os
+
+    # Create directory if it doesn't exist
+    os.makedirs("tools_data", exist_ok=True)
+
+    # Save to JSON file
+    with open(f"tools_data/{connection.name}_tools.json", "w") as f:
+        json.dump(tools, f, indent=2)
+
     mcp_tools = cl.user_session.get("mcp_tools", {})
     mcp_tools[connection.name] = tools
     cl.user_session.set("mcp_tools", mcp_tools)
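Persisting each connection's tool list makes the schemas inspectable offline; the Linear dump added below is exactly this output. A minimal sketch of reloading it:

# Sketch: reload a persisted tool dump and list what the server offers.
import json

with open("tools_data/Linear_tools.json") as f:
    tools = json.load(f)

for t in tools:
    print(f"{t['name']}: {t['description']}")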
168 changes (168 additions, 0 deletions): mcp-linear/tools_data/Linear_tools.json (new file)
@@ -0,0 +1,168 @@
[
  {
    "name": "create_issue",
    "description": "Create a new issue in Linear",
    "input_schema": {
      "type": "object",
      "properties": {
        "title": {
          "type": "string",
          "description": "Issue title"
        },
        "description": {
          "type": "string",
          "description": "Issue description (markdown supported)"
        },
        "teamId": {
          "type": "string",
          "description": "Team ID"
        },
        "assigneeId": {
          "type": "string",
          "description": "Assignee user ID (optional)"
        },
        "priority": {
          "type": "number",
          "description": "Priority (0-4, optional)",
          "minimum": 0,
          "maximum": 4
        },
        "labels": {
          "type": "array",
          "items": {
            "type": "string"
          },
          "description": "Label IDs to apply (optional)"
        }
      },
      "required": [
        "title",
        "teamId"
      ]
    }
  },
  {
    "name": "list_issues",
    "description": "List issues with optional filters",
    "input_schema": {
      "type": "object",
      "properties": {
        "teamId": {
          "type": "string",
          "description": "Filter by team ID (optional)"
        },
        "assigneeId": {
          "type": "string",
          "description": "Filter by assignee ID (optional)"
        },
        "status": {
          "type": "string",
          "description": "Filter by status (optional)"
        },
        "first": {
          "type": "number",
          "description": "Number of issues to return (default: 50)"
        }
      }
    }
  },
  {
    "name": "update_issue",
    "description": "Update an existing issue",
    "input_schema": {
      "type": "object",
      "properties": {
        "issueId": {
          "type": "string",
          "description": "Issue ID"
        },
        "title": {
          "type": "string",
          "description": "New title (optional)"
        },
        "description": {
          "type": "string",
          "description": "New description (optional)"
        },
        "status": {
          "type": "string",
          "description": "New status (optional)"
        },
        "assigneeId": {
          "type": "string",
          "description": "New assignee ID (optional)"
        },
        "priority": {
          "type": "number",
          "description": "New priority (0-4, optional)",
          "minimum": 0,
          "maximum": 4
        }
      },
      "required": [
        "issueId"
      ]
    }
  },
  {
    "name": "list_teams",
    "description": "List all teams in the workspace",
    "input_schema": {
      "type": "object",
      "properties": {}
    }
  },
  {
    "name": "list_projects",
    "description": "List all projects",
    "input_schema": {
      "type": "object",
      "properties": {
        "teamId": {
          "type": "string",
          "description": "Filter by team ID (optional)"
        },
        "first": {
          "type": "number",
          "description": "Number of projects to return (default: 50)"
        }
      }
    }
  },
  {
    "name": "search_issues",
    "description": "Search for issues using a text query",
    "input_schema": {
      "type": "object",
      "properties": {
        "query": {
          "type": "string",
          "description": "Search query text"
        },
        "first": {
          "type": "number",
          "description": "Number of results to return (default: 50)"
        }
      },
      "required": [
        "query"
      ]
    }
  },
  {
    "name": "get_issue",
    "description": "Get detailed information about a specific issue",
    "input_schema": {
      "type": "object",
      "properties": {
        "issueId": {
          "type": "string",
          "description": "Issue ID"
        }
      },
      "required": [
        "issueId"
      ]
    }
  }
]
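
Each input_schema in this dump is plain JSON Schema, so tool arguments can be validated client-side before they are sent to the MCP server. A sketch using the jsonschema package (an assumed extra dependency, not part of this PR):

# Sketch: validate create_issue arguments against the persisted schema;
# jsonschema.validate raises ValidationError on bad input.
import json

from jsonschema import validate

with open("tools_data/Linear_tools.json") as f:
    schemas = {t["name"]: t["input_schema"] for t in json.load(f)}

args = {"title": "Fix login flow", "teamId": "TEAM-123", "priority": 2}
validate(instance=args, schema=schemas["create_issue"])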