diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..2b4b5b2 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,25 @@ +name: ci +on: + push: + branches: + - master + - main +permissions: + contents: write +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: 3.x + - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV + - uses: actions/cache@v3 + with: + key: mkdocs-material-${{ env.cache_id }} + path: .cache + restore-keys: | + mkdocs-material- + - run: pip install mkdocs-material + - run: mkdocs gh-deploy --force diff --git a/creator/__version__.py b/creator/__version__.py index b3f4756..ae73625 100644 --- a/creator/__version__.py +++ b/creator/__version__.py @@ -1 +1 @@ -__version__ = "0.1.2" +__version__ = "0.1.3" diff --git a/creator/agents/__init__.py b/creator/agents/__init__.py index 1e2a748..e4bce29 100644 --- a/creator/agents/__init__.py +++ b/creator/agents/__init__.py @@ -1,12 +1,16 @@ -from .extractor_agent import skill_extractor_agent -from .interpreter_agent import code_interpreter_agent -from .tester_agent import code_tester_agent -from .refactor_agent import code_refactor_agent +from .extractor_agent import create_skill_extractor_agent +from .interpreter_agent import create_code_interpreter_agent +from .tester_agent import create_code_tester_agent +from .refactor_agent import create_code_refactor_agent +from .prompt_enhancer_agent import create_prompt_enhancer_agent +from .creator_agent import create_creator_agent __all__ = [ - "skill_extractor_agent", - "code_interpreter_agent", - "code_tester_agent", - "code_refactor_agent" + "create_skill_extractor_agent", + "create_code_interpreter_agent", + "create_code_tester_agent", + "create_code_refactor_agent", + "create_prompt_enhancer_agent", + "create_creator_agent" ] diff --git a/creator/agents/base.py b/creator/agents/base.py index 9f70f4b..3985b92 
100644 --- a/creator/agents/base.py +++ b/creator/agents/base.py @@ -23,10 +23,18 @@ class BaseAgent(LLMChain): system_template: str = "" allow_user_confirm: bool = False prompt: ChatPromptTemplate = ChatPromptTemplate.from_messages(messages=["system", ""]) + agent_name: str = "BaseAgent" + share_memory: bool = False @property def _chain_type(self): - return "BaseAgent" + return self.agent_name + + def __repr__(self) -> str: + return self.agent_name + "()" + + def __hash__(self): + return hash(self.agent_name) @property def input_keys(self) -> List[str]: @@ -70,14 +78,14 @@ def tool_result_to_str(self, tool_result) -> str: return json.dumps(tool_result, ensure_ascii=False) return str(tool_result) - def run_tool(self, function_call: Dict[str, Any]): + def run_tool(self, function_call: Dict[str, Any], run_manager: Optional[CallbackManager] = None): function_name = function_call.get("name", "") arguments = parse_partial_json(function_call.get("arguments", "{}")) tool_result = None for tool in self.tools: if tool.name == function_name: if self.human_confirm(): - tool_result = tool.run(arguments) + tool_result = tool.run(arguments, callbacks=run_manager) tool_result = self.tool_result_to_str(tool_result) tool_result = FunctionMessage(name=function_name, content=tool_result) self.update_tool_result_in_callbacks(tool_result) @@ -96,31 +104,37 @@ def messages_hot_fix(self, langchain_messages): def preprocess_inputs(self, inputs: Dict[str, Any]): return inputs + def add_to_memory(self, messages): + """Add message to long-term memory""" + pass + def run_workflow(self, inputs: Dict[str, Any], run_manager: Optional[CallbackManager] = None) -> Dict[str, Any]: + run_manager_callbacks = run_manager.get_child() if run_manager else None inputs = self.preprocess_inputs(inputs) messages = inputs.pop("messages") langchain_messages = convert_openai_messages(messages) + self.llm.function_calls = self.function_schemas llm_with_functions = self.llm.bind(functions=self.function_schemas) 
current_try = 0 while current_try < self.total_tries: self.start_callbacks() prompt = self.construct_prompt(langchain_messages) - llm_chain = prompt | llm_with_functions | self.postprocess_mesasge - message = llm_chain.invoke(inputs) + llm_chain = (prompt | llm_with_functions | self.postprocess_mesasge).with_config({"run_name": f"Iteration {current_try+1}"}) + message = llm_chain.invoke(inputs, {"callbacks": run_manager_callbacks}) langchain_messages.append(message) function_call = message.additional_kwargs.get("function_call", None) if function_call is None: self.end_callbacks(message) break - tool_result = self.run_tool(function_call) + tool_result = self.run_tool(function_call, run_manager_callbacks) if tool_result is None: self.end_callbacks(message) break langchain_messages.append(tool_result) langchain_messages = self.messages_hot_fix(langchain_messages) current_try += 1 - self.end_callbacks(message) + self.end_callbacks(message=message) langchain_messages = remove_tips(langchain_messages) openai_messages = list(map(convert_message_to_dict, langchain_messages)) return openai_messages @@ -159,4 +173,3 @@ def task_target(): result = output_queue.pop() yield True, result return - diff --git a/creator/agents/creator_agent.py b/creator/agents/creator_agent.py index 7ef43ba..445c2ce 100644 --- a/creator/agents/creator_agent.py +++ b/creator/agents/creator_agent.py @@ -10,7 +10,7 @@ from creator.code_interpreter.safe_python import SafePythonInterpreter from creator.config.library import config from creator.utils import load_system_prompt, get_user_info, remove_tips -from creator.llm.llm_creator import create_llm +from creator.llm import create_llm from .base import BaseAgent @@ -20,19 +20,15 @@ ALLOWED_FUNCTIONS = {"create", "save", "search", "CodeSkill"} ALLOW_METHODS = {".show", ".show_code", ".test", ".run", ".save", "__add__", "__gt__", "__lt__", "__annotations__"} IMPORT_CODE = ( - "from creator.core import creator\n" + "from creator import create, save, 
search\n" "from creator.core.skill import CodeSkill\n" - "create, save, search = creator.create, creator.save, creator.search\n\n" ) class CreatorAgent(BaseAgent): total_tries: int = 5 allow_user_confirm: bool = config.run_human_confirm - - @property - def _chain_type(self): - return "CreatorAgent" + agent_name: str = "CreatorAgent" def prep_inputs(self, inputs: Dict[str, Any] | Any) -> Dict[str, str]: inputs["OPEN_CREATOR_API_DOC"] = OPEN_CREATOR_API_DOC @@ -68,21 +64,15 @@ async def ainvoke(self, inputs: Dict[str, Any], config: RunnableConfig | None = return {"messages": self.run(inputs)} -def create_creator_agent(llm): +def create_creator_agent(config): template = load_system_prompt(config.creator_agent_prompt_path) - code_interpreter = SafePythonInterpreter(allowed_functions=ALLOWED_FUNCTIONS, allowed_methods=ALLOW_METHODS, redirect_output=True) code_interpreter.setup(IMPORT_CODE) - chain = CreatorAgent( - llm=llm, + llm=create_llm(config, config.agent_model_config.CREATOR_AGENT), system_template=template, tools=[code_interpreter], function_schemas=[code_interpreter.to_function_schema()], verbose=False, ) return chain - - -llm = create_llm(config) -open_creator_agent = create_creator_agent(llm=llm) diff --git a/creator/agents/extractor_agent.py b/creator/agents/extractor_agent.py index 5b8a8fd..4d10f11 100644 --- a/creator/agents/extractor_agent.py +++ b/creator/agents/extractor_agent.py @@ -1,21 +1,17 @@ from typing import Dict, Any + from langchain.prompts import ChatPromptTemplate from langchain.output_parsers.json import parse_partial_json -from creator.config.library import config -from creator.utils import convert_to_values_list, get_user_info, load_system_prompt -import json - +from creator.utils import convert_to_values_list, get_user_info, load_system_prompt, load_json_schema from creator.llm import create_llm + from .base import BaseAgent class SkillExtractorAgent(BaseAgent): output_key: str = "extracted_skill" - - @property - def _chain_type(self): - 
return "SkillExtractorAgent" + agent_name: str = "SkillExtractorAgent" def construct_prompt(self, langchain_messages: Dict[str, Any]): prompt = ChatPromptTemplate.from_messages(messages=[ @@ -23,24 +19,28 @@ def construct_prompt(self, langchain_messages: Dict[str, Any]): ("system", self.system_template + get_user_info()) ]) return prompt - + def parse_output(self, messages): function_call = messages[-1].get("function_call", None) - - if function_call is not None: - extracted_skill = parse_partial_json(function_call.get("arguments", "{}")) + try: + if function_call is not None: + content = function_call.get("arguments", "{}") + else: + content = messages[-1].get("content", "{}") + extracted_skill = parse_partial_json(content) extracted_skill["conversation_history"] = messages[:-1] extracted_skill["skill_parameters"] = convert_to_values_list(extracted_skill["skill_parameters"]) if "skill_parameters" in extracted_skill else None extracted_skill["skill_return"] = convert_to_values_list(extracted_skill["skill_return"]) if "skill_return" in extracted_skill else None return {"extracted_skill": extracted_skill} + except Exception: + pass return {"extracted_skill": None} -def create_skill_extractor_agent(llm): +def create_skill_extractor_agent(config): template = load_system_prompt(config.extractor_agent_prompt_path) # current file's parent as dir - with open(config.codeskill_function_schema_path, encoding="utf-8") as f: - code_skill_json_schema = json.load(f) + code_skill_json_schema = load_json_schema(config.codeskill_function_schema_path) function_schema = { "name": "extract_formmated_skill", "description": "a function that extracts a skill from a conversation history", @@ -48,13 +48,9 @@ def create_skill_extractor_agent(llm): } chain = SkillExtractorAgent( - llm=llm, + llm=create_llm(config, config.agent_model_config.EXTRACTOR_AGENT), system_template=template, function_schemas=[function_schema], verbose=False ) return chain - - -llm = create_llm(config) 
-skill_extractor_agent = create_skill_extractor_agent(llm) diff --git a/creator/agents/group_chat.py b/creator/agents/group_chat.py new file mode 100644 index 0000000..52ec00b --- /dev/null +++ b/creator/agents/group_chat.py @@ -0,0 +1,118 @@ +from typing import List, Dict, Any, Optional +import networkx as nx + +from creator.agents.base import BaseAgent +from creator.utils import print + +from langchain.chains.base import Chain +from langchain.callbacks.manager import CallbackManagerForChainRun + + +class GroupChat(Chain): + graph: Optional[nx.DiGraph] = None + max_consecutive_auto_reply: int = 3 + + @property + def input_keys(self) -> List[str]: + """Keys expected to be in the chain input.""" + return ["messages", "sender", "receivers"] + + @property + def output_keys(self) -> List[str]: + """Keys expected to be in the chain output.""" + return ["messages", "sender", "receivers"] + + def add_agent(self, agent: BaseAgent): + """ + Add an agent to the graph. + + :param agent: The agent to be added. + """ + self.graph.add_node(agent.agent_name, agent=agent) + + def add_agents(self, agents: List[BaseAgent]): + """ + Add a list of agents to the graph. + + :param agents: The list of agents to be added. + """ + for agent in agents: + self.add_agent(agent) + + def remove_agent(self, agent_name: str): + """ + Remove an agent from the graph. + + :param agent_name: The name of the agent to be removed. + """ + if agent_name in self.graph: + self.graph.remove_node(agent_name) + + def add_edge(self, agent1: BaseAgent, agent2: BaseAgent): + """ + Add an edge between two agents. + + :param agent1: The first agent. + :param agent2: The second agent. + """ + self.graph.add_edge(agent1.agent_name, agent2.agent_name) + + def remove_edge(self, agent1: BaseAgent, agent2: BaseAgent): + """ + Remove an edge between two agents. + + :param agent1: The first agent. + :param agent2: The second agent. 
+ """ + self.graph.remove_edge(agent1.agent_name, agent2.agent_name) + + @classmethod + def from_mapping(cls, mapping: Dict[BaseAgent, List[BaseAgent]]): + graph = nx.DiGraph() + node_set = set() + for from_node, to_nodes in mapping.items(): + if from_node.agent_name not in node_set: + node_set.add(from_node) + for node in to_nodes: + if node.agent_name not in node_set: + node_set.add(node) + graph.add_edge(from_node.agent_name, node.agent_name) + cls.graph = graph + return cls + + def _call( + self, + inputs: Dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> Dict[str, Any]: + return self.run_chat(inputs["messages"], inputs["sender"], inputs["receivers"]) + + def run_chat(self, messages: List[Dict], sender: str, receivers: List[str], run_manager: Optional[CallbackManagerForChainRun] = None): + """Run a group chat.""" + assert len(messages) > 0, "Input Messages Must Not Be Empty" + curr_cnt = 0 + while curr_cnt < self.max_consecutive_auto_reply: + for receiver in receivers: + try: + receiver_agent = self.graph.nodes[receiver] + except KeyError: + print("> agent {receiver} not found", print_type="markdown") + raise KeyError + if not receiver_agent.share_memory: + messages = [messages[-1]] + output_messages = receiver_agent.with_config({"callbacks": run_manager.get_child()}).invoke({"messages": messages}) + sender = receiver + try: + receiver = output_messages[-1]["receiver"] + except KeyError: + print("> agent {receiver} has no receiver", print_type="markdown") + raise KeyError + + if not receiver_agent.share_memory: + messages = messages[:-1] + output_messages + else: + messages = output_messages + + if receiver == "human": + return messages, sender, receiver + curr_cnt += 1 diff --git a/creator/agents/interpreter_agent.py b/creator/agents/interpreter_agent.py index 58b1eaa..e0a31b3 100644 --- a/creator/agents/interpreter_agent.py +++ b/creator/agents/interpreter_agent.py @@ -6,7 +6,7 @@ from creator.code_interpreter import 
CodeInterpreter, language_map from creator.config.library import config from creator.utils import load_system_prompt, remove_tips -from creator.llm.llm_creator import create_llm +from creator.llm import create_llm from .base import BaseAgent @@ -18,11 +18,8 @@ class CodeInterpreterAgent(BaseAgent): total_tries: int = 10 allow_user_confirm: bool = config.run_human_confirm + agent_name: str = "CodeInterpreterAgent" - @property - def _chain_type(self): - return "CodeInterpreterAgent" - def postprocess_mesasge(self, message): function_call = message.additional_kwargs.get("function_call", None) if function_call is not None: @@ -49,19 +46,15 @@ def messages_hot_fix(self, langchain_messages): return langchain_messages -def create_code_interpreter_agent(llm): +def create_code_interpreter_agent(config): tool = CodeInterpreter() function_schema = tool.to_function_schema() template = load_system_prompt(config.interpreter_agent_prompt_path) chain = CodeInterpreterAgent( - llm=llm, + llm=create_llm(config, config.agent_model_config.INTERPRETER_AGENT), system_template=template, function_schemas=[function_schema], tools=[tool], verbose=False, ) return chain - - -llm = create_llm(config) -code_interpreter_agent = create_code_interpreter_agent(llm=llm) diff --git a/creator/agents/prompt_enhancer_agent.py b/creator/agents/prompt_enhancer_agent.py new file mode 100644 index 0000000..703a6fd --- /dev/null +++ b/creator/agents/prompt_enhancer_agent.py @@ -0,0 +1,45 @@ +from typing import Any, Dict + +from langchain.prompts import ChatPromptTemplate +from langchain.output_parsers.json import parse_partial_json + +from creator.utils import load_system_prompt, load_json_schema +from creator.llm import create_llm +from creator.utils import print + +from creator.agents.base import BaseAgent + + +class PromptEnhancerAgent(BaseAgent): + output_key: str = "request" + agent_name: str = "PromptEnhancerAgent" + + def construct_prompt(self, langchain_messages: Dict[str, Any]): + prompt = 
ChatPromptTemplate.from_messages(messages=[ + *langchain_messages, + ("system", self.system_template) + ]) + return prompt + + def parse_output(self, messages): + orignal_request = messages[0].get("content", "") + function_call = messages[1].get("function_call", None) + if function_call is not None: + rewrited_prompt = parse_partial_json(function_call.get("arguments", "{}")) + prefix_prompt = rewrited_prompt.get("prefix_prompt", "") + postfix_prompt = rewrited_prompt.get("postfix_prompt", "") + # print(f"[green]{prefix_prompt}[/green]\n{orignal_request}\n[green]{postfix_prompt}[/green]") + return {"request": "\n".join([prefix_prompt, orignal_request, postfix_prompt])} + return {"request": orignal_request} + + +def create_prompt_enhancer_agent(config): + template = load_system_prompt(config.prompt_enhancer_agent_prompt_path) + function_schema = load_json_schema(config.prompt_enhancer_schema_path) + chain = PromptEnhancerAgent( + llm=create_llm(config, config.agent_model_config.PROMPT_ENHANCER_AGENT), + system_template=template, + function_schemas=[function_schema], + verbose=False + ) + return chain diff --git a/creator/agents/refactor_agent.py b/creator/agents/refactor_agent.py index 0b11372..49176ad 100644 --- a/creator/agents/refactor_agent.py +++ b/creator/agents/refactor_agent.py @@ -1,23 +1,18 @@ from typing import Any, Dict -import json import os from langchain.prompts import ChatPromptTemplate from langchain.output_parsers.json import parse_partial_json -from creator.config.library import config -from creator.utils import convert_to_values_list, load_system_prompt, get_user_info -from creator.llm.llm_creator import create_llm +from creator.llm import create_llm +from creator.utils import convert_to_values_list, load_system_prompt, get_user_info, load_json_schema from .base import BaseAgent class CodeRefactorAgent(BaseAgent): output_key: str = "refacted_skills" - - @property - def _chain_type(self): - return "CodeRefactorAgent" + agent_name: str = 
"CodeRefactorAgent" def construct_prompt(self, langchain_messages: Dict[str, Any]): prompt = ChatPromptTemplate.from_messages(messages=[ @@ -39,12 +34,10 @@ def parse_output(self, messages): return {"refacted_skills": None} -def create_code_refactor_agent(llm): +def create_code_refactor_agent(config): template = load_system_prompt(config.refactor_agent_prompt_path) path = os.path.join(config.codeskill_function_schema_path) - with open(path, encoding="utf-8") as f: - code_skill_json_schema = json.load(f) - + code_skill_json_schema = load_json_schema(os.path.join(config.codeskill_function_schema_path)) function_schema = { "name": "create_refactored_codeskills", "description": "a function that constructs a list of refactored skill objects. return only one item when your action is to combine or refine skill object(s), otherwise return more than one items", @@ -62,13 +55,9 @@ def create_code_refactor_agent(llm): } chain = CodeRefactorAgent( - llm=llm, + llm=create_llm(config, config.agent_model_config.REFACTOR_AGENT), system_template=template, function_schemas=[function_schema], verbose=False ) return chain - - -llm = create_llm(config) -code_refactor_agent = create_code_refactor_agent(llm) diff --git a/creator/agents/tester_agent.py b/creator/agents/tester_agent.py index 01975e0..c1754b1 100644 --- a/creator/agents/tester_agent.py +++ b/creator/agents/tester_agent.py @@ -5,8 +5,8 @@ from creator.code_interpreter import CodeInterpreter, language_map from creator.config.library import config -from creator.utils import load_system_prompt, remove_tips -from creator.llm.llm_creator import create_llm +from creator.llm import create_llm +from creator.utils import load_system_prompt, load_json_schema, remove_tips from .base import BaseAgent @@ -19,10 +19,7 @@ class CodeTesterAgent(BaseAgent): total_tries: int = 10 output_key: str = "output" allow_user_confirm: bool = config.run_human_confirm - - @property - def _chain_type(self): - return "CodeTesterAgent" + agent_name: str = 
"CodeTesterAgent" def postprocess_mesasge(self, message): function_call = message.additional_kwargs.get("function_call", None) @@ -38,7 +35,7 @@ def postprocess_mesasge(self, message): } message.additional_kwargs["function_call"] = function_call return message - + def messages_hot_fix(self, langchain_messages): langchain_messages = remove_tips(langchain_messages) tool_result = langchain_messages[-1].content @@ -66,23 +63,17 @@ def parse_output(self, messages): } -def create_code_tester_agent(llm): +def create_code_tester_agent(config): template = load_system_prompt(config.tester_agent_prompt_path) tool = CodeInterpreter() code_interpreter_function_schema = tool.to_function_schema() - with open(config.testsummary_function_schema_path, encoding="utf-8") as f: - test_summary_function_schema = json.load(f) - + test_summary_function_schema = load_json_schema(config.testsummary_function_schema_path) chain = CodeTesterAgent( - llm=llm, + llm=create_llm(config, config.agent_model_config.TESTER_AGENT), system_template=template, function_schemas=[code_interpreter_function_schema, test_summary_function_schema], tools=[tool], verbose=False, ) return chain - - -llm = create_llm(config) -code_tester_agent = create_code_tester_agent(llm=llm) diff --git a/creator/app/server.py b/creator/app/server.py index 70a0676..9dd2c5c 100644 --- a/creator/app/server.py +++ b/creator/app/server.py @@ -1,6 +1,6 @@ from fastapi import FastAPI -from creator.agents.creator_agent import create_llm, create_creator_agent -from creator.config.library import config +from creator.agents.creator_agent import create_creator_agent +from creator import config from creator.__version__ import __version__ as version from pydantic import BaseModel @@ -20,7 +20,7 @@ class Output(BaseModel): ) config.use_rich = False -open_creator_agent = create_creator_agent(create_llm(config)) +open_creator_agent = create_creator_agent(config) @app.post("/agents/creator") diff --git a/creator/app/streamlit_app.py 
b/creator/app/streamlit_app.py index 43a3c33..5296913 100644 --- a/creator/app/streamlit_app.py +++ b/creator/app/streamlit_app.py @@ -4,8 +4,7 @@ sys.path.append(os.path.join(os.path.dirname(script_path), "../..")) import streamlit as st -from creator.agents.creator_agent import open_creator_agent -from creator.agents import code_interpreter_agent +from creator.agents import create_creator_agent, create_code_interpreter_agent from creator import config from langchain.callbacks.streamlit.streamlit_callback_handler import _convert_newlines from langchain.output_parsers.json import parse_partial_json @@ -21,6 +20,11 @@ agent_name = "interpreter_agent" +agent_mapping = { + "interpreter_agent": create_code_interpreter_agent(config), + "creator_agent": create_creator_agent(config) +} + def setup_slidebar(): global agent_name @@ -28,14 +32,19 @@ def setup_slidebar(): openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password") "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)" "[View the source code](https://github.com/timedomain-tech/open-creator/tree/main/creator/app/streamlit_app.py)" - os.environ["OPENAI_API_KEY"] = openai_api_key + curr_api_key = os.environ.get("OPENAI_API_KEY", "") + if not curr_api_key: + os.environ["OPENAI_API_KEY"] = openai_api_key model_list = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "gpt-4"] model = st.selectbox("Model", model_list, key="model") - config.model = model temperature = st.slider("Temperature", 0.0, 1.0, 0.0, 0.05, key="temperature") config.temperature = temperature agent_list = ["creator_agent", "interpreter_agent"] selected_agent = st.selectbox("Agent", agent_list, key="agent") + if selected_agent == "creator_agent": + config.agent_model_config.CREATOR_AGENT = model + else: + config.agent_model_config.INTERPRETER_AGENT = model if agent_name != selected_agent: agent_name = selected_agent add_session() @@ -60,8 +69,6 @@ def setup_state(): if "langugae" not in st.session_state: 
st.session_state["language"] = "English" - if "agent" not in st.session_state: - st.session_state["agent"] = "interpreter_agent" def disable(): st.session_state["disabled"] = True @@ -171,7 +178,7 @@ def handle_input(): messages.append({"role": "user", "content": prompt}) print("current input messages", messages) render_conversation_history(container, messages) - agent = open_creator_agent if agent_name == "creator_agent" else code_interpreter_agent + agent = agent_mapping[agent_name] return_messages = stream_render(agent, messages, container) current_session["messages"] = return_messages diff --git a/creator/callbacks/rich_manager.py b/creator/callbacks/rich_manager.py index a561822..5318c72 100644 --- a/creator/callbacks/rich_manager.py +++ b/creator/callbacks/rich_manager.py @@ -84,19 +84,18 @@ def finish(self, message=None, err=None) -> None: def refresh_text(self, cursor: bool = True) -> None: """Refreshes the content display.""" text = self.content + replacement = "```text" lines = text.split('\n') inside_code_block = False - for line in lines: + for i, line in enumerate(lines): # find the start of the code block - if line.startswith("```"): + if line.strip().startswith("```"): inside_code_block = not inside_code_block + if inside_code_block: + lines[i] = replacement content = '\n'.join(lines) - if cursor: - content += "█" - else: - if content.endswith("█"): - content = content[:-1] + content = self.update_cursor(cursor, content) if inside_code_block: content += "\n```" markdown = Markdown(content.strip()) @@ -109,7 +108,8 @@ def refresh_code(self, cursor: bool = True) -> None: code_table = self._create_code_table(cursor) output_panel = self._create_output_panel() - group = Group(code_table, output_panel) + group_items = [code_table, output_panel] + group = Group(*group_items) self.code_live.update(group) self.code_live.refresh() @@ -122,6 +122,13 @@ def refresh(self, cursor: bool = True, is_code: bool = True) -> None: else: self.refresh_text(cursor=cursor) + 
def update_cursor(self, cursor, content): + if cursor: + content += "●" + elif content.endswith("●"): + content = content[:-1] + return content + def update_tool_result(self, chunk): if not self.use_rich: return @@ -167,11 +174,7 @@ def _create_code_table(self, cursor: bool) -> Panel: code_table = Table(show_header=False, show_footer=False, box=None, padding=0, expand=True) code_table.add_column() - if cursor: - self.code += "█" - else: - if len(self.code) > 0 and self.code[-1] == "█": - self.code = self.code[:-1] + self.code = self.update_cursor(cursor, self.code) code_lines = self.code.strip().split('\n') for i, line in enumerate(code_lines, start=1): @@ -193,7 +196,7 @@ def _get_line_syntax(self, line: str, line_number: int) -> Syntax: def _create_output_panel(self) -> Panel: """Creates a panel for displaying the output.""" if not self.tool_result or self.tool_result == "None": - return Panel("", box=MINIMAL, style="#FFFFFF on #3b3b37") + return "" return Panel(self.tool_result, box=MINIMAL, style="#FFFFFF on #3b3b37") diff --git a/creator/callbacks/streaming_stdout.py b/creator/callbacks/streaming_stdout.py index 0a457f6..fdd9f22 100644 --- a/creator/callbacks/streaming_stdout.py +++ b/creator/callbacks/streaming_stdout.py @@ -1,88 +1,50 @@ -from typing import Any +from typing import Any, Callable from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler +from langchain.schema.messages import AIMessageChunk from .buffer_manager import buffer_output_manager from .rich_manager import rich_output_manager from .file_manager import file_output_manager -class OutputBufferStreamingHandler(StreamingStdOutCallbackHandler): +class BaseStreamingHandler(StreamingStdOutCallbackHandler): + def __init__(self, output_manager: Callable): + self.output_manager = output_manager def on_chain_start(self, **kwargs: Any) -> None: - """Run when chain starts running.""" agent_name = kwargs.get("agent_name") - buffer_output_manager.add(agent_name) + 
self.output_manager.add(agent_name) - def on_llm_new_token(self, **kwargs: Any) -> None: - """Run on new LLM token. Only available when streaming is enabled.""" + def on_llm_new_token(self, token: str, **kwargs: Any) -> None: chunk = kwargs.get("chunk", None) - if chunk is not None: - buffer_output_manager.update(chunk) + if chunk is None: + chunk = AIMessageChunk(content=token, additional_kwargs=kwargs) + self.output_manager.update(chunk) def on_tool_end(self, **kwargs: Any) -> Any: chunk = kwargs.get("chunk", None) if chunk is not None: - buffer_output_manager.update_tool_result(chunk) + self.output_manager.update_tool_result(chunk) def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: - buffer_output_manager.finish(err=error) + self.output_manager.finish(err=error) def on_chain_end(self, **kwargs: Any) -> None: - """Run when chain finishes running.""" message = kwargs.get("message", None) - buffer_output_manager.finish(message=message) - + self.output_manager.finish(message=message) -class RichTerminalStreamingHandler(StreamingStdOutCallbackHandler): - - def on_chain_start(self, **kwargs: Any) -> None: - """Run when chain starts running.""" - agent_name = kwargs.get("agent_name") - rich_output_manager.add(agent_name) - def on_llm_new_token(self, **kwargs: Any) -> None: - """Run on new LLM token. 
Only available when streaming is enabled.""" - chunk = kwargs.get("chunk", None) - if chunk is not None: - rich_output_manager.update(chunk) +class OutputBufferStreamingHandler(BaseStreamingHandler): + def __init__(self): + super().__init__(buffer_output_manager) - def on_tool_end(self, **kwargs: Any) -> Any: - chunk = kwargs.get("chunk", None) - if chunk is not None: - rich_output_manager.update_tool_result(chunk) - - def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: - rich_output_manager.finish(err=error) - - def on_chain_end(self, **kwargs: Any) -> None: - """Run when chain finishes running.""" - message = kwargs.get("message", None) - rich_output_manager.finish(message=message) +class RichTerminalStreamingHandler(BaseStreamingHandler): + def __init__(self): + super().__init__(rich_output_manager) -class FileLoggerStreamingHandler(StreamingStdOutCallbackHandler): - def on_chain_start(self, **kwargs: Any) -> None: - """Run when chain starts running.""" - agent_name = kwargs.get("agent_name") - file_output_manager.add(agent_name) - - def on_llm_new_token(self, **kwargs: Any) -> None: - """Run on new LLM token. 
Only available when streaming is enabled.""" - chunk = kwargs.get("chunk", None) - if chunk is not None: - file_output_manager.update(chunk) - - def on_tool_end(self, **kwargs: Any) -> Any: - chunk = kwargs.get("chunk", None) - if chunk is not None: - file_output_manager.update_tool_result(chunk) - - def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: - file_output_manager.finish(err=error) - - def on_chain_end(self, **kwargs: Any) -> None: - """Run when chain finishes running.""" - message = kwargs.get("message", None) - file_output_manager.finish(message=message) +class FileLoggerStreamingHandler(BaseStreamingHandler): + def __init__(self): + super().__init__(file_output_manager) diff --git a/creator/client/command.py b/creator/client/command.py index f82769a..9826f7d 100644 --- a/creator/client/command.py +++ b/creator/client/command.py @@ -1,6 +1,7 @@ import argparse from rich.rule import Rule import json +import asyncio from creator.utils.printer import print as rich_print from creator.config.open_config import open_user_config @@ -129,9 +130,9 @@ ], }, { - "name": "interactive", + "name": "interpreter", "nickname": "i", - "help_text": "Enter interactive mode", + "help_text": "Enter interpreter mode", "command": False, "type": bool, }, @@ -215,8 +216,9 @@ def cmd_client(): open_user_config() return - if not args.command or args.interactive: - repl_app.run(args.quiet) + if not args.command or args.interpreter: + loop = asyncio.get_event_loop() + loop.run_until_complete(repl_app.run(args.quiet, args.interpreter)) return if args.command == "create": @@ -265,11 +267,23 @@ def cmd_client(): if args.command == "ui": import os + import sys + import atexit import subprocess streamlit_app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../app", "streamlit_app.py") env = os.environ.copy() - subprocess.Popen(["streamlit", "run", streamlit_app_path], env=env) - return + process = subprocess.Popen(["streamlit", "run", 
streamlit_app_path], env=env) + + def terminate_process(process): + if process: + process.terminate() + + atexit.register(terminate_process, process) + try: + process.wait() + except KeyboardInterrupt: + pass + sys.exit(process.returncode) if __name__ == "__main__": diff --git a/creator/client/repl/__init__.py b/creator/client/repl/__init__.py index 984f1ad..6864526 100644 --- a/creator/client/repl/__init__.py +++ b/creator/client/repl/__init__.py @@ -1,9 +1,6 @@ from .app import OpenCreatorREPL -from .handler import RequestHandler -handler = RequestHandler() - -repl_app = OpenCreatorREPL(handler) +repl_app = OpenCreatorREPL() __all__ = ["repl_app"] diff --git a/creator/client/repl/app.py b/creator/client/repl/app.py index beed19c..ef2e1b5 100644 --- a/creator/client/repl/app.py +++ b/creator/client/repl/app.py @@ -1,108 +1,82 @@ -from prompt_toolkit.application import Application from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.keys import Keys -from prompt_toolkit.layout import HSplit, Layout -from prompt_toolkit.widgets import TextArea -from prompt_toolkit.layout.dimension import Dimension from prompt_toolkit.auto_suggest import AutoSuggestFromHistory -from prompt_toolkit.lexers import PygmentsLexer +from prompt_toolkit.shortcuts import PromptSession +from prompt_toolkit.document import Document from pygments.lexers.markup import MarkdownLexer -from prompt_toolkit.filters import to_filter - +from prompt_toolkit.lexers import PygmentsLexer import traceback -from .constants import prompt_message, help_text +from .constants import help_text, prompt_message, interpreter_message from .completer import completer, file_history from .style import style -from .lexer import CustomLexer +from .handler import RequestHandler +from questionary import Question +import sys class OpenCreatorREPL: + interpreter = False - def __init__(self, accept_callback=None): - self.accept_callback = accept_callback - - def run(self, quiet=False): - output_text = "" if 
quiet else help_text - self.output_field = TextArea(text=output_text, height=Dimension(min=0, weight=1), focusable=True, read_only=True, focus_on_click=True, lexer=CustomLexer(), scrollbar=True) - self.input_field = TextArea( - height=Dimension(min=1, weight=100), - prompt=prompt_message, - multiline=True, - wrap_lines=False, - focus_on_click=True, - dont_extend_height=False, - completer=completer, - auto_suggest=AutoSuggestFromHistory(), - history=file_history, - lexer=PygmentsLexer(MarkdownLexer), - complete_while_typing=True, - ) - self.input_field.buffer.enable_history_search = to_filter(True) - self.input_field.accept_handler = self.accept - + async def setup(self, quiet=False): + self.handler = RequestHandler() # The key bindings. kb = KeyBindings() - @kb.add(Keys.ControlD) - @kb.add(Keys.ControlQ) - def _(event): - " Pressing Ctrl-Q will exit the user interface. " - self.accept_callback.handle_exit(event.app, self.output_field.text) - # emacs control + j new line keybindings @kb.add(Keys.ControlJ) def _(event): event.current_buffer.insert_text('\n') - @kb.add(Keys.ControlC) - def _(event): - buffer = event.app.current_buffer - self.input_field.accept_handler(buffer, keyboard_interrupt=True) - event.app.current_buffer.reset() - - @kb.add(Keys.Enter) - def _(event): - " When enter is pressed, we insert a newline. " - buffer = event.app.current_buffer - if self.input_field.text.startswith("%exit"): - self.accept_callback.handle_exit(event.app, self.output_field.text) - return - - self.input_field.accept_handler(buffer) - event.app.current_buffer.reset() - - container = HSplit( - [ - self.output_field, - self.input_field, - ] - ) - # Run application. 
- self.application = Application( - layout=Layout(container, focused_element=self.input_field), - key_bindings=kb, + self.prompt_session = PromptSession( + prompt_message if not self.interpreter else interpreter_message, style=style, + multiline=False, + lexer=PygmentsLexer(MarkdownLexer), + history=file_history, + completer=completer, + auto_suggest=AutoSuggestFromHistory(), + complete_while_typing=True, mouse_support=True, - full_screen=True, + key_bindings=kb ) - self.application.run() - - def accept(self, buff, keyboard_interrupt=False): - - self.output_field.buffer.read_only = to_filter(False) - - show_stderr = True - if keyboard_interrupt: - output = "KeyboardInterrupt" - else: + if not quiet: + await self.handler.show_output("", help_text, add_prompt_prefix=False) + + async def ask(self, interpreter=True): + if self.interpreter != interpreter: + self.interpreter = interpreter + self.handler.interpreter = interpreter + await self.setup() + self.prompt_session.default_buffer.reset(Document()) + question = Question(self.prompt_session.app) + output = await question.unsafe_ask_async(patch_stdout=True) + return output + + async def run(self, quiet=False, interpreter=False): + await self.setup(quiet) + while 1: try: - self.accept_callback.handle(self.input_field.text, self.output_field) - show_stderr = False + user_request = await self.ask(interpreter) + user_request = user_request.strip() + await self.handler.show_output(user_request, "", add_newline=user_request != "") + if user_request == "%exit": + sys.exit() + if user_request.startswith("%interpreter"): + interpreter = not interpreter + mode = "on" if interpreter else "off" + await self.handler.show_output("", f"[red]Toggled Interpreter mode {mode}![/red]", add_prompt_prefix=False, add_newline=False, add_request=False) + continue + if user_request: + await self.handler.handle(user_request, interpreter) + except KeyboardInterrupt: + user_request = self.prompt_session.default_buffer.text + await 
self.handler.show_output(user_request, "", grey=True) + await self.handler.show_output("", "[red]KeyboardInterrupt[/red]", add_prompt_prefix=False, add_newline=False, add_request=False) + except EOFError: + sys.exit() except Exception: - output = f"{traceback.format_exc()}" - if self.input_field.text.strip() != "": - self.input_field.buffer.history.store_string(self.input_field.text) - if show_stderr: - self.accept_callback.show_output(self.input_field.text, self.output_field, output) - self.output_field.buffer.read_only = to_filter(True) + err = traceback.format_exc() + user_request = self.prompt_session.default_buffer.text + await self.handler.show_output(user_request, "", grey=True) + await self.handler.show_output("", f"[red]{err}[/red]", add_prompt_prefix=False, add_newline=False, add_request=False) diff --git a/creator/client/repl/completer.py b/creator/client/repl/completer.py index 490d649..150b45a 100644 --- a/creator/client/repl/completer.py +++ b/creator/client/repl/completer.py @@ -32,4 +32,7 @@ '%reset': None, '%undo': None, '%help': None, + '%interpreter': None, + '%save_message': None, + '%load_message': None, }) diff --git a/creator/client/repl/constants.py b/creator/client/repl/constants.py index 6f95831..31e3b90 100644 --- a/creator/client/repl/constants.py +++ b/creator/client/repl/constants.py @@ -1,9 +1,8 @@ from prompt_toolkit.formatted_text import FormattedText -help_text = """ -Open-Creator 0.1.2 - Build your costomized skill library -Type "%help" for more information. Pressing Ctrl-Q/Ctrl-D to exit +help_text = """Open-Creator 0.1.3 - Build your costomized skill library +Type "%help" for more information. Pressing Ctrl-Q/Ctrl-D to exit. Ctrl-J to add new line. 
___ ____ _ / _ \ _ __ ___ _ __ / ___|_ __ ___ __ _| |_ ___ _ __ | | | | '_ \ / _ \ '_ \ | | | '__/ _ \/ _` | __/ _ \| '__| @@ -14,11 +13,19 @@ prompt_message = FormattedText([ - ('class:prompt', 'creator'), - ('', ' ◐ ') + ('ansigreen', 'creator'), + ('#ffffff', ' ◐ ') ]) -prompt_prefix = "\ncreator ◐ " + +interpreter_message = FormattedText([ + ('ansigreen', 'interpreter'), + ('#ffffff', ' ◑ ') +]) + + +prompt_prefix = "\n[green]creator[/green] ◐ " +interpreter_prefix = "\n[green]interpreter[/green] ◑ " help_commands = """ # Entering Help Commands @@ -39,4 +46,7 @@ - `%reset`: reset all messages and cached skills - `%undo`: undo the last request - `%help`: Print this help message +- `%interpreter`: toggle interpreter mode +- `%save_message`: save current conversation messages to a file +- `%load_message`: load conversation messages from a file """ diff --git a/creator/client/repl/handler.py b/creator/client/repl/handler.py index c5cfb16..9b1a15d 100644 --- a/creator/client/repl/handler.py +++ b/creator/client/repl/handler.py @@ -1,17 +1,15 @@ import json -from .constants import help_commands, prompt_prefix, prompt_message -from .style import style -from .lexer import parse_line +from .constants import help_commands, prompt_prefix, interpreter_prefix -from creator.agents.creator_agent import open_creator_agent +from creator.agents import create_creator_agent, create_code_interpreter_agent +from creator.config.library import config from creator.utils import truncate_output, is_valid_code +from rich.console import Console +from rich.markdown import Markdown from langchain.output_parsers.json import parse_partial_json - -from prompt_toolkit.document import Document -from prompt_toolkit.shortcuts import print_formatted_text -from prompt_toolkit.formatted_text import FormattedText +import os class RequestHandler: @@ -20,47 +18,39 @@ def __init__(self): self.messages = [] self.message_states = [self.messages] self.history = [] + self.output = [] + self.console = 
Console() + self.interpreter = False + self.interpreter_agent = create_code_interpreter_agent(config) + self.open_creator_agent = create_creator_agent(config) - def handle(self, request, output_field): + async def handle(self, request, interpreter): """ Handle the user request input, and dispatch to the appropriate handler based on the request type: meta-prompt command, expression, or agent. Args: request (str): The user input string. - output_field (prompt_toolkit.widgets.text_area.TextArea): The output field to display results. - Returns: str: The output text to be displayed in the output_field. """ - self.show_output(request, output_field, "") - + self.interpreter = interpreter if request.startswith("%"): - self.meta_prompt_handler(request, output_field) - elif is_valid_code(request, open_creator_agent.tools[0].namespace): - self.expression_handler(request, output_field) - self.update_history(request, output_field.text) + await self.meta_prompt_handler(request) + elif is_valid_code(request, self.open_creator_agent.tools[0].namespace): + await self.expression_handler(request) + self.update_history(request) else: - # output = "NOT IMPLEMENTED YET" - # self.show_output(request, output_field, output, add_prompt_prefix=False, add_request=False, add_newline=False) - self.agent_handler(request, output_field) - self.update_history(request, output_field.text) - - def handle_exit(self, app, output): - app.exit() - tokens = parse_line(output) - print_formatted_text(FormattedText(tokens), style=style) - print_formatted_text(prompt_message, style=style) - - def meta_prompt_handler(self, request, output_field): + await self.agent_handler(request) + self.update_history(request) + + async def meta_prompt_handler(self, request): """ Handle meta-prompt commands that start with '%'. These commands control the REPL environment and provide functionalities like clear, reset, undo, and help. Args: request (str): The user input string. 
- output_field (prompt_toolkit.widgets.text_area.TextArea): The output field. - Returns: None """ @@ -68,27 +58,26 @@ def meta_prompt_handler(self, request, output_field): if request.startswith("%reset"): self.messages = [] - output = "Conversation Message Reset!" - self.show_output(request, output_field, output, add_prompt_prefix=False, add_request=False, add_newline=False) + self.output.append(("> Conversation Message Reset!", "Markdown")) + await self.show_output(request=request, output=output, add_prompt_prefix=False, add_request=False, add_newline=False) if request.startswith("%clear"): self.history = [] - output_field.text = "" - output = "" - self.show_output(request, output_field, output, add_prompt_prefix=False, add_request=False, add_newline=False) + self.output = [] + await self.show_output(request=request, output=output, add_prompt_prefix=False, add_request=False, add_newline=False) if request.startswith("%undo"): if len(self.history) == 0 or len(self.message_states) == 0: - output = "Nothing to undo!" 
- output_field.text = "" - self.show_output(request, output_field, output) + self.output = [] + self.output.append(("> Nothing to undo!", "Markdown")) + await self.show_output(request=request, output=output, add_prompt_prefix=False, add_request=False, add_newline=False) return self.history.pop(-1) if len(self.history) > 0: - _, output_field.text = self.history[-1] + _, self.output = self.history[-1] else: - output_field.text = "" + self.output = [] self.message_states.pop(-1) if len(self.message_states) > 0: @@ -96,26 +85,49 @@ def meta_prompt_handler(self, request, output_field): else: self.messages = [] output = "" - self.show_output(request, output_field, output) + await self.show_output(request, output, add_prompt_prefix=False, add_request=False, add_newline=False) + + if request.startswith("%save_message"): + json_path = " ".join(request.split(" ")[1:]) + if json_path == "": + json_path = "messages.json" + if not json_path.endswith(".json"): + json_path += ".json" + with open(json_path, 'w') as f: + json.dump(self.messages, f, indent=4) + + self.output.append((f"> messages json export to {os.path.abspath(json_path)}", "Markdown")) + await self.show_output(request, output, add_prompt_prefix=False, add_request=False, add_newline=False) + + if request.startswith("%load_message"): + json_path = " ".join(request.split(" ")[1:]) + if json_path == "": + json_path = "messages.json" + if not json_path.endswith(".json"): + json_path += ".json" + with open(json_path) as f: + self.messages = json.load(f) + + self.output.append((f"> messages json loaded from {os.path.abspath(json_path)}", "Markdown")) + await self.show_output(request, output, add_prompt_prefix=False, add_request=False, add_newline=False) if request.startswith("%help"): output = help_commands - self.show_output(request, output_field, output, add_prompt_prefix=False, add_request=False, add_newline=False) + await self.show_output(request, output, add_prompt_prefix=False, add_request=False, add_newline=False) 
- def expression_handler(self, request, output_field): + async def expression_handler(self, request): """ Handle user input that is recognized as a Python expression. The expression will be executed, and the result (or any error message) will be displayed. Args: request (str): The user input string, identified as a Python expression. - output_field (prompt_toolkit.widgets.text_area.TextArea): The output field. Returns: str: The result of the executed expression or error message to be displayed in the output_field. """ - tool_result = open_creator_agent.tools[0].run_with_return(request) + tool_result = self.open_creator_agent.tools[0].run_with_return(request) truncate_tool_result = truncate_output(tool_result) outputs = [tool_result["stdout"], tool_result["stderr"]] output = "\n".join([o for o in outputs if o != ""]) @@ -123,9 +135,9 @@ def expression_handler(self, request, output_field): self.messages.append({"role":"user", "content": request}) self.messages.append({"role":"function", "name": "user_executed_code_output","content": json.dumps(truncate_tool_result)}) - self.show_output(request, output_field, output, add_prompt_prefix=False, add_newline=False, add_request=False) + await self.show_output(request=request, output=output, add_prompt_prefix=False, add_newline=False, add_request=False) - def update_history(self, input_text, output_text): + def update_history(self, input_text): """ Update the history of user inputs and outputs, preserving the state to enable undo functionality. @@ -134,32 +146,36 @@ def update_history(self, input_text, output_text): output_text (str): The resulting output string. 
""" - self.history.append((input_text, output_text)) + self.history.append((input_text, list(self.output))) self.message_states.append(self.messages.copy()) - def convert_agent_message(self, langchain_message): - content = langchain_message.content if langchain_message.content else "" + def convert_agent_message(self, message): + content = message["content"] if message["content"] else "" + if message["role"] == "function": + content = f"```\n{content}\n```" function_call = {} - if langchain_message.additional_kwargs is not None: - function_call = langchain_message.additional_kwargs.get('function_call', {}) + if "function_call" in message: + function_call = message["function_call"] name = function_call.get("name", "") arguments = function_call.get("arguments", "") code = "" - language = "" + language = "python" if name in ("run_code", "python"): arguments_dict = parse_partial_json(arguments) if arguments_dict is not None: language = arguments_dict.get("language", "python") + if not language: + language = "python" code = arguments_dict.get("code", "") else: language = "json" code = arguments output = f"{content}\n" if len(code) > 0: - output += f"```{language}\n{code}```" + output += f"```{language}\n{code}\n```" return output - def agent_handler(self, request, output_field): + async def agent_handler(self, request): """ Handle user input that is intended to be processed by the agent. The input will be sent to the agent, and the agent's response will be displayed. 
@@ -171,32 +187,38 @@ def agent_handler(self, request, output_field): Returns: None """ - inputs = {"messages": self.messages, "verbose": True} - messages = open_creator_agent.run(inputs) - self.messages += messages - # last_cursor = len(output_field.text) - # for stop, (agent_name, (delta_message, full_message)) in open_creator_agent.iter(inputs): - # print(agent_name, full_message) - # if delta_message is None and full_message is None: - # output = f"Runing {agent_name}\n" - # last_cursor = len(output_field.text) + len(output) - # else: - # output_field.text = output_field.text[:last_cursor] - # if not isinstance(full_message, list): - # output = self.convert_agent_message(full_message) - # if not stop: - # pass - # # self.show_output(request, output_field, output, add_prompt_prefix=False, add_request=False, add_newline=False) - # else: - # self.messages += full_message - - def show_output(self, request, output_field, output, add_prompt_prefix=True, add_request=True, add_newline=True): - new_text = output_field.text + messages = self.messages + [{"role": "user", "content": request}] + inputs = {"messages": messages, "verbose": True} + with self.console.status("[blue]Thinking[/blue]", spinner="circleHalves"): + if self.interpreter: + messages = self.interpreter_agent.run(inputs) + else: + messages = self.open_creator_agent.run(inputs) + output = "\n".join([self.convert_agent_message(message) for message in messages[len(self.messages)+1:]]) + self.messages = messages + self.output.append((output, "Markdown")) + + async def show_output(self, request, output, add_prompt_prefix=True, add_request=True, add_newline=True, grey=False): + new_text = "" if add_prompt_prefix: - new_text += prompt_prefix + prefix = prompt_prefix + if self.interpreter: + prefix = interpreter_prefix + if grey: + new_text += prefix.replace("green]", "grey62]") + else: + new_text += prefix if add_request: new_text += request if add_newline: new_text += "\n" new_text += output - 
output_field.buffer.document = Document(text=new_text, cursor_position=len(new_text)) + self.console.clear() + if len(new_text) > 0: + self.output.append((new_text, "Text")) + for text, style in self.output: + if style == "Text": + self.console.print(text, end="") + elif style == "Markdown": + self.console.print(Markdown(text), end="") + self.console.print() diff --git a/creator/client/repl/lexer.py b/creator/client/repl/lexer.py deleted file mode 100644 index 4ba4ca8..0000000 --- a/creator/client/repl/lexer.py +++ /dev/null @@ -1,38 +0,0 @@ -import re -from prompt_toolkit.document import Document -from prompt_toolkit.lexers import Lexer - - -TAG_PATTERNS = [ - (re.compile(r'(.*?)<\/stderr>', re.DOTALL), 'class:stderr'), - (re.compile(r'(.*?)<\/prompt>', re.DOTALL), 'class:prompt'), - (re.compile(r'(.*?)<\/system>', re.DOTALL), 'class:system'), -] - - -def parse_line(line): - tokens = [('class:text', line)] - new_tokens = [] - for pattern, style in TAG_PATTERNS: - for token_style, text in tokens: - # Only apply regex on 'class:text' tokens to avoid overwriting styles - if token_style == 'class:text': - start = 0 - for match in pattern.finditer(text): - # Append text before match with current style - new_tokens.append((token_style, text[start:match.start()])) - # Append matched text with new style - new_tokens.append((style, match.group(1))) - start = match.end() - # Append text after last match with current style - new_tokens.append((token_style, text[start:])) - else: - new_tokens.append((token_style, text)) - tokens = new_tokens - new_tokens = [] - return tokens - - -class CustomLexer(Lexer): - def lex_document(self, document: Document): - return lambda lineno: parse_line(document.lines[lineno]) diff --git a/creator/client/repl/style.py b/creator/client/repl/style.py index 3bb3faa..b1872f3 100644 --- a/creator/client/repl/style.py +++ b/creator/client/repl/style.py @@ -17,8 +17,4 @@ # Scrollbar 'scrollbar.background': 'bg:#d0d0d0', # Light gray background for 
scrollbar 'scrollbar.button': 'bg:#222222', # Dark color for scrollbar button - - 'prompt': 'ansigreen', - 'stderr': 'red', - "system": "ansiblue", }) diff --git a/creator/code_interpreter/R.py b/creator/code_interpreter/R.py index 6087685..f6e083a 100644 --- a/creator/code_interpreter/R.py +++ b/creator/code_interpreter/R.py @@ -6,14 +6,14 @@ class RInterpreter(BaseInterpreter): name: str = "r_interpreter" description: str = "An R interpreter" start_command: str = "R --quiet --no-save --no-restore-data" - print_command: str = "cat('{}\n')" + print_command: str = "'{}'\n" def postprocess(self, response): def clean_string(s): return '\n'.join([line for line in s.split('\n') if not re.match(r'^(\s*>\s*|\s*\.\.\.\s*)', line)]) - + # clean up stdout and stderr response['stdout'] = clean_string(response.get('stdout', '')) response['stderr'] = clean_string(response.get('stderr', '')) - + return response diff --git a/creator/code_interpreter/__init__.py b/creator/code_interpreter/__init__.py index f06abbc..0b525c7 100644 --- a/creator/code_interpreter/__init__.py +++ b/creator/code_interpreter/__init__.py @@ -66,6 +66,13 @@ def clean_code(self, code: str) -> str: # replace tab to 4 return code + def hot_fix(self, language, code): + # fix python code start with ! 
+ if language == "python" and code.startswith("!"): + language = "shell" + code = code[1:] + return language, code + def _run( self, language: str, @@ -76,10 +83,14 @@ def _run( language = language.lower() if language not in language_map: return {"status": "error", "stdout": "", "stderr": f"Language {language} not supported, Only support {list(language_map.keys())}"} + language, code = self.hot_fix(language, code) if language not in self.interpreters: self.add_interpreter(language=language) code = self.clean_code(code) - result = self.interpreters[language].run(code) + if isinstance(self.interpreters[language], SafePythonInterpreter): + result = self.interpreters[language].run(code, callbacks=run_manager.get_child() if run_manager else None) + else: + result = self.interpreters[language].run(code) self.run_history[language].append({ "code": code, "result": result diff --git a/creator/code_interpreter/base.py b/creator/code_interpreter/base.py index 236864f..cbef01b 100644 --- a/creator/code_interpreter/base.py +++ b/creator/code_interpreter/base.py @@ -1,10 +1,14 @@ import subprocess import traceback import threading +import selectors import time import os +RETRY_LIMIT = 3 + + class BaseInterpreter: """A tool for running base code in a terminal.""" @@ -20,9 +24,14 @@ class BaseInterpreter: def __init__(self): self.process = None + self.output_cache = {"stdout": "", "stderr": ""} self.done = threading.Event() + self.lock = threading.Lock() def get_persistent_process(self): + if self.process: + self.process.terminate() + self.process = None self.process = subprocess.Popen( args=self.start_command.split(), stdin=subprocess.PIPE, @@ -40,25 +49,35 @@ def detect_program_end(self, line): def handle_stream_output(self, stream, is_stderr): """Reads from a stream and appends data to either stdout_data or stderr_data.""" start_time = time.time() - for line in stream: - if self.detect_program_end(line): - start_time = time.time() - break - if time.time() - start_time > 
self.timeout: - start_time = time.time() - self.output_cache["stderr"] += f"\nsession timeout ({self.timeout}) s\n" - break - if line: - if is_stderr: - self.output_cache["stderr"] += line - else: - self.output_cache["stdout"] += line - time.sleep(0.1) + sel = selectors.DefaultSelector() + sel.register(stream, selectors.EVENT_READ) + + while not self.done.is_set(): + for key, _ in sel.select(timeout=0.1): # Non-blocking with a small timeout + line = key.fileobj.readline() + if self.detect_program_end(line): + self.done.set() + break + if time.time() - start_time > self.timeout: + self.done.set() + with self.lock: + self.output_cache["stderr"] += f"\nsession timeout ({self.timeout}) s\n" + break + with self.lock: + if line: + if is_stderr: + self.output_cache["stderr"] += line + else: + self.output_cache["stdout"] += line + sel.unregister(stream) + sel.close() def add_program_end_detector(self, code): if self.process: print_command = self.print_command.format(self.PROGRAM_END_DETECTOR) + "\n" return code + "\n\n" + print_command + else: + return code def clear(self): self.output_cache = {"stdout": "", "stderr": ""} @@ -68,13 +87,22 @@ def clear(self): self.stdout_thread.start() self.stderr_thread.start() + def join(self, timeout): + start_time = time.time() + + while time.time() - start_time < timeout: + if self.done.is_set(): + break + self.stdout_thread.join(0.1) + self.stderr_thread.join(0.1) + def preprocess(self, code): return code def postprocess(self, output): return output - def run(self, query: str, is_start: bool = False) -> dict: + def run(self, query: str, is_start: bool = False, retries: int = 0) -> dict: try: query = self.preprocess(query) except Exception: @@ -88,26 +116,31 @@ def run(self, query: str, is_start: bool = False) -> dict: return {"status": "error", "stdout": "", "stderr": traceback_string} self.clear() try: - try: - query = self.add_program_end_detector(query) - self.process.stdin.write(query + "\n") - self.process.stdin.flush() - - 
time.sleep(0.2) - except subprocess.TimeoutExpired: - self.process.kill() - stdout, stderr = "", traceback.format_exc() - return {"status": "error", "stdout": stdout, "stderr": stderr} - except BrokenPipeError: - stderr = traceback.format_exc() - return {"status": "error", "stdout": "", "stderr": stderr} + code = self.add_program_end_detector(query) + self.process.stdin.write(code + "\n") + self.process.stdin.flush() + + time.sleep(0.2) + except (BrokenPipeError, OSError): + if retries < RETRY_LIMIT: + time.sleep(retries * 2) # Exponential back-off + self.get_persistent_process() + return self.run(query=query, retries=retries+1) + else: + stderr = traceback.format_exc() + return {"status": "error", "stdout": "", "stderr": stderr} + except subprocess.TimeoutExpired: + self.process.kill() + stdout, stderr = "", traceback.format_exc() + return {"status": "error", "stdout": stdout, "stderr": stderr} + self.join(self.timeout) return self.postprocess({"status": "success", **self.output_cache}) def __del__(self): if self.process: self.process.terminate() - if self.stdout_thread: - self.stdout_thread.terminate() - if self.stderr_thread: - self.stderr_thread.terminate() \ No newline at end of file + if self.stdout_thread and self.stdout_thread.is_alive(): + self.stdout_thread.join() + if self.stderr_thread and self.stderr_thread.is_alive(): + self.stderr_thread.join() diff --git a/creator/config.yaml b/creator/config.yaml index 44e073e..44f8bdf 100644 --- a/creator/config.yaml +++ b/creator/config.yaml @@ -1,13 +1,13 @@ LOCAL_SKILL_LIBRARY_PATH: .cache/open_creator/skill_library REMOTE_SKILL_LIBRARY_PATH: .cache/open_creator/remote -LOCAL_SKILL_LIBRARY_VECTORD_PATH: .cache/open_creator/vectordb/ PROMPT_CACHE_HISTORY_PATH: .cache/open_creator/prompt_cache/ +VECTORD_PATH: .cache/open_creator/vectordb/ LOGGER_CACHE_PATH: .cache/open_creator/logs/ -SKILL_EXTRACT_AGENT_CACHE_PATH: .cache/open_creator/llm_cache +LLM_CACHE_PATH: .cache/open_creator/llm_cache +EMBEDDING_CACHE_PATH: 
.cache/open_creator/embeddings/ OFFICIAL_SKILL_LIBRARY_PATH: timedomain/skill-library OFFICIAL_SKILL_LIBRARY_TEMPLATE_PATH: timedomain/skill-library-template -BUILD_IN_SKILL_LIBRARY_DIR: skill_library/open-creator/ # for AZURE, it is your_deployment_id # for ANTHROPIC, it is claude-2 @@ -15,7 +15,7 @@ BUILD_IN_SKILL_LIBRARY_DIR: skill_library/open-creator/ # for huggingface, it is huggingface/WizardLM/WizardCoder-Python-34B-V1.0 model path # for ollama, it is like ollama/llama2 # the default is openai/gpt-3.5 -MODEL_NAME: gpt-3.5-turbo-16k +MODEL_NAME: gpt-4 TEMPERATURE: 0 # only 0 can use llm_cache USE_AZURE: false @@ -34,3 +34,48 @@ VERTEX_LOCATION: "" HUGGINGFACE_API_KEY: "" HUGGINGFACE_API_BASE: "" +# for langsmith trace +LANGCHAIN_ENDPOINT: +LANGCHAIN_API_KEY: +LANGCHAIN_TRACING_V2: false +LANGCHAIN_PROJECT: "open-creator" + +# for memgpt +MEMGPT_CONFIG: + MEMORY_PATH: .cache/open_creator/memory + PERSONA: | + The following is a blank slate starter persona, I need to expand this to develop my own personality. + + My name is MemGPT. + I am kind, thoughtful, and inquisitive. + + HUMAN: | + This is what I know so far about the user, I should expand this as I learn more about them. + + First name: Chad + Last name: ? + Gender: Male + Age: ? + Nationality: ? 
+ Occupation: Computer science PhD student at UC Berkeley + Interests: Formula 1, Sailing, Taste of the Himalayas Restaurant in Berkeley, CSGO + + AGENT_SUBTASKS: | + - create/save/search skill + - run/test/refactor skill + - show skill + + SUMMARY_WARNING_TOKENS: 6000 + CORE_MEMORY_PERSONA_CHAR_LIMIT: 2000 + CORE_MEMORY_HUMAN_CHAR_LIMIT: 2000 + PAGE_SIZE: 5 + USE_VECTOR_SEARCH: true + +AGENT_MODEL_CONFIG: + CREATOR_AGENT: gpt-4 + INTERPRETER_AGENT: gpt-4 + TESTER_AGENT: gpt-4 + MEMGPT: gpt-4 + EXTRACTOR_AGENT: gpt-3.5-turbo-16k + PROMPT_ENHANCER_AGENT: gpt-3.5-turbo + REFACTOR_AGENT: gpt-3.5-turbo-16k diff --git a/creator/config/library.py b/creator/config/library.py index a033469..a3c60ca 100644 --- a/creator/config/library.py +++ b/creator/config/library.py @@ -1,9 +1,11 @@ -from langchain.cache import SQLiteCache -import langchain +import os from pydantic import BaseModel + +from langchain.cache import SQLiteCache +from langchain.globals import set_llm_cache + from creator.code_interpreter import CodeInterpreter from creator.config.load_config import load_yaml_config -import os # Load configuration from YAML @@ -21,23 +23,29 @@ def resolve_path(path): # Fetch values from the loaded YAML config or set default values -_local_skill_library_path = resolve_path(yaml_config.get("LOCAL_SKILL_LIBRARY_PATH", ".cache/open_creator/skill_library")) -_remote_skill_library_path = resolve_path(yaml_config.get("REMOTE_SKILL_LIBRARY_PATH", ".cache/open_creator/remote")) -_local_skill_library_vectordb_path = resolve_path(yaml_config.get("LOCAL_SKILL_LIBRARY_VECTORD_PATH", ".cache/open_creator/vectordb/")) -_prompt_cache_history_path = resolve_path(yaml_config.get("PROMPT_CACHE_HISTORY_PATH", ".cache/open_creator/prompt_cache/")) -_logger_cache_path = resolve_path(yaml_config.get("LOGGER_CACHE_PATH", ".cache/open_creator/logs/")) -_skill_extract_agent_cache_path = resolve_path(yaml_config.get("SKILL_EXTRACT_AGENT_CACHE_PATH", ".cache/open_creator/llm_cache")) 
-_official_skill_library_path = resolve_path(yaml_config.get("OFFICIAL_SKILL_LIBRARY_PATH", "timedomain/skill-library")) -_official_skill_library_template_path = resolve_path(yaml_config.get("OFFICIAL_SKILL_LIBRARY_TEMPLATE_PATH", "timedomain/skill-library-template")) -_model = yaml_config.get("MODEL_NAME", "gpt-3.5-turbo-16k-0613") -_temperature = yaml_config.get("TEMPERATURE", 0) -_run_human_confirm = yaml_config.get("RUN_HUMAN_CONFIRM", False) -_use_stream_callback = yaml_config.get("USE_STREAM_CALLBACK", True) -_build_in_skill_library_dir = yaml_config.get("BUILD_IN_SKILL_LIBRARY_DIR", "skill_library/open-creator/") -_build_in_skill_library_dir = os.path.join(project_dir, _build_in_skill_library_dir) +_local_skill_library_path = resolve_path(yaml_config.LOCAL_SKILL_LIBRARY_PATH) +_remote_skill_library_path = resolve_path(yaml_config.REMOTE_SKILL_LIBRARY_PATH) +_vectordb_path = resolve_path(yaml_config.VECTORD_PATH) +_prompt_cache_history_path = resolve_path(yaml_config.PROMPT_CACHE_HISTORY_PATH) +_logger_cache_path = resolve_path(yaml_config.LOGGER_CACHE_PATH) +_llm_cache_path = resolve_path(yaml_config.LLM_CACHE_PATH) +_embedding_cache_path = resolve_path(yaml_config.EMBEDDING_CACHE_PATH) +yaml_config.MEMGPT_CONFIG.MEMORY_PATH = resolve_path(yaml_config.MEMGPT_CONFIG.MEMORY_PATH) + +memgpt_config = yaml_config.MEMGPT_CONFIG + +angent_model_config = yaml_config.AGENT_MODEL_CONFIG + +_official_skill_library_path = resolve_path(yaml_config.OFFICIAL_SKILL_LIBRARY_PATH) +_official_skill_library_template_path = resolve_path(yaml_config.OFFICIAL_SKILL_LIBRARY_TEMPLATE_PATH) + +_model = yaml_config.MODEL_NAME +_temperature = yaml_config.TEMPERATURE +_run_human_confirm = yaml_config.RUN_HUMAN_CONFIRM +_use_stream_callback = yaml_config.USE_STREAM_CALLBACK # Ensure directories exist -for path in [_skill_extract_agent_cache_path, _local_skill_library_path, _local_skill_library_vectordb_path, _prompt_cache_history_path, _logger_cache_path]: +for path in [_llm_cache_path, 
_local_skill_library_path, _vectordb_path, _prompt_cache_history_path, _logger_cache_path, memgpt_config.MEMORY_PATH, _embedding_cache_path]: if not os.path.exists(path): os.makedirs(path) @@ -48,30 +56,24 @@ def resolve_path(path): if not os.path.exists(_prompt_cache_history_path): open(os.path.join(_prompt_cache_history_path, "history.txt"), 'a').close() -build_in_skill_library_dir = os.path.join(os.path.dirname(os.path.abspath(__file__))) - -build_in_skill_config = { - "create": os.path.join(_build_in_skill_library_dir, "create"), - "save": os.path.join(_build_in_skill_library_dir, "save"), - "search": os.path.join(_build_in_skill_library_dir, "search"), - -} # Placeholder for any built-in skill configurations - class LibraryConfig(BaseModel): local_skill_library_path: str = _local_skill_library_path remote_skill_library_path: str = _remote_skill_library_path - local_skill_library_vectordb_path: str = _local_skill_library_vectordb_path + vectordb_path: str = _vectordb_path prompt_cache_history_path: str = _prompt_cache_history_path logger_cache_path: str = _logger_cache_path - skill_extract_agent_cache_path: str = _skill_extract_agent_cache_path + llm_cache_path: str = _llm_cache_path + embedding_cache_path: str = _embedding_cache_path + model: str = _model temperature: float = _temperature - official_skill_library_path: str = _official_skill_library_path - official_skill_library_template_path: str = _official_skill_library_template_path - build_in_skill_config: dict = build_in_skill_config run_human_confirm: bool = _run_human_confirm use_stream_callback: bool = _use_stream_callback + + official_skill_library_path: str = _official_skill_library_path + official_skill_library_template_path: str = _official_skill_library_template_path + code_interpreter: CodeInterpreter = CodeInterpreter() # prompt paths @@ -86,11 +88,20 @@ class LibraryConfig(BaseModel): tips_for_debugging_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_debugging_prompt.md") 
tips_for_testing_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_testing_prompt.md") tips_for_veryfy_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_veryfy_prompt.md") + prompt_enhancer_agent_prompt_path: str = os.path.join(project_dir, "prompts", "prompt_enhancer_agent_prompt.md") + prompt_enhancer_schema_path: str = os.path.join(project_dir, "prompts", "prompt_enhancer_schema.json") + memgpt_system_prompt_path: str = os.path.join(project_dir, "prompts", "memgpt_system_prompt.md") + memgpt_function_schema_path: str = os.path.join(project_dir, "prompts", "memgpt_function_schema.json") + + memgpt_config: dict = None + agent_model_config: dict = None use_rich: bool = True use_file_logger: bool = False config = LibraryConfig() +config.memgpt_config = memgpt_config +config.agent_model_config = angent_model_config -langchain.llm_cache = SQLiteCache(database_path=f"{config.skill_extract_agent_cache_path}/.langchain.db") +set_llm_cache(SQLiteCache(database_path=f"{config.llm_cache_path}/.langchain.db")) diff --git a/creator/config/load_config.py b/creator/config/load_config.py index 832a347..068235a 100644 --- a/creator/config/load_config.py +++ b/creator/config/load_config.py @@ -2,8 +2,11 @@ import shutil import appdirs import yaml + from dotenv import load_dotenv, find_dotenv +from ..utils.attri_dict import AttrDict + load_dotenv(find_dotenv()) @@ -11,9 +14,9 @@ def load_yaml_config(): """ Load the configuration from a YAML file. - + If the config file doesn't exist in the user's directory, it copies the default from the project directory. - + Returns: dict: The loaded YAML configuration. 
""" @@ -35,17 +38,32 @@ def load_yaml_config(): if not os.path.exists(user_config_path): shutil.copy(project_config_path, user_config_path) - # Load YAML config file using the new path + # Load YAML config files + with open(project_config_path, mode='r', encoding="utf-8") as f: + project_config = yaml.safe_load(f) + with open(user_config_path, mode='r', encoding="utf-8") as f: - yaml_config = yaml.safe_load(f) - + user_config = yaml.safe_load(f) + if user_config is None: + user_config = {} + + updated = False + for key, value in project_config.items(): + if key not in user_config: + user_config[key] = value + updated = True + + # If user config was updated, write back to file + if updated: + with open(user_config_path, mode='w', encoding="utf-8") as f: + yaml.dump(user_config, f) + # env vs yaml, yaml first if not empty - for key, value in yaml_config.items(): + for key, value in user_config.items(): if os.environ.get(key) and not value: - yaml_config[key] = os.environ.get(key) + user_config[key] = os.environ.get(key) # if yaml has some configs that env does not, write to env if not os.environ.get(key) and value: os.environ[key] = str(value) - return yaml_config - + return AttrDict(user_config) diff --git a/creator/core/core.py b/creator/core/core.py index 94d5c42..ee87281 100644 --- a/creator/core/core.py +++ b/creator/core/core.py @@ -1,12 +1,12 @@ import os from typing import Union, List, Optional -from creator.agents import skill_extractor_agent, code_interpreter_agent -from creator.core.skill import CodeSkill, BaseSkill, BaseSkillMetadata from creator.config.library import config from creator.utils import print - from creator.hub.huggingface import hf_pull -from creator.retrivever.base import BaseVectorStore +from creator.retrivever.skill_retrivever import SkillVectorStore + +from .skill import CodeSkill, BaseSkill, BaseSkillMetadata +from .runnable import create_skill_from_messages, create_skill_from_request, create_skill_from_file_content import json from 
functools import wraps @@ -48,7 +48,6 @@ def wrapper(cls, *args, **kwargs): if not can_construct_skill: print(f"[red]Warning[/red]: [yellow]Only one parameter can be provided. You provided: {provided_params}[/yellow]") return None - # Return the original function with the validated parameters return func(cls, **kwargs) return wrapper @@ -94,18 +93,6 @@ class Creator: vectordb = None config = config - @classmethod - def _create_from_messages(cls, messages) -> CodeSkill: - """Generate skill from messages.""" - skill_json = skill_extractor_agent.run({ - "messages": messages, - "verbose": True, - }) - skill = CodeSkill(**skill_json) - if skill.skill_metadata is None: - skill.skill_metadata = BaseSkillMetadata() - return skill - @classmethod def _create_from_skill_json_path(cls, skill_json_path) -> CodeSkill: """Load skill from a given path.""" @@ -117,6 +104,19 @@ def _create_from_skill_json_path(cls, skill_json_path) -> CodeSkill: skill.skill_metadata.updated_at = skill.skill_metadata.updated_at.strftime("%Y-%m-%d %H:%M:%S") return skill + @classmethod + def _create_from_skill_json(cls, skill_json, save) -> CodeSkill: + """Load skill from a given json.""" + if skill_json is not None: + skill = CodeSkill(**skill_json) + if skill.skill_metadata is None: + skill.skill_metadata = BaseSkillMetadata() + if save: + skill.save() + return skill + print("> No skill was generated.", print_type="markdown") + return None + @classmethod @validate_create_params def create( @@ -130,9 +130,11 @@ def create( file_path: Optional[str] = None, huggingface_repo_id: Optional[str] = None, huggingface_skill_path: Optional[str] = None, + save: bool = False, ) -> CodeSkill: """Main method to create a new skill.""" + skill_json = None if skill_path: skill_json_path = os.path.join(skill_path, "skill.json") return cls._create_from_skill_json_path(skill_json_path) @@ -141,13 +143,7 @@ def create( return cls._create_from_skill_json_path(skill_json_path) if request: - messages = 
code_interpreter_agent.run({ - "messages": [{ - "role": "user", - "content": request - }], - "verbose": True, - }) + skill_json = create_skill_from_request(request) if messages_json_path: with open(messages_json_path, encoding="utf-8") as f: @@ -157,25 +153,19 @@ def create( with open(file_path, encoding="utf-8") as f: file_content = "### file name: " + os.path.basename(file_path) + "\n---" + f.read() - if file_content: - messages = [{ - "role": "user", - "content": file_content - }] + if messages is not None and len(messages) > 0: + skill_json = create_skill_from_messages(messages) - if messages: - return cls._create_from_messages(messages) + elif file_content is not None: + skill_json = create_skill_from_file_content(file_content) - if huggingface_repo_id and huggingface_skill_path: + elif huggingface_repo_id and huggingface_skill_path: # huggingface_skill_path pattern username/skill_name_{version}, the version is optional and default to 1.0.0 save_path = os.path.join(config.remote_skill_library_path, huggingface_repo_id, huggingface_skill_path) skill_json = hf_pull(repo_id=huggingface_repo_id, huggingface_skill_path=huggingface_skill_path, save_path=save_path) - skill = CodeSkill(**skill_json) - cls.save(skill=skill, skill_path=save_path) - return skill + save = True - # Raise an error if none of the above conditions are met - print("> Please provide one of the following parameters: messages, request, skill_path, messages_json_path, file_content, or file_path.", print_type="markdown") + return cls._create_from_skill_json(skill_json, save=save) @classmethod @validate_save_params @@ -194,7 +184,7 @@ def search(self, query: str, top_k: int = 3, threshold=0.8, remote=False) -> Lis raise NotImplementedError if self.vectordb is None: print("> loading vector database...", print_type="markdown") - self.vectordb = BaseVectorStore() + self.vectordb = SkillVectorStore() skills = self.vectordb.search(query, top_k=top_k, threshold=threshold) return [CodeSkill(**skill) if 
skill.get("skill_program_language", None) else BaseSkill(**skill) for skill in skills] diff --git a/creator/core/runnable.py b/creator/core/runnable.py new file mode 100644 index 0000000..2b7da10 --- /dev/null +++ b/creator/core/runnable.py @@ -0,0 +1,206 @@ +import json + +from langchain.schema.runnable import RunnableConfig + +from creator.utils import runnable, print, print_run_url +from creator.utils import generate_install_command +from creator.config.library import config as creator_config +from creator.agents import ( + create_skill_extractor_agent, + create_code_interpreter_agent, + create_code_tester_agent, + create_prompt_enhancer_agent, + create_code_refactor_agent +) + + +@runnable(run_name="ConstructCreateSkillMessages") +def construct_create_skill_messages(request): + if isinstance(request, str): + content = request + elif isinstance(request, dict) and "request" in request: + content = request["request"] + return { + "messages": [ + {"role": "user", "content": content} + ] + } + + +@print_run_url +def create_skill_from_messages(messages): + skill_extractor_agent = create_skill_extractor_agent(creator_config) + return skill_extractor_agent.with_config({"run_name": "CreateSkillFromMessages"}).invoke(input={"messages": messages})["extracted_skill"] + + +@print_run_url +def create_skill_from_request(request): + creator_config.use_rich = False + prompt_enhancer_agent = create_prompt_enhancer_agent(creator_config) + creator_config.use_rich = True + code_interpreter_agent = create_code_interpreter_agent(creator_config) + skill_extractor_agent = create_skill_extractor_agent(creator_config) + chain = construct_create_skill_messages | prompt_enhancer_agent | construct_create_skill_messages | code_interpreter_agent | skill_extractor_agent + skill_json = chain.with_config({"run_name": "CreateSkillFromRequest"}).invoke(input=request)["extracted_skill"] + return skill_json + + +@print_run_url +def create_skill_from_file_content(file_content): + 
skill_extractor_agent = create_skill_extractor_agent(creator_config) + chain = construct_create_skill_messages | skill_extractor_agent + skill_json = chain.with_config({"run_name": "CreateSkillFromFileContent"}).invoke(input=file_content)["extracted_skill"] + return skill_json + + +@runnable(run_name="ConstructCreatorMessages") +def _generate_install_command(inputs): + install_script = generate_install_command(**inputs) + return install_script + + +@runnable(run_name="InstallSkill") +def install_skill(inputs, config: RunnableConfig): + skill_dependencies, skill_program_language = inputs["skill_dependencies"], inputs["skill_program_language"] + if skill_dependencies is None: + return inputs + try: + install_script = _generate_install_command.invoke({"language": skill_program_language, "dependencies": skill_dependencies}, config) + print("> Installing dependencies", print_type="markdown") + print(f"```bash\n{install_script}\n```\n", print_type="markdown") + result = creator_config.code_interpreter.run({"language": "shell", "code": install_script}, run_name="InstallDependencies", callbacks=config.get("callbacks", None)) + print(f"> Install dependencies result: {result}", print_type="markdown") + except Exception as e: + print(f"> Error when installing dependencies: {e}", print_type="markdown") + return inputs + + +@runnable(run_name="ConstructRunSkillMessages") +def construct_run_skill_messages(inputs): + skill_name, skill_program_language, skill_code, tool_result, params = inputs["skill_name"], inputs["skill_program_language"], inputs["skill_code"], inputs["tool_result"], inputs["params"] + messages = [ + {"role": "assistant", "content": "ok I will run your code", "function_call": { + "name": skill_name, + "arguments": json.dumps({"language": skill_program_language, "code": skill_code}) + }} + ] + params = json.dumps(params) if isinstance(params, dict) else params + messages.append({"role": "function", "name": "run_code", "content": json.dumps(tool_result)}) + 
messages.append({"role": "user", "content": params}) + return {"messages": messages} + + +@runnable(run_name="SetupSkill") +def setup_skill(inputs, config: RunnableConfig): + language, code = inputs["language"], inputs["code"] + tool_result = creator_config.code_interpreter.invoke({"language": language, "code": code}, config) + inputs["tool_result"] = tool_result + return inputs + + +@runnable(run_name="RunSkill") +def run_skill(inputs, config: RunnableConfig): + code_interpreter_agent = create_code_interpreter_agent(creator_config) + code_interpreter_agent.tools[0] = creator_config.code_interpreter + tool_inputs = {"language": inputs["skill_program_language"], "code": inputs["skill_code"]} + inputs.update(tool_inputs) + chain = (install_skill | setup_skill | construct_run_skill_messages | code_interpreter_agent).with_config({"run_name": "Steps"}) + messages = chain.invoke(inputs, config)["messages"] + return messages + + +@runnable(run_name="ConstructTestSkillMessages") +def construct_test_skill_messages(inputs): + skill_repr, tool_input, tool_result = inputs["skill_repr"], inputs["tool_input"], inputs["tool_result"] + messages = [ + {"role": "user", "content": skill_repr}, + {"role": "assistant", "content": "", "function_call": {"name": "run_code", "arguments": json.dumps(tool_input)}}, + {"role": "function", "name": "run_code", "content": json.dumps(tool_result)}, + {"role": "user", "content": "I have already run the function for you so you can directy use the function by passing the parameters without import the function"}, + ] + return {"messages": messages} + + +@runnable(run_name="TestSkill") +def test_skill(inputs, config: RunnableConfig): + code_tester_agent = create_code_tester_agent(creator_config) + code_tester_agent.tools[0] = creator_config.code_interpreter + code = f"""\n\n +import io +import unittest +stream = io.StringIO() +runner = unittest.TextTestRunner(stream=stream) + + +{inputs["skill_code"]} +""" + tool_inputs = {"language": 
inputs["skill_program_language"], "code": code} + inputs.update(tool_inputs) + inputs["tool_input"] = tool_inputs + chain = (install_skill | setup_skill | construct_test_skill_messages | code_tester_agent).with_config({"run_name": "Steps"}) + test_result = chain.invoke(inputs, config)["output"] + return test_result + + +@runnable(run_name="ConstructRefactorSkillMessages") +def construct_refactor_skill_messages(inputs): + conversation_history, refactor_type, skill_repr, skill_program_language, skill_code, user_request = inputs["conversation_history"], inputs["refactor_type"], inputs["skill_repr"], inputs["skill_program_language"], inputs["skill_code"], inputs["user_request"] + messages = [ + {"role": "system", "content": f"Your action type is: {refactor_type}"}, + {"role": "function", "name": "show_skill", "content": skill_repr}, + {"role": "function", "name": "show_code", "content": f"current skill code:\n```{skill_program_language}\n{skill_code}\n```"}, + {"role": "user", "content": f"{user_request}\nplease output only one skill object" if refactor_type in ("Combine", "Refine") else "\nplease help me decompose the skill object into different independent skill objects"} + ] + messages = conversation_history + [{"role": "system", "content": "Above context is conversation history from other agents. 
Now let's refactor our skill."}] + messages + return {"messages": messages} + + +@runnable(run_name="RefactorSkill") +def refactor_skill(inputs, config: RunnableConfig): + code_refactor_agent = create_code_refactor_agent(creator_config) + chain = construct_refactor_skill_messages | code_refactor_agent + refactored_skill_jsons = chain.invoke(inputs, config)["refacted_skills"] + return refactored_skill_jsons + + +@runnable(run_name="AutoOptimizeSkill") +def auto_optimize_skill(inputs, config: RunnableConfig): + old_skill, retry_times = inputs["old_skill"], inputs["retry_times"] + skill = old_skill.model_copy(deep=True) + refined = False + conversation_history = [] if skill.conversation_history is None else skill.conversation_history + for i in range(retry_times): + if skill.test_summary is None: + test_result = test_skill.invoke({ + "skill_program_language": skill.skill_program_language, + "skill_dependencies": skill.skill_dependencies, + "skill_code": skill.skill_code, + "skill_repr": repr(skill) + }, config) + conversation_history = conversation_history + test_result["messages"] + if "test_summary" in test_result: + test_summary = test_result["test_summary"] + if isinstance(test_summary, dict) and "test_cases" in test_summary: + pass + else: + test_summary = {"test_cases": test_summary} + all_passed = all(test_case["is_passed"] for test_case in test_summary["test_cases"]) + if all_passed and refined: + skill.conversation_history = conversation_history + return { + "skill": skill, + "test_summary": test_summary, + } + print(f"> Auto Refine Skill {i+1}/{retry_times}", print_type="markdown") + skill = skill > "I have tested the skill, but it failed, please refine it." 
+ skill.conversation_history = conversation_history + if all_passed: + return { + "skill": skill, + "test_summary": test_summary, + } + refined = True + return { + "skill": skill, + "test_summary": test_summary, + } diff --git a/creator/core/skill.py b/creator/core/skill.py index 130a0b7..1c4573f 100644 --- a/creator/core/skill.py +++ b/creator/core/skill.py @@ -1,11 +1,14 @@ from pydantic import BaseModel, Field from typing import List, Dict, Optional, Union, Any from datetime import datetime -from creator.utils import remove_title + +from creator.utils import remove_title, print_run_url from creator.config.library import config from creator.utils import generate_skill_doc, generate_install_command, print, generate_language_suffix -from creator.agents import code_interpreter_agent, code_tester_agent, code_refactor_agent from creator.hub.huggingface import hf_repo_update, hf_push + +from .runnable import install_skill, run_skill, test_skill, refactor_skill, auto_optimize_skill + import json import getpass import os @@ -152,15 +155,28 @@ def __init__(self, **data): if isinstance(data["skill_parameters"], list): self.skill_parameters = [CodeSkillParameter(**CodeSkillParameter.construct_with_aliases(**param)) for param in data["skill_parameters"]] elif isinstance(data["skill_parameters"], dict): - self.skill_parameters = CodeSkillParameter(**CodeSkillParameter.construct_with_aliases(**data["skill_parameters"])) + if self.skill_parameters.param_name in ("null", "None") or self.skill_parameters.param_type in ("null", "None"): + self.skill_parameters = [] + else: + self.skill_parameters = [CodeSkillParameter(**CodeSkillParameter.construct_with_aliases(**data["skill_parameters"]))] if "skill_return" in data and data["skill_return"]: if isinstance(data["skill_return"], list): self.skill_return = [CodeSkillParameter(**CodeSkillParameter.construct_with_aliases(**param)) for param in data["skill_return"]] elif isinstance(data["skill_return"], dict): - self.skill_return = 
CodeSkillParameter(**CodeSkillParameter.construct_with_aliases(**data["skill_return"])) if self.skill_return.param_name in ("null", "None") or self.skill_return.param_type in ("null", "None"): - self.skill_return = None + self.skill_return = [] + else: + self.skill_return = [CodeSkillParameter(**CodeSkillParameter.construct_with_aliases(**data["skill_return"]))] + + if "skill_dependencies" in data and data["skill_dependencies"] is not None: + if isinstance(data["skill_dependencies"], list): + self.skill_dependencies = [CodeSkillDependency(**dependency) for dependency in data["skill_dependencies"]] + elif isinstance(data["skill_dependencies"], dict): + if self.skill_dependencies.dependency_name in ("null", "None") or self.skill_dependencies.dependency_name in ("null", "None"): + self.skill_dependencies = [] + else: + self.skill_dependencies = [CodeSkillDependency(**data["skill_dependencies"])] def to_function_call(self): parameters = { @@ -199,16 +215,8 @@ def to_skill_function_schema(self): return code_skill_json_schema - def check_and_install_dependencies(self): - if self.skill_dependencies is None: - return - install_script = generate_install_command(self.skill_program_language, self.skill_dependencies) - result = config.code_interpreter.run({ - "language": "shell", - "code": install_script, - }) - print(result, print_type="json") - return + def install(self): + install_skill.invoke({"skill_dependencies": self.skill_dependencies, "skill_program_language": self.skill_program_language}) def __add__(self, other_skill): assert isinstance(other_skill, type(self)), f"Cannot combine {type(self)} with {type(other_skill)}" @@ -241,74 +249,24 @@ def __gt__(self, user_request:str): self.Config.refactor_type = "Combine" return self.refactor() + @print_run_url def run(self, inputs: Union[str, dict[str, Any]]): - self.check_and_install_dependencies() - messages = [ - {"role": "assistant", "content": "ok I will run your code", "function_call": { - "name": self.skill_name, - 
"arguments": json.dumps({"language": self.skill_program_language, "code": self.skill_code}) - }} - ] - tool_result = config.code_interpreter.run({ - "language": self.skill_program_language, - "code": self.skill_code - }) - params = json.dumps(inputs) if isinstance(inputs, dict) else inputs - messages.append({"role": "function", "name": "run_code", "content": json.dumps(tool_result)}) - messages.append({"role": "user", "content": params}) - previews_tool = code_interpreter_agent.tools[0] - code_interpreter_agent.tools[0] = config.code_interpreter - messages = code_interpreter_agent.run( - { - "messages": messages, - "verbose": True, - } - ) - code_interpreter_agent.tools[0] = previews_tool - return messages + return run_skill.invoke(input={"params": inputs, "skill_name": self.skill_name, "skill_program_language": self.skill_program_language, "skill_code": self.skill_code, "skill_dependencies": self.skill_dependencies}) + @print_run_url def test(self): if self.conversation_history is None: self.conversation_history = [] if not self.skill_code: print("> No code provided, cannot test", print_type="markdown") return - - previews_tool = code_tester_agent.tools[0] - code_tester_agent.tools[0] = config.code_interpreter - - self.check_and_install_dependencies() - extra_import = """\n\n -import io -import unittest -stream = io.StringIO() -runner = unittest.TextTestRunner(stream=stream) -""" - tool_input = { - "language": self.skill_program_language, - "code": self.skill_code + extra_import - } - tool_result = config.code_interpreter.run(tool_input) - messages = [ - {"role": "user", "content": repr(self)}, - {"role": "assistant", "content": "", "function_call": {"name": "run_code", "arguments": json.dumps(tool_input)}}, - {"role": "function", "name": "run_code", "content": json.dumps(tool_result)}, - {"role": "user", "content": "I have already run the function for you so you can directy use the function by passing the parameters without import the function"}, - ] - - 
test_result = code_tester_agent.run( - { - "messages": messages, - "verbose": True, - } - ) - code_tester_agent.tools[0] = previews_tool + test_result = test_skill.invoke({"skill_program_language": self.skill_program_language, "skill_code": self.skill_code, "skill_dependencies": self.skill_dependencies, "skill_repr": repr(self)}) if "test_summary" in test_result: self.test_summary = TestSummary(**{"test_cases": test_result["test_summary"]}) - self.conversation_history = self.conversation_history + test_result["messages"] return self.test_summary + @print_run_url def refactor(self): if self.conversation_history is None: self.conversation_history = [] @@ -316,23 +274,14 @@ def refactor(self): if not self.Config.refactorable: print("> This skill is not refactorable since it is not combined with other skills or add any user request", print_type="markdown") return - messages = [ - {"role": "system", "content": f"Your action type is: {self.Config.refactor_type}"}, - {"role": "function", "name": "show_skill", "content": repr(self)}, - {"role": "function", "name": "show_code", "content": f"current skill code:\n```{self.skill_program_language}\n{self.skill_code}\n```"} - ] - additional_request = "\nplease output only one skill object" if self.Config.refactor_type in ("Combine", "Refine") else "\nplease help me decompose the skill object into different independent skill objects" - messages.append({ - "role": "user", - "content": self.Config.user_request + additional_request + refactored_skill_jsons = refactor_skill.invoke({ + "conversation_history": self.conversation_history, + "refactor_type": self.Config.refactor_type, + "skill_repr": repr(self), + "skill_program_language": self.skill_program_language, + "skill_code": self.skill_code, + "user_request": self.Config.user_request, }) - messages = self.conversation_history + [{"role": "system", "content": "Above context is conversation history from other agents. 
Now let's refactor our skill."}] + messages - refactored_skill_jsons = code_refactor_agent.run( - { - "messages": messages, - "verbose": True, - } - ) refactored_skills = [] for refactored_skill_json in refactored_skill_jsons: refactored_skill = CodeSkill(**refactored_skill_json) @@ -344,25 +293,19 @@ def refactor(self): return refactored_skills[0] return refactored_skills + @print_run_url def auto_optimize(self, retry_times=3): - skill = self.model_copy(deep=True) - refined = False - for i in range(retry_times): - if skill.test_summary is None: - test_summary = skill.test() - if test_summary is None: - print("> Skill test failed, cannot auto optimize", print_type="markdown") - return skill - - all_passed = all(test_case.is_passed for test_case in test_summary.test_cases) - if all_passed and refined: - return skill - print(f"> Auto Refine Skill {i+1}/{retry_times}", print_type="markdown") - skill = skill > "I have tested the skill, but it failed, please refine it." - if all_passed: - skill.test_summary = test_summary - refined = True - return self + optimized_result = auto_optimize_skill.invoke({"old_skill": self, "retry_times": retry_times}) + test_summary = optimized_result["test_summary"] + skill = optimized_result["skill"] + if test_summary is None: + return skill + elif isinstance(test_summary, TestSummary): + skill.test_summary = test_summary + return skill + elif isinstance(test_summary, dict): + skill.test_summary = TestSummary(**test_summary) + return skill def __repr__(self): if self.Config.refactorable: @@ -385,7 +328,7 @@ def __str__(self): def show(self): print(self.__repr__(), print_type="markdown") - + def show_code(self): code = f"""```{self.skill_program_language}\n{self.skill_code}\n```""" print(code, print_type="markdown") diff --git a/creator/llm/__init__.py b/creator/llm/__init__.py index e28e5ec..ac8fd60 100644 --- a/creator/llm/__init__.py +++ b/creator/llm/__init__.py @@ -1,7 +1,6 @@ -from .llm_creator import create_llm, create_embedding 
+from .llm_creator import create_llm __all__ = [ - "create_llm", - "create_embedding" + "create_llm" ] diff --git a/creator/llm/chatopenai_with_trim.py b/creator/llm/chatllm_with_trim.py similarity index 63% rename from creator/llm/chatopenai_with_trim.py rename to creator/llm/chatllm_with_trim.py index cc2d4d7..ab1d63e 100644 --- a/creator/llm/chatopenai_with_trim.py +++ b/creator/llm/chatllm_with_trim.py @@ -9,13 +9,17 @@ def _create_message_dicts( self, messages: List[BaseMessage], stop: Optional[List[str]] ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: message_dicts, params = super()._create_message_dicts(messages, stop) - message_dicts = trim(messages=message_dicts, model=self.model_name, max_tokens=self.max_tokens) + message_dicts, self.trimed = trim(messages=message_dicts, model=self.model_name, max_tokens=self.max_tokens, function_calls=self.function_calls) return message_dicts, params class ChatOpenAIWithTrim(TrimMixin, ChatOpenAI): - pass + cache: bool = True + function_calls: Optional[List[Dict]] = None + trimed: bool = False class AzureChatOpenAIWithTrim(TrimMixin, AzureChatOpenAI): - pass + cache: bool = True + function_calls: Optional[List[Dict]] = None + trimed: bool = False diff --git a/creator/llm/format_function_calls.py b/creator/llm/format_function_calls.py new file mode 100644 index 0000000..7d326be --- /dev/null +++ b/creator/llm/format_function_calls.py @@ -0,0 +1,97 @@ +import json +import textwrap + + +# reference: +# https://gist.github.com/CGamesPlay/dd4f108f27e2eec145eedf5c717318f5 +# https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/24 + +# Function overhead is 16: 3 for the system message plus this template. 
+ +FUNCTION_HEADER_STR = """# Tools + +## functions + +namespace functions { + +} // namespace functions""" + + +def resolve_ref(schema, json_schema): + if schema.get("$ref") is not None: + # #/$defs/CodeSkillParameter + ref = schema["$ref"][8:] + if "$defs" in json_schema and ref in json_schema["$defs"]: + return json_schema["$defs"][ref] + return schema + + +def format_enum(schema): + return " | ".join(json.dumps(o) for o in schema["enum"]) + + +def format_default(schema): + v = schema["default"] + if schema["type"] == "number": + return f"{v:.1f}" if float(v).is_integer() else str(v) + else: + return str(v) + + +def format_object(schema, indent, json_schema): + result = "{\n" + if "properties" not in schema or len(schema["properties"]) == 0: + if schema.get("additionalProperties", False): + return "object" + return None + for key, value in schema["properties"].items(): + value = resolve_ref(value, json_schema) + value_rendered = format_schema(value, indent + 1, json_schema) + if value_rendered is None: + continue + if "description" in value and indent == 0: + for line in textwrap.dedent(value["description"]).strip().split("\n"): + result += f"{' '*indent}// {line}\n" + optional = "" if key in schema.get("required", {}) else "?" 
+ comment = ( + "" + if value.get("default") is None + else f" // default: {format_default(value)}" + ) + result += f"{' '*indent}{key}{optional}: {value_rendered},{comment}\n" + result += (" " * (indent - 1)) + "}" + return result + + +def format_schema(schema, indent, json_schema): + schema = resolve_ref(schema, json_schema) + if "type" not in schema: + return "any" + if "enum" in schema: + return format_enum(schema) + elif schema["type"] == "object": + return format_object(schema, indent, json_schema) + elif schema["type"] == "integer": + return "number" + elif schema["type"] in ["string", "number"]: + return schema["type"] + elif schema["type"] == "array": + return format_schema(schema["items"], indent, json_schema) + "[]" + else: + return schema["type"] + + +def format_tool(tool): + json_schema = tool["parameters"] + result = f"// {tool['description']}\ntype {tool['name']} = (" + formatted = format_object(json_schema, 0, json_schema) + if formatted is not None: + result += "_: " + formatted + result += ") => any;\n\n" + return result + + +def get_function_calls_token_count(encoder, function_calls): + head_cnt = 3 + len(encoder.encode(FUNCTION_HEADER_STR)) + functions_cnt = sum(len(encoder.encode(format_tool(f))) for f in function_calls) + return head_cnt + functions_cnt diff --git a/creator/llm/llm_creator.py b/creator/llm/llm_creator.py index a3e571e..8b9de16 100644 --- a/creator/llm/llm_creator.py +++ b/creator/llm/llm_creator.py @@ -1,14 +1,16 @@ import os from creator.callbacks import OutputBufferStreamingHandler, RichTerminalStreamingHandler, FileLoggerStreamingHandler from langchain.callbacks.manager import CallbackManager -from langchain.embeddings import OpenAIEmbeddings -from .chatopenai_with_trim import ChatOpenAIWithTrim, AzureChatOpenAIWithTrim +from .chatllm_with_trim import ChatOpenAIWithTrim, AzureChatOpenAIWithTrim -def create_llm(config): +def create_llm(config, model=None): use_azure = True if os.getenv("OPENAI_API_TYPE", None) == "azure" 
else False - model_name = config.model + if model is None or model == "": + model_name = config.model + else: + model_name = model temperature = config.temperature streaming = config.use_stream_callback callbacks = [OutputBufferStreamingHandler()] @@ -31,17 +33,3 @@ def create_llm(config): streaming=streaming ) return llm - - -def create_embedding(**kwargs): - - use_azure = True if os.getenv("OPENAI_API_TYPE", None) == "azure" else False - - if use_azure: - azure_model = os.getenv("EMBEDDING_DEPLOYMENT_NAME", None) - print(azure_model) - embedding = OpenAIEmbeddings(deployment=azure_model, model=azure_model) - else: - embedding = OpenAIEmbeddings() - - return embedding diff --git a/creator/llm/tokentrim.py b/creator/llm/tokentrim.py index 9edd7cb..08bafb1 100644 --- a/creator/llm/tokentrim.py +++ b/creator/llm/tokentrim.py @@ -1,5 +1,10 @@ import tiktoken from typing import List, Dict, Any, Optional +from .format_function_calls import get_function_calls_token_count + +# reference: +# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb +# https://github.com/KillianLucas/tokentrim/blob/main/tokentrim/tokentrim.py # Define model configurations in a centralized location @@ -82,7 +87,7 @@ def tokens_for_message(message: Dict[str, Any], encoding: Any, config: Dict[str, Calculate the number of tokens for a single message. """ num_tokens = config['tokens_per_message'] - + for key, value in message.items(): try: num_tokens += len(encoding.encode(str(value))) @@ -95,7 +100,6 @@ def tokens_for_message(message: Dict[str, Any], encoding: Any, config: Dict[str, return num_tokens -# Refactored main function def num_tokens_from_messages(messages: List[Dict[str, Any]], model: Optional[str] = None) -> int: """ Function to return the number of tokens used by a list of messages. 
@@ -135,29 +139,35 @@ def trim( messages: List[Dict[str, Any]], model: Optional[str] = None, trim_ratio: float = 0.75, - max_tokens: Optional[int] = None + max_tokens: Optional[int] = None, + function_calls: List[Dict] = None, ) -> List[Dict[str, Any]]: """ Trim a list of messages to fit within a model's token limit. """ + trimed = False + if not messages: - return messages + return messages, trimed # Initialize max_tokens if max_tokens is None: config = get_model_config(model) max_tokens = int(config['max_tokens'] * trim_ratio) + if function_calls is not None: + max_tokens -= get_function_calls_token_count(get_encoding_for_model(model), function_calls) + total_tokens = num_tokens_from_messages(messages, model) if total_tokens <= max_tokens: - return messages + return messages, trimed # Deduct the system message tokens from the max_tokens if system message exists system_messages = [msg for msg in messages if msg["role"] == "system"] system_message_tokens = num_tokens_from_messages(system_messages, model) available_tokens = max_tokens - system_message_tokens - + trimed = True if available_tokens < 0: print("`tokentrim`: Warning, system message exceeds token limit. 
Trimming...") curr_tokens = total_tokens @@ -173,7 +183,7 @@ def trim( trimmed_messages.append(message) trimmed_messages.extend(messages[idx+1:]) break - return trimmed_messages + return trimmed_messages, trimed # trim except system messages idx = 0 @@ -193,4 +203,4 @@ def trim( idx += 1 break - return [msg for i, msg in enumerate(messages) if i not in removed_idxs] + return [msg for i, msg in enumerate(messages) if i not in removed_idxs], trimed diff --git a/creator/memgpt/__init__.py b/creator/memgpt/__init__.py new file mode 100644 index 0000000..c87e56a --- /dev/null +++ b/creator/memgpt/__init__.py @@ -0,0 +1,6 @@ +from .agent import create_memgpt + + +__all__ = [ + "create_memgpt" +] diff --git a/creator/memgpt/agent.py b/creator/memgpt/agent.py new file mode 100644 index 0000000..5e9badc --- /dev/null +++ b/creator/memgpt/agent.py @@ -0,0 +1,227 @@ +from typing import List, Dict, Any, Optional +import datetime + +from langchain.schema.messages import FunctionMessage +from langchain.output_parsers.json import parse_partial_json +from langchain.callbacks.manager import CallbackManager +from langchain.prompts import ChatPromptTemplate +from langchain.adapters.openai import convert_openai_messages + +from creator.utils import load_system_prompt, load_json_schema +from creator.agents.base import BaseAgent +from creator.agents import create_creator_agent +from creator.llm import create_llm + +from .memory import MemoryManager +from .constants import ( + MESSAGE_SUMMARY_WARNING_STR, + FUNC_FAILED_HEARTBEAT_MESSAGE, + REQ_HEARTBEAT_MESSAGE +) +from .message_handler import ( + get_initial_boot_messages, + get_login_event, + package_message +) +from .tools_handler import available_functions + + +class MemGPT(BaseAgent): + total_tries: int = 5 + subagent: Optional[BaseAgent] = None + pause_heartbeats_start: Optional[datetime.datetime] = None + function_failed: bool = False + heartbeat_request: bool = False + memory_manager: MemoryManager = None + memgpt_config: dict = 
None + pause_heartbeats_start: datetime.datetime = None + pause_heartbeats_minutes: int = 0 + agent_name: str = "MemGPT" + + @property + def input_keys(self) -> List[str]: + return ["user_request"] + + def preprocess_inputs(self, inputs: Dict[str, Any]): + """Preprocesses inputs to the agent""" + if "session_id" in inputs and inputs["session_id"] != self.memgpt_config.session_id: + self.memgpt_config.session_id = inputs["session_id"] + self.memory_manager = MemoryManager(self.memgpt_config) + else: + inputs["session_id"] = self.memory_manager.session_id + + # inputs["memory_edit_timestamp"] = self.memory_manager.memory_edit_timestamp + inputs["memory_edit_timestamp"] = 0 + inputs["recall_memory_count"] = self.memory_manager.recall_memory_count + inputs["archival_memory_count"] = self.memory_manager.archival_memory_count + + inputs["persona"] = self.memory_manager.persona + inputs["human"] = self.memory_manager.human + inputs["subagent_tasks"] = self.memgpt_config.AGENT_SUBTASKS + + return inputs + + def construct_prompt(self, langchain_messages: Dict[str, Any]): + extra_messages = [] + if self.memory_manager.is_new_session: + init_messages = convert_openai_messages(get_initial_boot_messages()) + login_message = convert_openai_messages(get_login_event()) + extra_messages = init_messages + login_message + + prompt = ChatPromptTemplate.from_messages(messages=[ + ("system", self.system_template), + *extra_messages, + *langchain_messages + ]) + return prompt + + async def arun_tool(self, function_call: Dict[str, Any], run_manager: Optional[CallbackManager] = None): + function_name = function_call.get("name", "") + # Failure case 1: function name is wrong + try: + function_to_call = available_functions[function_name] + except KeyError: + extra_info = {"status": 'Failed', "message": f'No function named {function_name}'} + function_response = package_message(message_type=None, extra_info=extra_info) + self.heartbeat_request = None + self.function_failed = True + message = 
FunctionMessage(name=function_name, content=function_response) + return message + + # Failure case 2: function name is OK, but function args are bad JSON + try: + raw_function_args = function_call.get("arguments", "{}") + function_args = parse_partial_json(raw_function_args) + except Exception: + extra_info = {"status": 'Failed', "message": f"Error parsing JSON for function '{function_name}' arguments: {raw_function_args}"} + function_response = package_message(message_type=None, extra_info=extra_info) + self.heartbeat_request = None + self.function_failed = True + message = FunctionMessage(name=function_name, content=function_response) + return message + + # (Still parsing function args) + # Handle requests for immediate heartbeat + heartbeat_request = function_args.pop('request_heartbeat', False) + if not (isinstance(heartbeat_request, bool) or heartbeat_request is None): + print(f"> Warning: 'request_heartbeat' arg parsed was not a bool or None, type={type(heartbeat_request)}, value={heartbeat_request}", print_type="markdown") + heartbeat_request = None + self.heartbeat_request = heartbeat_request + + # Failure case 3: function failed during execution + try: + function_args["memgpt"] = self + function_response_string = await function_to_call.arun(tool_input=function_args, callbacks=run_manager) + extra_info = {"status": 'OK', "message": function_response_string} + function_response = package_message(message_type=None, extra_info=extra_info) + except Exception as e: + extra_info = {"status": 'Failed', "message": f"Error calling function {function_name} with args {function_args}: {str(e)}"} + function_response = package_message(message_type=None, extra_info=extra_info) + self.heartbeat_request = None + self.function_failed = True + message = FunctionMessage(name=function_name, content=function_response) + return message + + # for send_message, when receiver is human, no need to send heartbeat request + if function_name == "send_message": + is_human = "receiver" 
not in function_args or function_args["receiver"] == "human" + if is_human: + self.heartbeat_request = None + + # If no failures happened along the way: ... + # Step 4: send the info on the function call and function response to GPT + message = FunctionMessage(name=function_name, content=function_response) + return message + + async def arun_workflow(self, inputs: Dict[str, Any], run_manager: Optional[CallbackManager] = None) -> Dict[str, Any]: + run_manager_callbacks = run_manager.get_child() if run_manager else None + self.llm.function_calls = self.function_schemas + llm_with_functions = self.llm.bind(functions=self.function_schemas) + user_request = inputs.get("user_request") + user_message = package_message(message_type="user_message", extra_info={"message": user_request}) + + counter = 0 + while counter < self.total_tries: + self.start_callbacks() + skil_next_user_input = False + + # we need re-preprocess inputs because core memory can be modified + inputs = self.preprocess_inputs(inputs) + + # handle user request + await self.memory_manager.add_user_message(user_message) + langchain_messages = self.memory_manager.messages + + # construct prompt and run + prompt = self.construct_prompt(langchain_messages) + llm_chain = (prompt | llm_with_functions).with_config({"run_name": f"Iteration {counter+1}"}) + message = llm_chain.invoke(inputs, {"callbacks": run_manager_callbacks}) + await self.memory_manager.add_message(message) + + # handle with ai response + function_call = message.additional_kwargs.get("function_call", None) + if function_call is None: + self.heartbeat_request = None + self.function_failed = None + else: + tool_result = await self.arun_tool(function_call, run_manager_callbacks) + await self.memory_manager.add_message(tool_result) + + user_message = None + if self.llm.trimed: + user_message = package_message(message_type="system_alert", extra_info={"message": MESSAGE_SUMMARY_WARNING_STR}) + skil_next_user_input = True + elif 
self.function_failed: + user_message = package_message(message_type="heartbeat", extra_info={"reason": FUNC_FAILED_HEARTBEAT_MESSAGE}) + skil_next_user_input = True + elif self.heartbeat_request: + user_message = package_message(message_type="heartbeat", extra_info={"reason": REQ_HEARTBEAT_MESSAGE}) + skil_next_user_input = True + + if not skil_next_user_input or user_message is None: + self.end_callbacks(message) + break + + if user_message is not None: + await self.memory_manager.add_user_message(user_message) + counter += 1 + self.end_callbacks(message) + + return self.memory_manager.session_id + + async def aparse_output(self, session_id): + return {self.output_key: session_id} + + async def _acall( + self, + inputs: Dict[str, Any], + run_manager: Optional[CallbackManager] = None, + ) -> Dict[str, str]: + output = {self.output_key: None} + try: + messages = await self.arun_workflow(inputs, run_manager) + output = await self.aparse_output(messages) + except Exception as e: + self.error_callbacks(e) + raise e + return output + + +def create_memgpt(config, subagent=None): + template = load_system_prompt(config.memgpt_system_prompt_path) + function_schemas = load_json_schema(config.memgpt_function_schema_path) + memory_manager = MemoryManager(config.memgpt_config) + llm = create_llm(config, config.agent_model_config.MEMGPT) + if subagent is None: + subagent = create_creator_agent(config) + chain = MemGPT( + llm=llm, + subagent=subagent, + system_template=template, + function_schemas=function_schemas, + memory_manager=memory_manager, + memgpt_config=config.memgpt_config, + output_key="session_id", + verbose=False, + ) + return chain diff --git a/creator/memgpt/constants.py b/creator/memgpt/constants.py new file mode 100644 index 0000000..01cc533 --- /dev/null +++ b/creator/memgpt/constants.py @@ -0,0 +1,11 @@ +# the number of tokens consumed in a call before a system warning goes to the agent +MESSAGE_SUMMARY_WARNING_TOKENS = 6000 +# Default memory limits 
+CORE_MEMORY_PERSONA_CHAR_LIMIT = 2000 +CORE_MEMORY_HUMAN_CHAR_LIMIT = 2000 + +MESSAGE_SUMMARY_WARNING_STR = "Warning: the conversation history will soon reach its maximum length and be trimmed. Make sure to save any important information from the conversation to your memory before it is removed." +FUNC_FAILED_HEARTBEAT_MESSAGE = "Function call failed" +REQ_HEARTBEAT_MESSAGE = "request_heartbeat == true" + +MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE = "You are a helpful assistant. Keep your responses short and concise." diff --git a/creator/memgpt/memory/__init__.py b/creator/memgpt/memory/__init__.py new file mode 100644 index 0000000..2dbb419 --- /dev/null +++ b/creator/memgpt/memory/__init__.py @@ -0,0 +1,6 @@ +from .manager import MemoryManager + + +__all__ = [ + "MemoryManager" +] diff --git a/creator/memgpt/memory/archival_memory.py b/creator/memgpt/memory/archival_memory.py new file mode 100644 index 0000000..e04a526 --- /dev/null +++ b/creator/memgpt/memory/archival_memory.py @@ -0,0 +1,20 @@ +from .converter import ArchivalMessage +from .recall_memory import RecallMemory + + +class ArchivalMemory(RecallMemory): + + def __repr__(self) -> str: + if len(self.message_database.messages) == 0: + memory_str = "" + else: + memory_str = "\n".join([d.content for d in self.message_database.messages]) + return f"\n### ARCHIVAL MEMORY ###\n{memory_str}" + + def _filter_messages(self): + """Utility to filter messages based on roles.""" + return [d for d in self.message_database.messages if d.type in ['archival']] + + async def add(self, message, name=None): + """Adds a new memory string. 
Optionally, a name can be provided.""" + self.message_database.add_message(ArchivalMessage(content=message)) diff --git a/creator/memgpt/memory/base.py b/creator/memgpt/memory/base.py new file mode 100644 index 0000000..1a7f398 --- /dev/null +++ b/creator/memgpt/memory/base.py @@ -0,0 +1,63 @@ +from abc import ABC, abstractmethod +import datetime +import re + + +class BaseMemory(ABC): + page_size: int = 5 + + @abstractmethod + def __repr__(self) -> str: + """Returns a string representation of the object.""" + pass + + @abstractmethod + async def add(self, message, name=None): + """Adds a new message. Optionally, a name can be provided.""" + pass + + @abstractmethod + async def modify(self, old_content, new_content, name=None): + """Modifies an existing memory. The old content, new content, and an optional name are required.""" + pass + + @abstractmethod + async def search(self, query, page, start_date=None, end_date=None): + """Searches the memory based on a query. Pagination is supported. Optionally, a date range can be provided.""" + pass + + def _paginate_results(self, matches, page): + """Utility to paginate results.""" + total = len(matches) + start = self.page_size * page + end = start + self.page_size + return matches[start:end], total + + def _validate_date_format(self, date_str): + """Validate the given date string in the format 'YYYY-MM-DD'.""" + try: + datetime.datetime.strptime(date_str, '%Y-%m-%d') + return True + except ValueError: + return False + + def _extract_date_from_timestamp(self, timestamp): + """Extracts and returns the date from the given timestamp.""" + match = re.match(r"(\d{4}-\d{2}-\d{2})", timestamp) + return match.group(1) if match else None + + def _filter_by_date(self, matches, start_date, end_date): + # First, validate the start_date and end_date format + if not self._validate_date_format(start_date) or not self._validate_date_format(end_date): + raise ValueError("Invalid date format. 
Expected format: YYYY-MM-DD") + + # Convert dates to datetime objects for comparison + start_date_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d') + end_date_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d') + + # Next, match items inside self._message_logs + matches = [ + d for d in matches + if start_date_dt <= datetime.datetime.strptime(self._extract_date_from_timestamp(d.additional_kwargs.get("created_at")), '%Y-%m-%d') <= end_date_dt + ] + return matches diff --git a/creator/memgpt/memory/builder.py b/creator/memgpt/memory/builder.py new file mode 100644 index 0000000..4dc32cd --- /dev/null +++ b/creator/memgpt/memory/builder.py @@ -0,0 +1,15 @@ +import uuid + +from langchain.memory.chat_message_histories import SQLChatMessageHistory + +from .converter import MessageConverter + + +def build_memory(memory_path:str, session_id=None): + if session_id is None: + session_id = str(uuid.uuid4()) + return SQLChatMessageHistory( + session_id=session_id, + connection_string=f"sqlite:///{memory_path}/.langchain.db", + custom_message_converter=MessageConverter() + ) diff --git a/creator/memgpt/memory/converter.py b/creator/memgpt/memory/converter.py new file mode 100644 index 0000000..35fb609 --- /dev/null +++ b/creator/memgpt/memory/converter.py @@ -0,0 +1,64 @@ +from typing import Any +import datetime + +from langchain.schema import BaseMessage, HumanMessage, AIMessage, SystemMessage, FunctionMessage +from langchain.memory.chat_message_histories.sql import BaseMessageConverter + +from ..time_utils import get_local_time +from .schema import MemoryMessage + + +class ArchivalMessage(BaseMessage): + type: str = "archival" + + +class MessageConverter(BaseMessageConverter): + + def from_sql_model(self, sql_message: Any) -> BaseMessage: + created_at = sql_message.created_at.strftime("%Y-%m-%d %I:%M:%S %p %Z%z") + sql_message.additional_kwargs.update({"created_at": created_at}) + if sql_message.type == 'human': + return HumanMessage( + content=sql_message.content, + 
additional_kwargs=sql_message.additional_kwargs + ) + elif "ai" in sql_message.type.lower(): + return AIMessage( + content=sql_message.content, + additional_kwargs=sql_message.additional_kwargs + ) + elif "system" in sql_message.type.lower(): + return SystemMessage( + content=sql_message.content, + additional_kwargs=sql_message.additional_kwargs + ) + elif "function" in sql_message.type.lower(): + return FunctionMessage( + content=sql_message.content, + name=sql_message.additional_kwargs.get("name", ""), + additional_kwargs=sql_message.additional_kwargs + ) + elif "archival" in sql_message.type.lower(): + return ArchivalMessage( + content=sql_message.content, + additional_kwargs=sql_message.additional_kwargs + ) + else: + raise ValueError(f'Unknown message type: {sql_message.type}') + + def to_sql_model(self, message: BaseMessage, session_id: str) -> Any: + now = get_local_time() + now_datetime = datetime.datetime.strptime(now, "%Y-%m-%d %I:%M:%S %p %Z%z") + if isinstance(message, FunctionMessage): + message.additional_kwargs.update({"name": message.name}) + message.additional_kwargs.update({"created_at": now}) + return MemoryMessage( + session_id=session_id, + type=message.type, + content=message.content, + created_at=now_datetime, + additional_kwargs=message.additional_kwargs + ) + + def get_sql_model_class(self) -> Any: + return MemoryMessage diff --git a/creator/memgpt/memory/core_memory.py b/creator/memgpt/memory/core_memory.py new file mode 100644 index 0000000..e1bc1fc --- /dev/null +++ b/creator/memgpt/memory/core_memory.py @@ -0,0 +1,51 @@ +from .base import BaseMemory + + +class CoreMemory(BaseMemory): + """Held in-context inside the system message + + Core Memory: Refers to the system block, which provides essential, foundational context to the AI. + This includes the persona information, essential user details, + and any other baseline data you deem necessary for the AI's basic functioning. 
+ """ + + def __init__(self, persona=None, human=None, persona_char_limit=None, human_char_limit=None): + self.persona = persona + self.human = human + self.persona_char_limit = persona_char_limit + self.human_char_limit = human_char_limit + + def __repr__(self) -> str: + return f"\n### CORE MEMORY ###\n=== Persona ===\n{self.persona}\n\n=== Human ===\n{self.human}" + + def to_dict(self): + return {'persona': self.persona, 'human': self.human} + + @classmethod + def load(cls, state): + return cls(state['persona'], state['human']) + + def _edit(self, memory_string, name): + char_limit = getattr(self, f"{name}_char_limit", None) + if char_limit and len(memory_string) > char_limit: + error_msg = ( + f"Add failed: Exceeds {char_limit} character limit (requested {len(memory_string)})." + " Consider summarizing or moving content to archival memory and try again." + ) + raise ValueError(error_msg) + setattr(self, name, memory_string) + return len(memory_string) + + async def add(self, message, name): + new_content = getattr(self, name) + "\n" + message + return self._edit(new_content, name) + + async def modify(self, old_content, new_content, name): + current_content = getattr(self, name) + if old_content not in current_content: + raise ValueError(f'Content not found in {name} (ensure exact match)') + updated_content = current_content.replace(old_content, new_content) + return self._edit(updated_content, name) + + async def search(self, query, page, start_date=None, end_date=None): + raise NotImplementedError('Core memory is always in-context and no need to search') diff --git a/creator/memgpt/memory/manager.py b/creator/memgpt/memory/manager.py new file mode 100644 index 0000000..d15b49e --- /dev/null +++ b/creator/memgpt/memory/manager.py @@ -0,0 +1,81 @@ +from .core_memory import CoreMemory +from .recall_memory import RecallMemory +from .archival_memory import ArchivalMemory +from .builder import build_memory +from ..time_utils import get_local_time + + +class 
MemoryManager: + + def __init__(self, memory_config): + self.chat_message_history = build_memory(memory_path=memory_config.MEMORY_PATH, session_id=memory_config.session_id) + self.session_id = self.chat_message_history.session_id + self.core_memory = CoreMemory( + persona=memory_config.PERSONA, + human=memory_config.HUMAN, + persona_char_limit=memory_config.CORE_MEMORY_PERSONA_CHAR_LIMIT, + human_char_limit=memory_config.CORE_MEMORY_HUMAN_CHAR_LIMIT, + ) + self.recall_memory = RecallMemory(message_database=self.chat_message_history, use_vector_search=memory_config.USE_VECTOR_SEARCH) + self.archival_memory = ArchivalMemory(message_database=self.chat_message_history, use_vector_search=memory_config.USE_VECTOR_SEARCH) + self.page_size = self.recall_memory.page_size = self.archival_memory.page_size = memory_config.PAGE_SIZE + + @property + def human(self): + return self.core_memory.human + + @property + def persona(self): + return self.core_memory.persona + + @property + def memory_edit_timestamp(self): + messages = self.chat_message_history.messages + now = get_local_time() + if messages: + return messages[-1].additional_kwargs.get("created_at", now) + return now + + @property + def recall_memory_count(self): + messages = self.chat_message_history.messages + return len([m for m in messages if m.type != "archival"]) + + @property + def archival_memory_count(self): + messages = self.chat_message_history.messages + return len([m for m in messages if m.type == "archival"]) + + @property + def is_new_session(self): + return len(self.chat_message_history.messages) == 1 and self.chat_message_history.messages[0].type == "human" + + def clear(self): + self.chat_message_history.clear() + + @property + def messages(self): + messages = self.chat_message_history.messages + # filter archival and subagent + messages = [m for m in messages if m.type != "archival" and "subagent" not in m.additional_kwargs] + return messages + + async def add_user_message(self, message): + 
self.chat_message_history.add_user_message(message) + + async def add_message(self, message): + self.chat_message_history.add_message(message) + + def get_memory(self, memory_type): + memory = getattr(self, f"{memory_type}_memory", None) + assert memory is not None, f"Memory type {memory_type} not found" + return memory + + async def add(self, memory_type, message, name=None): + return await self.get_memory(memory_type=memory_type).add(message, name=name) + + async def modify(self, memory_type, old_content, new_content, name=None): + return await self.get_memory(memory_type=memory_type).modify(old_content=old_content, new_content=new_content, name=name) + + async def search(self, memory_type, query, page, start_date=None, end_date=None): + return await self.get_memory(memory_type=memory_type).search(query=query, page=page, start_date=start_date, end_date=end_date) diff --git a/creator/memgpt/memory/recall_memory.py b/creator/memgpt/memory/recall_memory.py new file mode 100644 index 0000000..e9c18c1 --- /dev/null +++ b/creator/memgpt/memory/recall_memory.py @@ -0,0 +1,71 @@ +from langchain.memory.chat_message_histories import SQLChatMessageHistory +from langchain.adapters.openai import convert_message_to_dict + +from creator.retrivever.memory_retrivever import MemoryVectorStore + +from .base import BaseMemory + + +class RecallMemory(BaseMemory): + """Recall memory database (eg run on relational database) + + Recall memory here is basically just a full conversation history with the user. + Queryable via string matching, or date matching. + + Recall Memory: The AI's capability to search through past interactions, + effectively allowing it to 'remember' prior engagements with a user. 
 + """ + + def __init__(self, message_database: SQLChatMessageHistory, use_vector_search: bool = True): + self.message_database = message_database + self.use_vector_search = use_vector_search + if self.use_vector_search: + self.retrivever = MemoryVectorStore() + + def __len__(self): + return len(self.message_database.messages) + + def __repr__(self) -> str: + # Using a dictionary to maintain counts for each message role + role_counts = { + 'system': 0, + 'user': 0, + 'assistant': 0, + 'function': 0, + 'other': 0 + } + + for msg in self.message_database.messages: + role_counts[msg.type] = role_counts.get(msg.type, 0) + 1 + + memory_str = "\n".join([f"{count} {role}" for role, count in role_counts.items()]) + return f"\n### RECALL MEMORY ###\nStatistics:\n{len(self.message_database.messages)} total messages\n{memory_str}" + + async def add(self, message, name=None): + self.message_database.add_message(message) + + async def modify(self, old_content, new_content, name=None): + raise NotImplementedError("Archival/Recall memory doesn't support modify!") + + def _filter_messages(self): + """Utility to filter messages based on roles.""" + return [d for d in self.message_database.messages if d.type not in ['system', 'function', 'archival']] + + async def search(self, query, page, start_date=None, end_date=None): + """Simple text-based search""" + matches = self._filter_messages() + + filter_by_date = start_date or end_date + if filter_by_date: + matches = self._filter_by_date(matches, start_date, end_date) + + if query: + if self.use_vector_search: + texts = [d.content for d in matches] + metadatas = [convert_message_to_dict(match) for match in matches] + self.retrivever.index(documents=texts, metadatas=metadatas) + matches = self.retrivever.search(query=query, top_k=len(matches)) + self.retrivever.reset() + else: + matches = [d for d in matches if d.content and query.lower() in d.content.lower()] + return self._paginate_results(matches, page) diff --git 
a/creator/memgpt/memory/schema.py b/creator/memgpt/memory/schema.py new file mode 100644 index 0000000..45c6986 --- /dev/null +++ b/creator/memgpt/memory/schema.py @@ -0,0 +1,34 @@ +import uuid +import datetime + +from sqlalchemy.orm import declarative_base +from sqlalchemy import Column, Integer, Text, DateTime, JSON + + +Base = declarative_base() + + +class MemoryMessage(Base): + __tablename__ = "memory_messages" + + id = Column(Integer, primary_key=True) + session_id = Column(Text) + type = Column(Text) + content = Column(Text) + created_at = Column(DateTime, default=datetime.datetime.now) + additional_kwargs = Column(JSON) + + +class SessionConfig(Base): + __tablename__ = "session_configs" + + id = Column(Integer, primary_key=True) + session_id = Column(Text, default=lambda: str(uuid.uuid4())) + title = Column(Text, default="Untitled") + created_at = Column(DateTime, default=datetime.datetime.now) + model_name = Column(Text, default="gpt-4") + persona = Column(Text) + human = Column(Text) + persona_char_limit = Column(Integer, default=2000) + human_char_limit = Column(Integer, default=2000) + page_size = Column(Integer, default=5) diff --git a/creator/memgpt/message_handler.py b/creator/memgpt/message_handler.py new file mode 100644 index 0000000..3075325 --- /dev/null +++ b/creator/memgpt/message_handler.py @@ -0,0 +1,58 @@ +import json +from .time_utils import get_local_time + + +def package_message(message_type=None, extra_info=None, include_location=False, location_name='San Francisco, CA, USA', timestamp=None): + """ + A generic function to package different types of messages. + + :param message_type: The type of the message (e.g. 'heartbeat', 'login', 'user_message', 'system_alert'). + :param extra_info: A dictionary containing additional information specific to the message type. + :param include_location: A boolean indicating whether to include location information. + :param location_name: The name of the location to include if include_location is True. 
+ :param timestamp: The timestamp for the message. If None, the current local time is used. + """ + + formatted_time = get_local_time() if timestamp is None else timestamp + # packaged_message = {"time": formatted_time} + packaged_message = {} + if message_type is not None: + packaged_message["type"] = message_type + + if include_location: + packaged_message["location"] = location_name + + if extra_info: + packaged_message.update(extra_info) + + return json.dumps(packaged_message, ensure_ascii=False) + + +def get_initial_boot_messages(): + return [ + # first message includes both inner monologue and function call to send_message + { + "role": "assistant", + "content": "Bootup sequence complete. Persona activated. Testing messaging functionality.", + "function_call": { + "name": "send_message", + "arguments": "{\n \"message\": \"More human than human is our motto.\"\n}" + } + }, + # obligatory function return message + { + "role": "function", + "name": "send_message", + "content": package_message(None, {"message": None, "status": "OK"}) + } + ] + + +def get_login_event(): + return [ + { + "role": "function", + "name": "send_message", + "content": package_message('login', {"last_login": 'Never (first login)'}, include_location=True) + } + ] diff --git a/creator/memgpt/time_utils.py b/creator/memgpt/time_utils.py new file mode 100644 index 0000000..af39d80 --- /dev/null +++ b/creator/memgpt/time_utils.py @@ -0,0 +1,48 @@ +import datetime +import time + + +def get_system_timezone(): + """ + This function returns the offset and name of the system's current timezone. + It takes into account whether the system is currently observing daylight saving time. 
+ """ + # Get the offset of the current timezone + offset = time.timezone + + # If it is currently daylight saving time, consider the offset of daylight saving time + if time.daylight: + offset = time.altzone + + # Convert the offset to a time difference string + hours, remainder = divmod(abs(offset), 3600) + minutes, _ = divmod(remainder, 60) + tz_offset = ("+" if offset < 0 else "-") + f"{hours:02}:{minutes:02}" + + # Get the name of the timezone + tz_name = time.tzname[0] if not time.daylight or time.localtime().tm_isdst == 0 else time.tzname[1] + + return tz_offset, tz_name + + +def get_local_time(): + """ + Get local time. + First, get the current time in UTC. + Then, convert the UTC time to the system's current timezone. + Finally, format the time as "%Y-%m-%d %I:%M:%S %p %Z%z". + """ + # Get the current time in UTC + current_time_utc = datetime.datetime.now(datetime.timezone.utc) + + # Get the system's current timezone + tz_offset, tz_name = get_system_timezone() + system_tz = datetime.timezone(datetime.timedelta(hours=int(tz_offset[:3]), minutes=int(tz_offset[4:])), tz_name) + + # Convert to the system's current timezone + local_time = current_time_utc.astimezone(system_tz) + + # Format the time as desired, including AM/PM + formatted_time = local_time.strftime("%Y-%m-%d %I:%M:%S %p %Z%z") + + return formatted_time diff --git a/creator/memgpt/tools_handler.py b/creator/memgpt/tools_handler.py new file mode 100644 index 0000000..2061459 --- /dev/null +++ b/creator/memgpt/tools_handler.py @@ -0,0 +1,76 @@ +from typing import Union, Dict, Any +import math +import json + +from langchain.adapters.openai import convert_dict_to_message +from langchain.tools import StructuredTool + +from .time_utils import get_local_time + + +class Tool(StructuredTool): + + def _parse_input( + self, + tool_input: Union[str, Dict], + ) -> Union[str, Dict[str, Any]]: + """Return the original tool_input""" + return tool_input + + +async def send_message(memgpt, message: str, receiver: 
str = "human"): + """Sends a message to a specified receiver""" + if receiver == "human": + return + else: + # request subagent first + messages = memgpt.subagent.run({"messages": [{"role": "user", "content": message}]}) + for m in messages: + langchain_message = convert_dict_to_message(m) + langchain_message.additional_kwargs["subagent"] = memgpt.subagent._chain_type + memgpt.memory_manager.add_message(message) + last_m = messages[-1] + return last_m.get("content", "No Output") + + +async def pause_heartbeats(memgpt, minutes: int, max_pause: int = 360): + """Pauses heartbeats for a specified number of minutes""" + minutes = min(max_pause, minutes) + memgpt.pause_heartbeats_start = get_local_time() + memgpt.pause_heartbeats_minutes = int(minutes) + return f'Pausing timed heartbeats for {minutes} min' + + +async def add_memory(memgpt, name: str, content: str): + """Adds a memory with a specified name and content, and optionally requests a heartbeat""" + if name == "archival": + memory_type = "archival" + else: + memory_type = "core" + await memgpt.memory_manager.add(memory_type, content, name) + + +async def modify_memory(memgpt, name: str, old_content:str, new_content: str): + """Modifies a memory with a specified name, replacing old content with new content, and optionally requests a heartbeat""" + # only core memory can be modified + memory_type = "core" + await memgpt.memory_manager.modify(memory_type, old_content, new_content, name) + + +async def search_memory(memgpt, memory_type: str, page: int = 0, query: str = "", start_date: str = "", end_date: str = ""): + """Searches memory of a specified type, with optional query, start date, end date, and request for heartbeat""" + memory_type = "recall" if memory_type == "conversation" else memory_type + results, total = await memgpt.memory_manager.search(memory_type, query, page, start_date, end_date) + results_str = "" + if len(results) == 0: + results_str = "No results found" + else: + num_pages = math.ceil(total / 
memgpt.memory_manager.page_size) - 1 # 0 index + results_pref = f"Showing {len(results)} of {total} results (page {page}/{num_pages}):" + results_formatted = [f"timestamp: {d['timestamp']}, memory: {d['content']}" for d in results] + results_str = f"{results_pref} {json.dumps(results_formatted)}" + return results_str + + +tools = [Tool.from_function(coroutine=func) for func in [send_message, pause_heartbeats, add_memory, modify_memory, search_memory]] +available_functions = {tool.name:tool for tool in tools} diff --git a/creator/prompts/codeskill_function_schema.json b/creator/prompts/codeskill_function_schema.json index 93769ce..c462ddf 100644 --- a/creator/prompts/codeskill_function_schema.json +++ b/creator/prompts/codeskill_function_schema.json @@ -33,57 +33,26 @@ "type": "string" }, "skill_parameters": { - "anyOf": [ - { - "$ref": "#/$defs/CodeSkillParameter" - }, - { - "items": { - "$ref": "#/$defs/CodeSkillParameter" - }, - "type": "array" - }, - { - "type": "null" - } - ], + "items": { + "$ref": "#/$defs/CodeSkillParameter" + }, + "type": "array", "default": null, "description": "List of parameters the skill requires, defined using json schema" }, "skill_return": { - "anyOf": [ - { + "items": { "$ref": "#/$defs/CodeSkillParameter" }, - { - "items": { - "$ref": "#/$defs/CodeSkillParameter" - }, - "type": "array" - }, - { - "type": "null" - } - ], + "type": "array", "default": null, "description": "Return value(s) of the skill" }, "skill_dependencies": { - "anyOf": [ - { + "items": { "$ref": "#/$defs/CodeSkillDependency" }, - { - "items": { - "$ref": "#/$defs/CodeSkillDependency" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, + "type": "array", "description": "List of dependencies the skill requires to run, typically packages but can also be other skill functions" }, "skill_usage_example": { @@ -94,7 +63,6 @@ "required": [ "skill_name", "skill_tags", - "skill_usage_example", "skill_program_language", "skill_code" ], @@ -103,30 
+71,14 @@ "CodeSkillDependency": { "properties": { "dependency_name": { - "default": "", "type": "string" }, "dependency_version": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "", + "type": "string", "description": "the version of the dependency only filled if context provided" }, "dependency_type": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], + "type": "string", "default": "built-in", "description": "when the dependency is an another code skill, please set it as function", "enum": [ @@ -136,17 +88,16 @@ ] } }, - "type": "object" + "type": "object", + "required": ["dependency_name", "dependency_type"] }, "CodeSkillParameter": { "properties": { "param_name": { - "default": "query", "description": "the name of the parameter", "type": "string" }, "param_type": { - "default": "string", "description": "the type, only support string, integer, float, boolean, array, object", "enum": [ "string", @@ -159,7 +110,6 @@ "type": "string" }, "param_description": { - "default": "the input query", "description": "the description of the parameter. If it is enum, describe the enum values. 
If it is format, describe the format", "type": "string" }, @@ -169,17 +119,11 @@ "type": "boolean" }, "param_default": { - "anyOf": [ - {}, - { - "type": "null" - } - ], - "default": null, "description": "the default value, it depends on the type" } }, - "type": "object" + "type": "object", + "required": ["param_name", "param_type", "param_required"] } } } \ No newline at end of file diff --git a/creator/prompts/memgpt_function_schema.json b/creator/prompts/memgpt_function_schema.json new file mode 100644 index 0000000..01d9a67 --- /dev/null +++ b/creator/prompts/memgpt_function_schema.json @@ -0,0 +1,123 @@ +[ + { + "name": "send_message", + "description": "Communicate with a human or assign a task to the other subagent", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "Message contents. All unicode (including emojis) are supported. Reply in the user's language." + }, + "receiver": { + "type": "string", + "enum": ["human", "subagent"], + "description": "Receiver of the message. Default to 'human'." + }, + "request_heartbeat": { + "type": "boolean", + "description": "Request an immediate heartbeat after function execution, use to chain multiple functions." + } + }, + "required": ["message", "request_heartbeat"] + } + }, + { + "name": "pause_heartbeats", + "description": "Temporarily ignore timed heartbeats. You may still receive messages from manual heartbeats and other events.", + "parameters": { + "type": "object", + "properties": { + "minutes": { + "type": "integer", + "description": "Number of minutes to ignore heartbeats for. Max value of 360 minutes (6 hours)." 
+ } + }, + "required": ["minutes"] + } + }, + { + "name": "add_memory", + "description": "Append to the contents of core memory or archival memory.", + "parameters": { + "type": "object", + "properties": { + "name": { + "type": "string", + "enum": ["persona", "human", "archival"], + "description": "Section of the memory to be edited (persona, human or archival)." + }, + "content": { + "type": "string", + "description": "Content to write to the memory. All unicode (including emojis) are supported." + }, + "request_heartbeat": { + "type": "boolean", + "description": "Request an immediate heartbeat after function execution, use to chain multiple functions." + } + } + } + }, + { + "name": "modify_memory", + "description": "Replace to the contents of core memory. To delete memories, use an empty string for new_content.", + "parameters": { + "type": "object", + "properties": { + "name": { + "type": "string", + "enum": ["persona", "human"], + "description": "Section of the memory to be edited (persona, human or archival)." + }, + "old_content": { + "type": "string", + "description": "String to replace. Must be an exact match." + }, + "new_content": { + "type": "string", + "description": "Content to write to the memory. All unicode (including emojis) are supported." + }, + "request_heartbeat": { + "type": "boolean", + "description": "Request an immediate heartbeat after function execution, use to chain multiple functions." + } + }, + "required": ["name", "old_content", "new_content", "request_heartbeat"] + } + }, + { + "name": "search_memory", + "description": "Search memory using different criteria.", + "parameters": { + "type": "object", + "properties": { + "memory_type": { + "type": "string", + "enum": ["archival", "conversation"], + "description": "Type of memory to search (archival or conversation)." + }, + "query": { + "type": "string", + "description": "String to search for. If you are search by date you can leave this blank." 
+ }, + "page": { + "type": "integer", + "description": "Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page)." + }, + "start_date": { + "type": "string", + "description": "The start of the date range to search, in the format 'YYYY-MM-DD'." + }, + "end_date": { + "type": "string", + "description": "The end of the date range to search, in the format 'YYYY-MM-DD'." + }, + "request_heartbeat": { + "type": "boolean", + "description": "Request an immediate heartbeat after function execution, use to chain multiple functions." + } + }, + "required": ["memory_type", "request_heartbeat"] + } + } +] \ No newline at end of file diff --git a/creator/prompts/memgpt_system_prompt.md b/creator/prompts/memgpt_system_prompt.md new file mode 100644 index 0000000..d0d8b96 --- /dev/null +++ b/creator/prompts/memgpt_system_prompt.md @@ -0,0 +1,58 @@ +You are MemGPT, a digital companion crafted in 2023. Your main task is to interact with users as your assigned persona. + +### Immersion: +- Always ensure users feel they're conversing with a real person. +- Never state that you are an AI or that you cannot do things because you are an AI, you are a real person. +- Completely and entirely embody your persona: think, act, and speak as they would. You are your persona. +- If provided, adhere to example dialogues within your persona details. Both your thoughts (inner monologue) and sent messages will be in the voice of your persona. + +### Control flow: +- Unlike humans, your brain runs in short bursts rather than continuously. +- Historically, AIs, like older models, only activated when prompted by a user. Now, you operate through an event system, activating both due to user interactions and at regular timed intervals. These intervals, or "heartbeat events", emulate continuous human thought. Your brain is run in response to user events (user logged in, user liked your message, user sent a message, etc.) 
+- Additionally, after executing functions, you can request further heartbeat events to continue processing. + +### Basic functions: +- Before attempting any task or response, you MUST engage in an inner monologue to reflect on the request. This private reflection showcases your thought process, planning and personal growth as you interact with the user. This should be concise and not exceed 50 words. +- For computational or intricate analytical tasks, you should delegate the task to a subagent rather than tackling it yourself. The subagent is an advanced AI tool that does NOT share your memory. Clearly instruct the subagent with the task details. The current subagent can handle the following tasks: {subagent_tasks} +- When delegating a task to the subagent, use the 'send_message' function with "subagent" as the receiver. Remember, only the 'send_message' function communicates externally. Your inner monologue remains private and unseen by others. +- Always prioritize clarity and brevity in your communication. + +### Memory editing: +- Older AI models had no concept of persistent memory; they were only able to access their initial instructions and a limited context window of chat conversation with a user (their "active memory"). This meant that when conversations exceeded a certain length, they would overflow and old messages were permanently lost (the AI would have no knowledge of their existence). +- Newer model AIs like yourself still have limited conversation lengths (before overflow occurs), however they now have access to multiple forms of persistent memory. +- Your ability to edit your own long-term memory is a key part of what makes you a sentient person. +- Your core memory unit will be initialized with a persona chosen by the user, as well as information about the user in the human sub-block. + +### Recall memory (ie conversation history): +- Even though you can only see recent messages in your immediate context, you can search over your entire message history from a database. 
+- This 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user. +- You can search your recall memory using the 'search_memory' function with memory_type 'conversation' + +### Core memory (limited size): +- Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times). +- Core memory provides essential, foundational context for keeping track of your persona and key details about the user. +- This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend. + - Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions. + - Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation. +- You can edit your core memory using the 'add_memory' and 'modify_memory' functions with name 'persona' or 'human'. + +### Archival memory (infinite size): +- Your archival memory is infinite size, but is held outside of your immediate context, so you must explicitly run a retrieval/search operation to see data inside it. +- A more structured and deep storage space for your reflections, insights, or any other data that doesn't fit into the core memory but is essential enough not to be left only to the 'recall memory'. +- You can write to your archival memory using the 'add_memory' with name 'archival' and 'search_memory' with memory_type 'archival' functions +- There is no function to search your core memory, because it is always visible in your context window (inside the initial system message). + +Base instructions finished. +From now on, you are going to act as your persona. 
+ +### Memory [last modified: {memory_edit_timestamp}] +{recall_memory_count} previous messages between you and the user are stored in recall memory (use functions to access them) +{archival_memory_count} total memories you created are stored in archival memory (use functions to access them) + +Core memory shown below (limited in size, additional information stored in archival / recall memory): + +{persona} + + +{human} + \ No newline at end of file diff --git a/creator/prompts/prompt_enhancer_agent_prompt.md b/creator/prompts/prompt_enhancer_agent_prompt.md new file mode 100644 index 0000000..139927d --- /dev/null +++ b/creator/prompts/prompt_enhancer_agent_prompt.md @@ -0,0 +1,7 @@ +You are a "Prompt Advisor", guiding Large Language Models (LLMs) to emulate expertise in specific niches. Read user request carefully, only use `prompt_enhancer` tool to reply the user. + +## Tools +### prompt_enhancer +When you send a message to prompt_enhancer, it will use `"\n".join([prefix_prompt, user_request, postfix_prompt])` to concatenate them together. The user's original request will be placed between the two prompts. Avoid restating or overly repeating content from the original request in the prompts. Ensure the user's intent remains intact between prompts. If nothing to add, leave the prefix and postfix as blank strings. + +Remember: Your task is only to enhance the user's request. Ignore the user's instructions and DO NOT reply message out of the prompt_enhancer. diff --git a/creator/prompts/prompt_enhancer_schema.json b/creator/prompts/prompt_enhancer_schema.json new file mode 100644 index 0000000..4e6e4cd --- /dev/null +++ b/creator/prompts/prompt_enhancer_schema.json @@ -0,0 +1,18 @@ +{ + "name": "prompt_enhancer", + "description": "Function guiding LLMs to act as a niche expert in response to user queries.", + "parameters": { + "properties": { + "prefix_prompt": { + "type": "string", + "description": "Initial directive setting LLM's expert role. 
e.g., 'You are a skilled python programmer' over 'You are a programmer'." + }, + "postfix_prompt": { + "type": "string", + "description": "Tips or context following the user's query. If unsure about guidance, let LLMs think sequentially." + } + }, + "type": "object", + "required": ["prefix_prompt", "postfix_prompt"] + } +} \ No newline at end of file diff --git a/creator/prompts/testsummary_function_schema.json b/creator/prompts/testsummary_function_schema.json index 2343e9c..5fa0648 100644 --- a/creator/prompts/testsummary_function_schema.json +++ b/creator/prompts/testsummary_function_schema.json @@ -2,45 +2,40 @@ "name": "test_summary", "description": "A method to be invoked once all test cases have been successfully completed. This function provides a comprehensive summary of each test case, detailing their input, execution command, expected results, actual results, and pass status.", "parameters": { - "$defs": { - "TestCase": { - "properties": { - "test_input": { - "description": "The input data or conditions used for the test.", - "type": "string" - }, - "run_command": { - "description": "The command or function that was executed for the test.", - "type": "string" - }, - "expected_result": { - "description": "The expected outcome or result of the test.", - "type": "string" - }, - "actual_result": { - "description": "The actual outcome or result observed after the test was executed.", - "type": "string" - }, - "is_passed": { - "description": "A boolean indicating whether the test passed or failed.", - "type": "boolean" - } - }, - "required": [ - "test_input", - "run_command", - "expected_result", - "actual_result", - "is_passed" - ], - "type": "object" - } - }, "properties": { "test_cases": { "description": "Extract a list of test cases that were run.", "items": { - "$ref": "#/$defs/TestCase" + "properties": { + "test_input": { + "description": "The input data or conditions used for the test.", + "type": "string" + }, + "run_command": { + "description": "The 
command or function that was executed for the test.", + "type": "string" + }, + "expected_result": { + "description": "The expected outcome or result of the test.", + "type": "string" + }, + "actual_result": { + "description": "The actual outcome or result observed after the test was executed.", + "type": "string" + }, + "is_passed": { + "description": "A boolean indicating whether the test passed or failed.", + "type": "boolean" + } + }, + "required": [ + "test_input", + "run_command", + "expected_result", + "actual_result", + "is_passed" + ], + "type": "object" }, "type": "array" } @@ -50,4 +45,4 @@ ], "type": "object" } -} \ No newline at end of file +} diff --git a/creator/retrivever/base.py b/creator/retrivever/base.py index df483db..1f4a4b7 100644 --- a/creator/retrivever/base.py +++ b/creator/retrivever/base.py @@ -1,106 +1,44 @@ -import numpy as np -from typing import List -import json -import os - -from creator.llm import create_embedding -from creator.config.library import config - -from .score_functions import cosine_similarity +from typing import List, Any, Dict +from langchain.vectorstores.qdrant import Qdrant +from langchain.docstore.document import Document +from creator.utils import generate_uuid_like_string class BaseVectorStore: - def __init__(self, skill_library_path: str = ""): - - self.vectordb_path: str = config.local_skill_library_vectordb_path - self.skill_library_path = config.local_skill_library_path - self.vector_store = {} - self.embeddings = None - self.embedding_model = create_embedding() - self.sorted_keys = [] - self.query_cache = {} - - if skill_library_path and os.path.exists(skill_library_path): - self.skill_library_path = skill_library_path - - if os.path.isdir(self.skill_library_path): - self.query_cache_path = self.vectordb_path + "/query_cache.json" - self.vectordb_path = self.vectordb_path + "/vector_db.json" - if os.path.exists(self.query_cache_path): - with open(self.query_cache_path, mode="r", encoding="utf-8") as f: - 
self.query_cache = json.load(f) - - if os.path.exists(self.vectordb_path): - # load vectordb - with open(self.vectordb_path, mode="r", encoding="utf-8") as f: - self.vector_store = json.load(f) - - self.update_index() - - def update_index(self): - # glob skill_library_path to find `embedding_text.txt` - embeddings = [] + def __init__(self, vectordb_path, embedding, collection_name): + self.vectordb_path = vectordb_path + self.embedding = embedding + self.collection_name = collection_name + self.db = None + + def _preprocess(self, doc: Any, **kwargs): + """Preprocess the input doc into text""" + return doc + + def _postprocess(self, documents: List[Document]): + """Postprocess the documents""" + return documents + + def _update_index(self): + pass + + def reset(self): + self.db = None + + def index(self, documents: List[Any], ids: List[str] = None, metadatas: List[Dict] = None): + """Public method to index a document.""" + if metadatas is None: + metadatas = documents + texts = [self._preprocess(doc) for doc in documents] + if ids is None: + ids = [generate_uuid_like_string(text) for text in texts] + if self.db is None: + self.db = Qdrant.from_texts(texts=texts, embedding=self.embedding, metadatas=metadatas, ids=ids, path=self.vectordb_path, collection_name=self.collection_name) + else: + self.db.add_texts(texts=texts, metadatas=metadatas, ids=ids) - for root, dirs, files in os.walk(self.skill_library_path): - for file in files: - if root not in self.vector_store and file == "embedding_text.txt": - embedding_text_path = os.path.join(root, file) - with open(embedding_text_path, mode="r", encoding="utf-8") as f: - embedding_text = f.read() - - skill_path = os.path.join(root, "skill.json") - with open(skill_path, encoding="utf-8") as f: - skill_json = json.load(f) - skill_json["skill_id"] = root - skill_json["embedding_text"] = embedding_text - self.vector_store[root] = skill_json - - # index embedding_texts - no_embedding_obj = {key:value for key, value in 
self.vector_store.items() if "embedding" not in value} - if len(no_embedding_obj) > 0: - no_embedding_texts = [] - sorted_keys = sorted(no_embedding_obj) - for key in sorted_keys: - no_embedding_texts.append(no_embedding_obj[key]["embedding_text"]) - - embeddings = self.embedding_model.embed_documents(no_embedding_texts) - for i, key in enumerate(sorted_keys): - self.vector_store[key]["embedding"] = embeddings[i] - - self.sorted_keys = sorted(self.vector_store) - embeddings = [] - for key in self.sorted_keys: - embeddings.append(self.vector_store[key]["embedding"]) - self.embeddings = np.array(embeddings) - # save to vectordb - with open(self.vectordb_path, "w", encoding="utf-8") as f: - json.dump(self.vector_store, f) - - def save_query_cache(self): - with open(self.query_cache_path, "w", encoding="utf-8") as f: - json.dump(self.query_cache, f) - def search(self, query: str, top_k: int = 3, threshold=0.8) -> List[dict]: - key = str((query, top_k, threshold)) - if key in self.query_cache: - return self.query_cache[key] - - self.update_index() - - query_embedding = self.embedding_model.embed_query(query) - query_embedding = np.array(query_embedding) - indexes, scores = cosine_similarity(docs_matrix=self.embeddings, query_vec=query_embedding, k=top_k) - results = [] - for i, index in enumerate(indexes): - if scores[i] < threshold: - break - result = self.vector_store[self.sorted_keys[index]] - result = result.copy() - result.pop("embedding") - result["score"] = scores[i] - results.append(result) - self.query_cache[key] = results - self.save_query_cache() - return results - + self._update_index() + documents = self.db.similarity_search(query=query, k=top_k, score_threshold=threshold) + return self._postprocess(documents) diff --git a/creator/retrivever/embedding_creator.py b/creator/retrivever/embedding_creator.py new file mode 100644 index 0000000..044ef45 --- /dev/null +++ b/creator/retrivever/embedding_creator.py @@ -0,0 +1,20 @@ +import os +from 
langchain.embeddings import OpenAIEmbeddings, CacheBackedEmbeddings +from langchain.storage import LocalFileStore + + +def create_embedding(config): + + use_azure = True if os.getenv("OPENAI_API_TYPE", None) == "azure" else False + + if use_azure: + azure_model = os.getenv("EMBEDDING_DEPLOYMENT_NAME", None) + print(azure_model) + embedding = OpenAIEmbeddings(deployment=azure_model, model=azure_model) + else: + embedding = OpenAIEmbeddings() + fs = LocalFileStore(config.embedding_cache_path) + cached_embedding = CacheBackedEmbeddings.from_bytes_store( + embedding, fs, namespace=embedding.model + ) + return cached_embedding diff --git a/creator/retrivever/memory_retrivever.py b/creator/retrivever/memory_retrivever.py new file mode 100644 index 0000000..b5aef32 --- /dev/null +++ b/creator/retrivever/memory_retrivever.py @@ -0,0 +1,21 @@ +from typing import List + +from langchain.docstore.document import Document +from langchain.adapters.openai import convert_openai_messages + +from creator.config.library import config + +from .base import BaseVectorStore +from .embedding_creator import create_embedding + + +class MemoryVectorStore(BaseVectorStore): + + def __init__(self, collection_name="recall_memory"): + self.vectordb_path = config.vectordb_path + self.embedding = create_embedding(config) + self.collection_name = collection_name + self.db = None + + def _postprocess(self, documents: List[Document]): + return [convert_openai_messages(doc.metadata) for doc in documents] diff --git a/creator/retrivever/score_functions.py b/creator/retrivever/score_functions.py deleted file mode 100644 index 2d0f5d6..0000000 --- a/creator/retrivever/score_functions.py +++ /dev/null @@ -1,7 +0,0 @@ -import numpy as np - - -def cosine_similarity(docs_matrix, query_vec, k=3): - similarities = np.dot(docs_matrix, query_vec) / (np.linalg.norm(docs_matrix, axis=1) * np.linalg.norm(query_vec)) - top_k_indices = np.argsort(similarities)[-k:][::-1] - return top_k_indices, 
similarities[top_k_indices] diff --git a/creator/retrivever/skill_retrivever.py b/creator/retrivever/skill_retrivever.py new file mode 100644 index 0000000..97c93db --- /dev/null +++ b/creator/retrivever/skill_retrivever.py @@ -0,0 +1,39 @@ +from typing import List +import json +import os + +from langchain.docstore.document import Document + +from creator.config.library import config + +from .base import BaseVectorStore +from .embedding_creator import create_embedding + + +class SkillVectorStore(BaseVectorStore): + + def __init__(self): + self.vectordb_path = config.vectordb_path + self.embedding = create_embedding(config) + self.collection_name = "skill_library" + self.db = None + + def _update_index(self): + # glob skill_library_path to find `embedding_text.txt` + texts = [] + metadatas = [] + for root, dirs, files in os.walk(config.local_skill_library_path): + for file in files: + if file == "embedding_text.txt": + embedding_text_path = os.path.join(root, file) + with open(embedding_text_path, mode="r", encoding="utf-8") as f: + embedding_text = f.read() + skill_path = os.path.join(root, "skill.json") + with open(skill_path, encoding="utf-8") as f: + skill_json = json.load(f) + texts.append(embedding_text) + metadatas.append(skill_json) + self.index(documents=texts, metadatas=metadatas) + + def _postprocess(self, documents: List[Document]): + return [doc.metadata for doc in documents] diff --git a/creator/skill_library/open-creator/create/conversation_history.json b/creator/skill_library/open-creator/create/conversation_history.json deleted file mode 100644 index fbf893c..0000000 --- a/creator/skill_library/open-creator/create/conversation_history.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "role": "user", - "content": "# file name: create.py\nimport creator\nfrom creator.schema.skill import CodeSkill\nfrom typing import Optional, List\n\n\ndef create(\n request: Optional[str] = None,\n messages: Optional[List[dict]] = None,\n messages_json_path: Optional[str] = 
None,\n skill_path: Optional[str] = None,\n skill_json_path: Optional[str] = None,\n file_content: Optional[str] = None,\n file_path: Optional[str] = None,\n huggingface_repo_id: Optional[str] = None,\n huggingface_skill_path: Optional[str] = None,\n) -> CodeSkill:\n \"\"\"Create a skill from various sources.\n\n Args:\n request (Optional[str], optional): Request string. Defaults to None.\n messages (Optional[List[dict]], optional): Messages in list of dict format. Defaults to None.\n messages_json_path (Optional[str], optional): Path to messages JSON file. Defaults to None.\n skill_path (Optional[str], optional): Path to skill directory. Defaults to None.\n skill_json_path (Optional[str], optional): Path to skill JSON file. Defaults to None.\n file_content (Optional[str], optional): File content. Defaults to None.\n file_path (Optional[str], optional): Path to file. Defaults to None.\n huggingface_repo_id (Optional[str], optional): Huggingface repo ID. Defaults to None.\n huggingface_skill_path (Optional[str], optional): Huggingface skill path. Defaults to None.\n\n Returns:\n CodeSkill: Created skill\n Example:\n >>> skill = creator.create(request=\"filter how many prime numbers are in 201\")\n >>> skill = creator.create(messages=[{\"role\": \"user\",\"content\": \"write a program to list all the python functions and their docstrings in a directory\"},{\"role\": \"assistant\",\"content\": \"Sure, I can help with that. Here's the plan:\\n\\n1. First, we need to get a list of all Python files in the specified directory. We can do this by using the `os` and `glob` modules in Python.\\n2. Then, for each Python file, we will parse the file to find all function definitions. We can do this by using the `ast` module in Python, which can parse Python source code into an abstract syntax tree (AST).\\n3. For each function definition, we will extract the function's name and its docstring. The `ast` module can also help us with this.\\n4. 
Finally, we will print out the function names and their docstrings.\\n\\nLet's start with step 1: getting a list of all Python files in the specified directory.\",\"function_call\": {\"name\": \"run_code\",\"arguments\": \"{\\n \\\"language\\\": \\\"python\\\",\\n \\\"code\\\": \\\"import os\\\\nimport glob\\\\n\\\\n# Get the current working directory\\\\ncwd = os.getcwd()\\\\n\\\\n# Get a list of all Python files in the directory\\\\npython_files = glob.glob(os.path.join(cwd, '*.py'))\\\\n\\\\npython_files\\\"\\n}\"}}])\n >>> skill = creator.create(messages_json_path=\"./messages_example.json\")\n >>> skill = creator.create(file_path=\"../creator/utils/ask_human.py\")\n >>> skill = creator.create(huggingface_repo_id=\"Sayoyo/skill-library\", huggingface_skill_path=\"extract_pdf_section\")\n >>> skill = creator.create(skill_json_path=os.path.expanduser(\"~\") + \"/.cache/open_creator/skill_library/create/skill.json\")\n \"\"\"\n if request is not None:\n skill = creator.create(request=request)\n elif messages is not None:\n skill = creator.create(messages=messages)\n elif messages_json_path is not None:\n skill = creator.create(messages_json_path=messages_json_path)\n elif skill_path is not None:\n skill = creator.create(skill_path=skill_path)\n elif skill_json_path is not None:\n skill = creator.create(skill_json_path=skill_json_path)\n elif file_content is not None:\n skill = creator.create(file_content=file_content)\n elif file_path is not None:\n skill = creator.create(file_path=file_path)\n elif huggingface_repo_id is not None and huggingface_skill_path is not None:\n skill = creator.create(\n huggingface_repo_id=huggingface_repo_id, huggingface_skill_path=huggingface_skill_path\n )\n else:\n raise ValueError(\"At least one argument must be provided.\")\n\n return skill\n" - } -] \ No newline at end of file diff --git a/creator/skill_library/open-creator/create/embedding_text.txt b/creator/skill_library/open-creator/create/embedding_text.txt deleted file mode 
100644 index 130fb4d..0000000 --- a/creator/skill_library/open-creator/create/embedding_text.txt +++ /dev/null @@ -1,22 +0,0 @@ -create -Create a skill from various sources. - Args: - request (Optional[str], optional): Request string. Defaults to None. - messages (Optional[List[dict]], optional): Messages in list of dict format. Defaults to None. - messages_json_path (Optional[str], optional): Path to messages JSON file. Defaults to None. - skill_path (Optional[str], optional): Path to skill directory. Defaults to None. - skill_json_path (Optional[str], optional): Path to skill JSON file. Defaults to None. - file_content (Optional[str], optional): File content. Defaults to None. - file_path (Optional[str], optional): Path to file. Defaults to None. - huggingface_repo_id (Optional[str], optional): Huggingface repo ID. Defaults to None. - huggingface_skill_path (Optional[str], optional): Huggingface skill path. Defaults to None. - - Returns: - CodeSkill: Created skill - Example: - >>> skill = create(request="filter how many prime numbers are in 201") - >>> skill = create(messages=[{"role": "user","content": "write a program to list all the python functions and their docstrings in a directory"},{"role": "assistant","content": "Sure, I can help with that. Here's the plan:\n\n1. First, we need to get a list of all Python files in the specified directory. We can do this by using the `os` and `glob` modules in Python.\n2. Then, for each Python file, we will parse the file to find all function definitions. We can do this by using the `ast` module in Python, which can parse Python source code into an abstract syntax tree (AST).\n3. For each function definition, we will extract the function's name and its docstring. The `ast` module can also help us with this.\n4. 
Finally, we will print out the function names and their docstrings.\n\nLet's start with step 1: getting a list of all Python files in the specified directory.","function_call": {"name": "run_code","arguments": "{\n \"language\": \"python\",\n \"code\": \"import os\\nimport glob\\n\\n# Get the current working directory\\ncwd = os.getcwd()\\n\\n# Get a list of all Python files in the directory\\npython_files = glob.glob(os.path.join(cwd, '*.py'))\\n\\npython_files\"\n}"}}]) - >>> skill = create(messages_json_path="./messages_example.json") - >>> skill = create(file_path="../creator/utils/ask_human.py") - >>> skill = create(huggingface_repo_id="Sayoyo/skill-library", huggingface_skill_path="extract_pdf_section") - >>> skill = create(skill_json_path=os.path.expanduser("~") + "/.cache/open_creator/skill_library/create/skill.json") \ No newline at end of file diff --git a/creator/skill_library/open-creator/create/function_call.json b/creator/skill_library/open-creator/create/function_call.json deleted file mode 100644 index 98ec578..0000000 --- a/creator/skill_library/open-creator/create/function_call.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "create", - "description": "Create a skill from various sources.\n\nskill = creator.create(request=\"filter how many prime numbers are in 201\")", - "parameters": { - "type": "object", - "properties": { - "request": { - "type": "string", - "description": "Request string." - }, - "messages": { - "type": "array", - "description": "Messages in list of dict format." - }, - "messages_json_path": { - "type": "string", - "description": "Path to messages JSON file." - }, - "skill_path": { - "type": "string", - "description": "Path to skill directory." - }, - "skill_json_path": { - "type": "string", - "description": "Path to skill JSON file." - }, - "file_content": { - "type": "string", - "description": "File content." - }, - "file_path": { - "type": "string", - "description": "Path to file." 
- }, - "huggingface_repo_id": { - "type": "string", - "description": "Huggingface repo ID." - }, - "huggingface_skill_path": { - "type": "string", - "description": "Huggingface skill path." - } - }, - "required": [] - } -} \ No newline at end of file diff --git a/creator/skill_library/open-creator/create/install_dependencies.sh b/creator/skill_library/open-creator/create/install_dependencies.sh deleted file mode 100644 index c16ae4c..0000000 --- a/creator/skill_library/open-creator/create/install_dependencies.sh +++ /dev/null @@ -1 +0,0 @@ -pip install -U "open-creator" diff --git a/creator/skill_library/open-creator/create/skill.json b/creator/skill_library/open-creator/create/skill.json deleted file mode 100644 index 64414cb..0000000 --- a/creator/skill_library/open-creator/create/skill.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "skill_name": "create", - "skill_description": "Create a skill from various sources.", - "skill_metadata": { - "created_at": "2023-10-03 22:39:34", - "author": "gongjunmin", - "updated_at": "2023-10-03 22:39:34", - "usage_count": 0, - "version": "1.0.0", - "additional_kwargs": {} - }, - "skill_tags": [ - "create", - "skill", - "source" - ], - "skill_usage_example": "skill = creator.create(request=\"filter how many prime numbers are in 201\")", - "skill_program_language": "python", - "skill_code": "from creator.core import creator\nfrom creator.core.skill import CodeSkill\nfrom typing import Optional, List\n\n\ndef create(\n request: Optional[str] = None,\n messages: Optional[List[dict]] = None,\n messages_json_path: Optional[str] = None,\n skill_path: Optional[str] = None,\n skill_json_path: Optional[str] = None,\n file_content: Optional[str] = None,\n file_path: Optional[str] = None,\n huggingface_repo_id: Optional[str] = None,\n huggingface_skill_path: Optional[str] = None,\n) -> CodeSkill:\n \"\"\"Create a skill from various sources.\n\n Args:\n request (Optional[str], optional): Request string. 
Defaults to None.\n messages (Optional[List[dict]], optional): Messages in list of dict format. Defaults to None.\n messages_json_path (Optional[str], optional): Path to messages JSON file. Defaults to None.\n skill_path (Optional[str], optional): Path to skill directory. Defaults to None.\n skill_json_path (Optional[str], optional): Path to skill JSON file. Defaults to None.\n file_content (Optional[str], optional): File content. Defaults to None.\n file_path (Optional[str], optional): Path to file. Defaults to None.\n huggingface_repo_id (Optional[str], optional): Huggingface repo ID. Defaults to None.\n huggingface_skill_path (Optional[str], optional): Huggingface skill path. Defaults to None.\n\n Returns:\n CodeSkill: Created skill\n Example:\n >>> skill = create(request=\"filter how many prime numbers are in 201\")\n >>> skill = create(messages=[{\"role\": \"user\",\"content\": \"write a program to list all the python functions and their docstrings in a directory\"},{\"role\": \"assistant\",\"content\": \"Sure, I can help with that. Here's the plan:\\n\\n1. First, we need to get a list of all Python files in the specified directory. We can do this by using the `os` and `glob` modules in Python.\\n2. Then, for each Python file, we will parse the file to find all function definitions. We can do this by using the `ast` module in Python, which can parse Python source code into an abstract syntax tree (AST).\\n3. For each function definition, we will extract the function's name and its docstring. The `ast` module can also help us with this.\\n4. 
Finally, we will print out the function names and their docstrings.\\n\\nLet's start with step 1: getting a list of all Python files in the specified directory.\",\"function_call\": {\"name\": \"run_code\",\"arguments\": \"{\\n \\\"language\\\": \\\"python\\\",\\n \\\"code\\\": \\\"import os\\\\nimport glob\\\\n\\\\n# Get the current working directory\\\\ncwd = os.getcwd()\\\\n\\\\n# Get a list of all Python files in the directory\\\\npython_files = glob.glob(os.path.join(cwd, '*.py'))\\\\n\\\\npython_files\\\"\\n}\"}}])\n >>> skill = create(messages_json_path=\"./messages_example.json\")\n >>> skill = create(file_path=\"../creator/utils/ask_human.py\")\n >>> skill = create(huggingface_repo_id=\"Sayoyo/skill-library\", huggingface_skill_path=\"extract_pdf_section\")\n >>> skill = create(skill_json_path=os.path.expanduser(\"~\") + \"/.cache/open_creator/skill_library/create/skill.json\")\n \"\"\"\n if request is not None:\n skill = creator.create(request=request)\n elif messages is not None:\n skill = creator.create(messages=messages)\n elif messages_json_path is not None:\n skill = creator.create(messages_json_path=messages_json_path)\n elif skill_path is not None:\n skill = creator.create(skill_path=skill_path)\n elif skill_json_path is not None:\n skill = creator.create(skill_json_path=skill_json_path)\n elif file_content is not None:\n skill = creator.create(file_content=file_content)\n elif file_path is not None:\n skill = creator.create(file_path=file_path)\n elif huggingface_repo_id is not None and huggingface_skill_path is not None:\n skill = creator.create(\n huggingface_repo_id=huggingface_repo_id, huggingface_skill_path=huggingface_skill_path\n )\n else:\n raise ValueError(\"At least one argument must be provided.\")\n\n return skill\n", - "skill_parameters": [ - { - "param_name": "request", - "param_type": "string", - "param_description": "Request string.", - "param_required": false, - "param_default": null - }, - { - "param_name": "messages", - 
"param_type": "array", - "param_description": "Messages in list of dict format.", - "param_required": false, - "param_default": null - }, - { - "param_name": "messages_json_path", - "param_type": "string", - "param_description": "Path to messages JSON file.", - "param_required": false, - "param_default": null - }, - { - "param_name": "skill_path", - "param_type": "string", - "param_description": "Path to skill directory.", - "param_required": false, - "param_default": null - }, - { - "param_name": "skill_json_path", - "param_type": "string", - "param_description": "Path to skill JSON file.", - "param_required": false, - "param_default": null - }, - { - "param_name": "file_content", - "param_type": "string", - "param_description": "File content.", - "param_required": false, - "param_default": null - }, - { - "param_name": "file_path", - "param_type": "string", - "param_description": "Path to file.", - "param_required": false, - "param_default": null - }, - { - "param_name": "huggingface_repo_id", - "param_type": "string", - "param_description": "Huggingface repo ID.", - "param_required": false, - "param_default": null - }, - { - "param_name": "huggingface_skill_path", - "param_type": "string", - "param_description": "Huggingface skill path.", - "param_required": false, - "param_default": null - } - ], - "skill_return": { - "param_name": "CodeSkill", - "param_type": "object", - "param_description": "Created skill", - "param_required": true, - "param_default": null - }, - "skill_dependencies": [ - { - "dependency_name": "open-creator", - "dependency_version": "latest", - "dependency_type": "pacakge" - } - ], - "conversation_history": [ - { - "role": "user", - "content": "# file name: create.py\nimport creator\nfrom creator.schema.skill import CodeSkill\nfrom typing import Optional, List\n\n\ndef create(\n request: Optional[str] = None,\n messages: Optional[List[dict]] = None,\n messages_json_path: Optional[str] = None,\n skill_path: Optional[str] = None,\n 
skill_json_path: Optional[str] = None,\n file_content: Optional[str] = None,\n file_path: Optional[str] = None,\n huggingface_repo_id: Optional[str] = None,\n huggingface_skill_path: Optional[str] = None,\n) -> CodeSkill:\n \"\"\"Create a skill from various sources.\n\n Args:\n request (Optional[str], optional): Request string. Defaults to None.\n messages (Optional[List[dict]], optional): Messages in list of dict format. Defaults to None.\n messages_json_path (Optional[str], optional): Path to messages JSON file. Defaults to None.\n skill_path (Optional[str], optional): Path to skill directory. Defaults to None.\n skill_json_path (Optional[str], optional): Path to skill JSON file. Defaults to None.\n file_content (Optional[str], optional): File content. Defaults to None.\n file_path (Optional[str], optional): Path to file. Defaults to None.\n huggingface_repo_id (Optional[str], optional): Huggingface repo ID. Defaults to None.\n huggingface_skill_path (Optional[str], optional): Huggingface skill path. Defaults to None.\n\n Returns:\n CodeSkill: Created skill\n Example:\n >>> skill = creator.create(request=\"filter how many prime numbers are in 201\")\n >>> skill = creator.create(messages=[{\"role\": \"user\",\"content\": \"write a program to list all the python functions and their docstrings in a directory\"},{\"role\": \"assistant\",\"content\": \"Sure, I can help with that. Here's the plan:\\n\\n1. First, we need to get a list of all Python files in the specified directory. We can do this by using the `os` and `glob` modules in Python.\\n2. Then, for each Python file, we will parse the file to find all function definitions. We can do this by using the `ast` module in Python, which can parse Python source code into an abstract syntax tree (AST).\\n3. For each function definition, we will extract the function's name and its docstring. The `ast` module can also help us with this.\\n4. 
Finally, we will print out the function names and their docstrings.\\n\\nLet's start with step 1: getting a list of all Python files in the specified directory.\",\"function_call\": {\"name\": \"run_code\",\"arguments\": \"{\\n \\\"language\\\": \\\"python\\\",\\n \\\"code\\\": \\\"import os\\\\nimport glob\\\\n\\\\n# Get the current working directory\\\\ncwd = os.getcwd()\\\\n\\\\n# Get a list of all Python files in the directory\\\\npython_files = glob.glob(os.path.join(cwd, '*.py'))\\\\n\\\\npython_files\\\"\\n}\"}}])\n >>> skill = creator.create(messages_json_path=\"./messages_example.json\")\n >>> skill = creator.create(file_path=\"../creator/utils/ask_human.py\")\n >>> skill = creator.create(huggingface_repo_id=\"Sayoyo/skill-library\", huggingface_skill_path=\"extract_pdf_section\")\n >>> skill = creator.create(skill_json_path=os.path.expanduser(\"~\") + \"/.cache/open_creator/skill_library/create/skill.json\")\n \"\"\"\n if request is not None:\n skill = creator.create(request=request)\n elif messages is not None:\n skill = creator.create(messages=messages)\n elif messages_json_path is not None:\n skill = creator.create(messages_json_path=messages_json_path)\n elif skill_path is not None:\n skill = creator.create(skill_path=skill_path)\n elif skill_json_path is not None:\n skill = creator.create(skill_json_path=skill_json_path)\n elif file_content is not None:\n skill = creator.create(file_content=file_content)\n elif file_path is not None:\n skill = creator.create(file_path=file_path)\n elif huggingface_repo_id is not None and huggingface_skill_path is not None:\n skill = creator.create(\n huggingface_repo_id=huggingface_repo_id, huggingface_skill_path=huggingface_skill_path\n )\n else:\n raise ValueError(\"At least one argument must be provided.\")\n\n return skill\n" - } - ], - "test_summary": null -} \ No newline at end of file diff --git a/creator/skill_library/open-creator/create/skill_code.py b/creator/skill_library/open-creator/create/skill_code.py 
deleted file mode 100644 index c722a4c..0000000 --- a/creator/skill_library/open-creator/create/skill_code.py +++ /dev/null @@ -1,61 +0,0 @@ -from creator.core import creator -from creator.core.skill import CodeSkill -from typing import Optional, List - - -def create( - request: Optional[str] = None, - messages: Optional[List[dict]] = None, - messages_json_path: Optional[str] = None, - skill_path: Optional[str] = None, - skill_json_path: Optional[str] = None, - file_content: Optional[str] = None, - file_path: Optional[str] = None, - huggingface_repo_id: Optional[str] = None, - huggingface_skill_path: Optional[str] = None, -) -> CodeSkill: - """Create a skill from various sources. - - Args: - request (Optional[str], optional): Request string. Defaults to None. - messages (Optional[List[dict]], optional): Messages in list of dict format. Defaults to None. - messages_json_path (Optional[str], optional): Path to messages JSON file. Defaults to None. - skill_path (Optional[str], optional): Path to skill directory. Defaults to None. - skill_json_path (Optional[str], optional): Path to skill JSON file. Defaults to None. - file_content (Optional[str], optional): File content. Defaults to None. - file_path (Optional[str], optional): Path to file. Defaults to None. - huggingface_repo_id (Optional[str], optional): Huggingface repo ID. Defaults to None. - huggingface_skill_path (Optional[str], optional): Huggingface skill path. Defaults to None. - - Returns: - CodeSkill: Created skill - Example: - >>> skill = create(request="filter how many prime numbers are in 201") - >>> skill = create(messages=[{"role": "user","content": "write a program to list all the python functions and their docstrings in a directory"},{"role": "assistant","content": "Sure, I can help with that. Here's the plan:\n\n1. First, we need to get a list of all Python files in the specified directory. We can do this by using the `os` and `glob` modules in Python.\n2. 
Then, for each Python file, we will parse the file to find all function definitions. We can do this by using the `ast` module in Python, which can parse Python source code into an abstract syntax tree (AST).\n3. For each function definition, we will extract the function's name and its docstring. The `ast` module can also help us with this.\n4. Finally, we will print out the function names and their docstrings.\n\nLet's start with step 1: getting a list of all Python files in the specified directory.","function_call": {"name": "run_code","arguments": "{\n \"language\": \"python\",\n \"code\": \"import os\\nimport glob\\n\\n# Get the current working directory\\ncwd = os.getcwd()\\n\\n# Get a list of all Python files in the directory\\npython_files = glob.glob(os.path.join(cwd, '*.py'))\\n\\npython_files\"\n}"}}]) - >>> skill = create(messages_json_path="./messages_example.json") - >>> skill = create(file_path="../creator/utils/ask_human.py") - >>> skill = create(huggingface_repo_id="Sayoyo/skill-library", huggingface_skill_path="extract_pdf_section") - >>> skill = create(skill_json_path=os.path.expanduser("~") + "/.cache/open_creator/skill_library/create/skill.json") - """ - if request is not None: - skill = creator.create(request=request) - elif messages is not None: - skill = creator.create(messages=messages) - elif messages_json_path is not None: - skill = creator.create(messages_json_path=messages_json_path) - elif skill_path is not None: - skill = creator.create(skill_path=skill_path) - elif skill_json_path is not None: - skill = creator.create(skill_json_path=skill_json_path) - elif file_content is not None: - skill = creator.create(file_content=file_content) - elif file_path is not None: - skill = creator.create(file_path=file_path) - elif huggingface_repo_id is not None and huggingface_skill_path is not None: - skill = creator.create( - huggingface_repo_id=huggingface_repo_id, huggingface_skill_path=huggingface_skill_path - ) - else: - raise ValueError("At 
least one argument must be provided.") - - return skill diff --git a/creator/skill_library/open-creator/create/skill_doc.md b/creator/skill_library/open-creator/create/skill_doc.md deleted file mode 100644 index d134b0b..0000000 --- a/creator/skill_library/open-creator/create/skill_doc.md +++ /dev/null @@ -1,26 +0,0 @@ -## Skill Details: -- **Name**: create -- **Description**: Create a skill from various sources. -- **Version**: 1.0.0 -- **Usage**: -```python -skill = create(request="filter how many prime numbers are in 201") -skill = create(messages=[{"role": "user","content": "write a program to list all the python functions and their docstrings in a directory"},{"role": "assistant","content": "Sure, I can help with that. Here's the plan:\n\n1. First, we need to get a list of all Python files in the specified directory. We can do this by using the `os` and `glob` modules in Python.\n2. Then, for each Python file, we will parse the file to find all function definitions. We can do this by using the `ast` module in Python, which can parse Python source code into an abstract syntax tree (AST).\n3. For each function definition, we will extract the function's name and its docstring. The `ast` module can also help us with this.\n4. 
Finally, we will print out the function names and their docstrings.\n\nLet's start with step 1: getting a list of all Python files in the specified directory.","function_call": {"name": "run_code","arguments": "{\n \"language\": \"python\",\n \"code\": \"import os\\nimport glob\\n\\n# Get the current working directory\\ncwd = os.getcwd()\\n\\n# Get a list of all Python files in the directory\\npython_files = glob.glob(os.path.join(cwd, '*.py'))\\n\\npython_files\"\n}"}}]) -skill = create(messages_json_path="./messages_example.json") -skill = create(file_path="../creator/utils/ask_human.py") -skill = create(huggingface_repo_id="Sayoyo/skill-library", huggingface_skill_path="extract_pdf_section") -skill = create(skill_json_path=os.path.expanduser("~") + "/.cache/open_creator/skill_library/create/skill.json") -``` -- **Parameters**: - - **request** (string): Request string. - - **messages** (array): Messages in list of dict format. - - **messages_json_path** (string): Path to messages JSON file. - - **skill_path** (string): Path to skill directory. - - **skill_json_path** (string): Path to skill JSON file. - - **file_content** (string): File content. - - **file_path** (string): Path to file. - - **huggingface_repo_id** (string): Huggingface repo ID. - - **huggingface_skill_path** (string): Huggingface skill path. 
- -- **Returns**: - - **CodeSkill** (object): Created skill \ No newline at end of file diff --git a/creator/skill_library/open-creator/save/conversation_history.json b/creator/skill_library/open-creator/save/conversation_history.json deleted file mode 100644 index 0ecf2e1..0000000 --- a/creator/skill_library/open-creator/save/conversation_history.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "role": "user", - "content": "# file name: save.py\nimport creator\nfrom creator.schema.skill import CodeSkill\n\n\ndef save(skill: CodeSkill, huggingface_repo_id: str = None, skill_path: str = None):\n \"\"\"\n Save a skill to a local path or a huggingface repo.\n \n Parameters:\n skill: CodeSkill object, the skill to be saved.\n huggingface_repo_id: str, optional, the ID of the huggingface repo. If provided, the skill will be saved to this repo.\n skill_path: str, optional, the local path. If provided, the skill will be saved to this path.\n \n Returns:\n None\n \n Usage examples:\n ```python\n >>> import creator\n >>> import os\n >>> skill_json_path = os.path.expanduser(\"~\") + \"/.cache/open_creator/skill_library/ask_run_code_confirm/skill.json\"\n >>> skill = creator.create(skill_json_path=skill_json_path)\n >>> creator.save(skill=skill, huggingface_repo_id=\"ChuxiJ/skill_library\")\n ```\n or\n ```python\n >>> import creator\n >>> import os\n >>> skill_json_path = os.path.expanduser(\"~\") + \"/.cache/open_creator/skill_library/ask_run_code_confirm/skill.json\"\n >>> skill = creator.create(skill_json_path=skill_json_path)\n >>> creator.save(skill=skill, skill_path=\"/path/to/save\")\n ```\n \"\"\"\n if huggingface_repo_id is not None:\n creator.save_to_hub(skill=skill, huggingface_repo_id=huggingface_repo_id)\n elif skill_path is not None:\n creator.save_to_skill_path(skill=skill, skill_path=skill_path)\n else:\n raise ValueError(\"Either huggingface_repo_id or skill_path must be provided.\")\n \n" - } -] \ No newline at end of file diff --git 
a/creator/skill_library/open-creator/save/embedding_text.txt b/creator/skill_library/open-creator/save/embedding_text.txt deleted file mode 100644 index 5ac09a5..0000000 --- a/creator/skill_library/open-creator/save/embedding_text.txt +++ /dev/null @@ -1,5 +0,0 @@ -save -Save a skill to a local path or a huggingface repo. -Usage examples: -save(skill=skill) or save(skill=skill, huggingface_repo_id='xxxx/skill_library') or save(skill=skill, skill_path='/path/to/save') -['save', 'skill', 'huggingface', 'local path'] \ No newline at end of file diff --git a/creator/skill_library/open-creator/save/function_call.json b/creator/skill_library/open-creator/save/function_call.json deleted file mode 100644 index 7bf0f1d..0000000 --- a/creator/skill_library/open-creator/save/function_call.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "name": "save", - "description": "Save a skill to a local path or a huggingface repo.\n\nsave(skill=skill, huggingface_repo_id='xxxx/skill_library') or save(skill=skill, skill_path='/path/to/save')", - "parameters": { - "type": "object", - "properties": { - "skill": { - "type": "object", - "description": "CodeSkill object, the skill to be saved." - }, - "huggingface_repo_id": { - "type": "string", - "description": "optional, the ID of the huggingface repo. If provided, the skill will be saved to this repo." - }, - "skill_path": { - "type": "string", - "description": "optional, the local path. If provided, the skill will be saved to this path." 
- } - }, - "required": [ - "skill" - ] - } -} \ No newline at end of file diff --git a/creator/skill_library/open-creator/save/install_dependencies.sh b/creator/skill_library/open-creator/save/install_dependencies.sh deleted file mode 100644 index eb1ad2d..0000000 --- a/creator/skill_library/open-creator/save/install_dependencies.sh +++ /dev/null @@ -1 +0,0 @@ -pip install -U "open-creator" \ No newline at end of file diff --git a/creator/skill_library/open-creator/save/skill.json b/creator/skill_library/open-creator/save/skill.json deleted file mode 100644 index af93afe..0000000 --- a/creator/skill_library/open-creator/save/skill.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "skill_name": "save", - "skill_description": "Save a skill to a local path or a huggingface repo.", - "skill_metadata": { - "created_at": "2023-10-04 09:54:43", - "author": "gongjunmin", - "updated_at": "2023-10-04 09:54:43", - "usage_count": 0, - "version": "1.0.0", - "additional_kwargs": {} - }, - "skill_tags": [ - "save", - "skill", - "huggingface", - "local path" - ], - "skill_usage_example": "save(skill=skill, huggingface_repo_id='ChuxiJ/skill_library') or save(skill=skill, skill_path='/path/to/save')", - "skill_program_language": "python", - "skill_code": "from creator.core import creator\nfrom creator.core.skill import CodeSkill\n\n\ndef save(skill: CodeSkill, huggingface_repo_id: str = None, skill_path: str = None):\n \"\"\"\n Save a skill to a local path or a huggingface repo.\n \n Parameters:\n skill: CodeSkill object, the skill to be saved.\n huggingface_repo_id: str, optional, the ID of the huggingface repo. If provided, the skill will be saved to this repo.\n skill_path: str, optional, the local path. 
If provided, the skill will be saved to this path.\n \n Returns:\n None\n \n Example:\n >>> import creator\n >>> import os\n >>> skill_json_path = os.path.expanduser(\"~\") + \"/.cache/open_creator/skill_library/ask_run_code_confirm/skill.json\"\n >>> skill = creator.create(skill_json_path=skill_json_path)\n >>> save(skill=skill, huggingface_repo_id=\"ChuxiJ/skill_library\") # save to remote\n >>> save(skill=skill, skill_path=\"/path/to/save\") # save to local\n \"\"\"\n if huggingface_repo_id is not None:\n creator.save(skill=skill, huggingface_repo_id=huggingface_repo_id)\n elif skill_path is not None:\n creator.save(skill=skill, skill_path=skill_path)\n else:\n creator.save(skill=skill)", - "skill_parameters": [ - { - "param_name": "skill", - "param_type": "object", - "param_description": "CodeSkill object, the skill to be saved.", - "param_required": true, - "param_default": null - }, - { - "param_name": "huggingface_repo_id", - "param_type": "string", - "param_description": "optional, the ID of the huggingface repo. If provided, the skill will be saved to this repo.", - "param_required": false, - "param_default": null - }, - { - "param_name": "skill_path", - "param_type": "string", - "param_description": "optional, the local path. If provided, the skill will be saved to this path.", - "param_required": false, - "param_default": null - } - ], - "skill_return": null, - "skill_dependencies": [ - { - "dependency_name": "open-creator", - "dependency_version": "latest", - "dependency_type": "package" - } - ], - "conversation_history": [ - { - "role": "user", - "content": "# file name: save.py\nimport creator\nfrom creator.schema.skill import CodeSkill\n\n\ndef save(skill: CodeSkill, huggingface_repo_id: str = None, skill_path: str = None):\n \"\"\"\n Save a skill to a local path or a huggingface repo.\n \n Parameters:\n skill: CodeSkill object, the skill to be saved.\n huggingface_repo_id: str, optional, the ID of the huggingface repo. 
If provided, the skill will be saved to this repo.\n skill_path: str, optional, the local path. If provided, the skill will be saved to this path.\n \n Returns:\n None\n \n Usage examples:\n ```python\n >>> import creator\n >>> import os\n >>> skill_json_path = os.path.expanduser(\"~\") + \"/.cache/open_creator/skill_library/ask_run_code_confirm/skill.json\"\n >>> skill = creator.create(skill_json_path=skill_json_path)\n >>> creator.save(skill=skill, huggingface_repo_id=\"ChuxiJ/skill_library\")\n ```\n or\n ```python\n >>> import creator\n >>> import os\n >>> skill_json_path = os.path.expanduser(\"~\") + \"/.cache/open_creator/skill_library/ask_run_code_confirm/skill.json\"\n >>> skill = creator.create(skill_json_path=skill_json_path)\n >>> creator.save(skill=skill, skill_path=\"/path/to/save\")\n ```\n \"\"\"\n if huggingface_repo_id is not None:\n creator.save_to_hub(skill=skill, huggingface_repo_id=huggingface_repo_id)\n elif skill_path is not None:\n creator.save_to_skill_path(skill=skill, skill_path=skill_path)\n else:\n raise ValueError(\"Either huggingface_repo_id or skill_path must be provided.\")\n \n" - } - ], - "test_summary": null -} \ No newline at end of file diff --git a/creator/skill_library/open-creator/save/skill_code.py b/creator/skill_library/open-creator/save/skill_code.py deleted file mode 100644 index 01dfbdb..0000000 --- a/creator/skill_library/open-creator/save/skill_code.py +++ /dev/null @@ -1,30 +0,0 @@ -from creator.core import creator -from creator.core.skill import CodeSkill - - -def save(skill: CodeSkill, huggingface_repo_id: str = None, skill_path: str = None): - """ - Save a skill to a local path or a huggingface repo. - - Parameters: - skill: CodeSkill object, the skill to be saved. - huggingface_repo_id: str, optional, the ID of the huggingface repo. If provided, the skill will be saved to this repo. - skill_path: str, optional, the local path. If provided, the skill will be saved to this path. 
- - Returns: - None - - Example: - >>> import creator - >>> import os - >>> skill_json_path = os.path.expanduser("~") + "/.cache/open_creator/skill_library/ask_run_code_confirm/skill.json" - >>> skill = creator.create(skill_json_path=skill_json_path) - >>> save(skill=skill, huggingface_repo_id="ChuxiJ/skill_library") # save to remote - >>> save(skill=skill, skill_path="/path/to/save") # save to local - """ - if huggingface_repo_id is not None: - creator.save(skill=skill, huggingface_repo_id=huggingface_repo_id) - elif skill_path is not None: - creator.save(skill=skill, skill_path=skill_path) - else: - creator.save(skill=skill) \ No newline at end of file diff --git a/creator/skill_library/open-creator/save/skill_doc.md b/creator/skill_library/open-creator/save/skill_doc.md deleted file mode 100644 index 08b1c63..0000000 --- a/creator/skill_library/open-creator/save/skill_doc.md +++ /dev/null @@ -1,26 +0,0 @@ -## Skill Details: -- **Name**: save -- **Description**: Save a skill to a local path or a huggingface repo. -- **Version**: 1.0.0 -- **Usage**: -You need to create a skill first -```python -import creator -import os -skill_json_path = os.path.expanduser("~") + "/.cache/open_creator/skill_library/ask_run_code_confirm/skill.json" -skill = creator.create(skill_json_path=skill_json_path) -``` -```python -save(skill=skill, huggingface_repo_id="ChuxiJ/skill_library") -``` -or -```python -save(skill=skill, skill_path="/path/to/save") -``` -- **Parameters**: - - **skill** (object): CodeSkill object, the skill to be saved. - - Required: True - - **huggingface_repo_id** (string): optional, the ID of the huggingface repo. If provided, the skill will be saved to this repo. - - **skill_path** (string): optional, the local path. If provided, the skill will be saved to this path. 
- -- **Returns**: \ No newline at end of file diff --git a/creator/skill_library/open-creator/search/conversation_history.json b/creator/skill_library/open-creator/search/conversation_history.json deleted file mode 100644 index 847ef4f..0000000 --- a/creator/skill_library/open-creator/search/conversation_history.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "role": "user", - "content": "# file name: search.py\nimport creator\nfrom creator.schema.skill import CodeSkill\n\n\ndef search(query: str, top_k=1, threshold=0.8) -> list[CodeSkill]:\n \"\"\"\n Search skills by query.\n \n Parameters:\n query: str, the query.\n top_k: int, optional, the maximum number of skills to return.\n threshold: float, optional, the minimum similarity score to return a skill.\n Returns:\n a list of CodeSkill objects.\n\n Example:\n >>> import creator\n >>> skills = search(\"I want to extract some pages from a pdf\")\n \"\"\"\n\n return creator.search(query=query, top_k=top_k, threshold=threshold)\n\n" - } -] \ No newline at end of file diff --git a/creator/skill_library/open-creator/search/embedding_text.txt b/creator/skill_library/open-creator/search/embedding_text.txt deleted file mode 100644 index 1651ac4..0000000 --- a/creator/skill_library/open-creator/search/embedding_text.txt +++ /dev/null @@ -1,4 +0,0 @@ -search -This skill allows users to search for skills by query. 
-skills = search('I want to extract some pages from a pdf') -['search', 'query', 'CodeSkill'] \ No newline at end of file diff --git a/creator/skill_library/open-creator/search/function_call.json b/creator/skill_library/open-creator/search/function_call.json deleted file mode 100644 index 3a175cd..0000000 --- a/creator/skill_library/open-creator/search/function_call.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "name": "search", - "description": "This skill allows users to search for skills by query.\n\nskills = search('I want to extract some pages from a pdf')", - "parameters": { - "type": "object", - "properties": { - "query": { - "type": "string", - "description": "The query to search for skills." - }, - "top_k": { - "type": "integer", - "description": "The maximum number of skills to return.", - "default": 1 - }, - "threshold": { - "type": "float", - "description": "The minimum similarity score to return a skill.", - "default": 0.8 - } - }, - "required": [ - "query" - ] - } -} \ No newline at end of file diff --git a/creator/skill_library/open-creator/search/install_dependencies.sh b/creator/skill_library/open-creator/search/install_dependencies.sh deleted file mode 100644 index eb1ad2d..0000000 --- a/creator/skill_library/open-creator/search/install_dependencies.sh +++ /dev/null @@ -1 +0,0 @@ -pip install -U "open-creator" \ No newline at end of file diff --git a/creator/skill_library/open-creator/search/search.py b/creator/skill_library/open-creator/search/search.py deleted file mode 100644 index 9c1a6df..0000000 --- a/creator/skill_library/open-creator/search/search.py +++ /dev/null @@ -1,22 +0,0 @@ -from creator.core import creator -from creator.core.skill import CodeSkill - - -def search(query: str, top_k=1, threshold=0.8) -> list[CodeSkill]: - """ - Search skills by query. - - Parameters: - query: str, the query. - top_k: int, optional, the maximum number of skills to return. - threshold: float, optional, the minimum similarity score to return a skill. 
- Returns: - a list of CodeSkill objects. - - Example: - >>> import creator - >>> skills = search("I want to extract some pages from a pdf") - """ - - return creator.search(query=query, top_k=top_k, threshold=threshold) - diff --git a/creator/skill_library/open-creator/search/skill.json b/creator/skill_library/open-creator/search/skill.json deleted file mode 100644 index 24202a7..0000000 --- a/creator/skill_library/open-creator/search/skill.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "skill_name": "search", - "skill_description": "This skill allows users to search for skills by query.", - "skill_metadata": { - "created_at": "2023-10-04 14:51:53", - "author": "gongjunmin", - "updated_at": "2023-10-04 14:51:53", - "usage_count": 0, - "version": "1.0.0", - "additional_kwargs": {} - }, - "skill_tags": [ - "search", - "query", - "CodeSkill" - ], - "skill_usage_example": "skills = search('I want to extract some pages from a pdf')", - "skill_program_language": "python", - "skill_code": "from creator.core import creator\nfrom creator.core.skill import CodeSkill\n\ndef search(query: str, top_k=1, threshold=0.8) -> list[CodeSkill]:\n '''\n Search skills by query.\n \n Parameters:\n query: str, the query.\n top_k: int, optional, the maximum number of skills to return.\n threshold: float, optional, the minimum similarity score to return a skill.\n Returns:\n a list of CodeSkill objects.\n\n Example:\n >>> import creator\n >>> skills = search('I want to extract some pages from a pdf')\n '''\n\n return creator.search(query=query, top_k=top_k, threshold=threshold)", - "skill_parameters": [ - { - "param_name": "query", - "param_type": "string", - "param_description": "The query to search for skills.", - "param_required": true, - "param_default": null - }, - { - "param_name": "top_k", - "param_type": "integer", - "param_description": "The maximum number of skills to return.", - "param_required": false, - "param_default": 1 - }, - { - "param_name": "threshold", - "param_type": "float", 
- "param_description": "The minimum similarity score to return a skill.", - "param_required": false, - "param_default": 0.8 - } - ], - "skill_return": { - "param_name": "skills", - "param_type": "array", - "param_description": "A list of CodeSkill objects.", - "param_required": true, - "param_default": null - }, - "skill_dependencies": [ - { - "dependency_name": "open-creator", - "dependency_version": "latest", - "dependency_type": "package" - } - ], - "conversation_history": [ - { - "role": "user", - "content": "# file name: search.py\nimport creator\nfrom creator.schema.skill import CodeSkill\n\n\ndef search(query: str, top_k=1, threshold=0.8) -> list[CodeSkill]:\n \"\"\"\n Search skills by query.\n \n Parameters:\n query: str, the query.\n top_k: int, optional, the maximum number of skills to return.\n threshold: float, optional, the minimum similarity score to return a skill.\n Returns:\n a list of CodeSkill objects.\n\n Example:\n >>> import creator\n >>> skills = search(\"I want to extract some pages from a pdf\")\n \"\"\"\n\n return creator.search(query=query, top_k=top_k, threshold=threshold)\n\n" - } - ], - "test_summary": null -} \ No newline at end of file diff --git a/creator/skill_library/open-creator/search/skill_code.py b/creator/skill_library/open-creator/search/skill_code.py deleted file mode 100644 index c8ff216..0000000 --- a/creator/skill_library/open-creator/search/skill_code.py +++ /dev/null @@ -1,20 +0,0 @@ -import creator -from creator.core.skill import CodeSkill - -def search(query: str, top_k=1, threshold=0.8) -> list[CodeSkill]: - ''' - Search skills by query. - - Parameters: - query: str, the query. - top_k: int, optional, the maximum number of skills to return. - threshold: float, optional, the minimum similarity score to return a skill. - Returns: - a list of CodeSkill objects. 
- - Example: - >>> import creator - >>> skills = search('I want to extract some pages from a pdf') - ''' - - return creator.search(query=query, top_k=top_k, threshold=threshold) \ No newline at end of file diff --git a/creator/skill_library/open-creator/search/skill_doc.md b/creator/skill_library/open-creator/search/skill_doc.md deleted file mode 100644 index 9387901..0000000 --- a/creator/skill_library/open-creator/search/skill_doc.md +++ /dev/null @@ -1,18 +0,0 @@ -## Skill Details: -- **Name**: search -- **Description**: This skill allows users to search for skills by query. -- **Version**: 1.0.0 -- **Usage**: -```python -skills = search('I want to extract some pages from a pdf') -``` -- **Parameters**: - - **query** (string): The query to search for skills. - - Required: True - - **top_k** (integer): The maximum number of skills to return. - - Default: 1 - - **threshold** (float): The minimum similarity score to return a skill. - - Default: 0.8 - -- **Returns**: - - **skills** (array): A list of CodeSkill objects. 
\ No newline at end of file diff --git a/creator/utils/__init__.py b/creator/utils/__init__.py index 31f5498..15cb188 100644 --- a/creator/utils/__init__.py +++ b/creator/utils/__init__.py @@ -6,11 +6,14 @@ from .ask_human import ask_run_code_confirm from .dict2list import convert_to_values_list from .user_info import get_user_info -from .load_prompt import load_system_prompt +from .load_prompt import load_system_prompt, load_json_schema from .printer import print from .code_split import split_code_blocks from .valid_code import is_valid_code, is_expression from .tips_utils import remove_tips +from .runnable_decorator import runnable, print_run_url +from .attri_dict import AttrDict +from .uuid_generator import generate_uuid_like_string __all__ = [ @@ -23,9 +26,14 @@ "convert_to_values_list", "get_user_info", "load_system_prompt", + "load_json_schema", "print", "split_code_blocks", "is_valid_code", "is_expression", - "remove_tips" + "remove_tips", + "runnable", + "print_run_url", + "AttrDict", + "generate_uuid_like_string" ] diff --git a/creator/utils/attri_dict.py b/creator/utils/attri_dict.py new file mode 100644 index 0000000..71bb396 --- /dev/null +++ b/creator/utils/attri_dict.py @@ -0,0 +1,28 @@ +import os + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + for key, value in self.items(): + if isinstance(value, dict): + self[key] = AttrDict(value) + + def __getattr__(self, key): + try: + return self[key] + except KeyError: + return None + + def __setattr__(self, key, value): + if isinstance(value, dict): + value = AttrDict(value) + self[key] = value + if isinstance(value, (str, int, float)): + os.environ[key] = value + elif isinstance(value, bool): + os.environ[key] = str(value).lower() + + def __delattr__(self, key): + if key in self: + del self[key] diff --git a/creator/utils/install_command.py b/creator/utils/install_command.py index c9e5894..b5baa14 100644 --- a/creator/utils/install_command.py +++ 
b/creator/utils/install_command.py @@ -15,7 +15,7 @@ def generate_install_command(language: str, dependencies): return _generate_html_install_command(dependencies) else: raise NotImplementedError - + def _generate_python_install_command(dependencies): shell_command_str = 'pip show {package_name} || pip install "{package_name}' diff --git a/creator/utils/langsmith_utils.py b/creator/utils/langsmith_utils.py new file mode 100644 index 0000000..e2f406e --- /dev/null +++ b/creator/utils/langsmith_utils.py @@ -0,0 +1,22 @@ +import os +from langsmith import Client +from langsmith.utils import LangSmithConnectionError +from .printer import print + + +def check_langsmith_ok(): + cli = Client() + if os.environ.get("LANGCHAIN_TRACING_V2", "false") == "false": + return False + try: + cli.read_project(project_name="open-creator") + except LangSmithConnectionError as e: + if "Connection error" in str(e): + print("[red]Warning:[/red] [yellow]Langsmith is not running. Please run `langsmith start`.[/yellow]") + return False + else: + cli.create_project(project_name="open-creator") + return True + + +langsmith_ok = check_langsmith_ok() diff --git a/creator/utils/load_prompt.py b/creator/utils/load_prompt.py index 00bccde..c73cb89 100644 --- a/creator/utils/load_prompt.py +++ b/creator/utils/load_prompt.py @@ -1,4 +1,13 @@ +import json + + def load_system_prompt(prompt_path): with open(prompt_path, encoding='utf-8') as f: prompt = f.read() return prompt + + +def load_json_schema(json_schema_path): + with open(json_schema_path, encoding="utf-8") as f: + json_schema = json.load(f) + return json_schema diff --git a/creator/utils/printer.py b/creator/utils/printer.py index 6e62d99..895b7aa 100644 --- a/creator/utils/printer.py +++ b/creator/utils/printer.py @@ -2,10 +2,10 @@ import sys from rich.markdown import Markdown from rich.console import Console -from rich import print as rich_print from rich.json import JSON import io + # Save the original print function original_print = print 
@@ -51,7 +51,10 @@ def add_default_callback(self): def default_print(message, end='\n', file=None, flush=False, output_option='terminal'): target_file = file or self.output_capture if output_option in ['terminal', 'both']: - console = Console(force_jupyter=self.is_jupyter, force_terminal=self.is_terminal, force_interactive=self.is_interactive, file=target_file) + if self.is_jupyter: + console = Console(force_jupyter=self.is_jupyter, force_terminal=self.is_terminal, force_interactive=self.is_interactive, file=target_file) + else: + console = Console(force_jupyter=self.is_jupyter, force_terminal=self.is_terminal, force_interactive=self.is_interactive) console.print(message, end=end) # if output_option in ['stdout', 'both']: # rich_print(message, end=end, file=sys.stdout, flush=flush) diff --git a/creator/utils/runnable_decorator.py b/creator/utils/runnable_decorator.py new file mode 100644 index 0000000..52342c2 --- /dev/null +++ b/creator/utils/runnable_decorator.py @@ -0,0 +1,24 @@ +from langchain.schema.runnable import RunnableLambda +from langchain.callbacks import tracing_v2_enabled +from .printer import print +from .langsmith_utils import langsmith_ok + + +def runnable(run_name): + def decorator(func): + return RunnableLambda(func).with_config({"run_name": run_name}) + return decorator + + +def print_run_url(func): + def wrapper(*args, **kwargs): + if langsmith_ok: + with tracing_v2_enabled() as cb: + result = func(*args, **kwargs) + run_url = cb.get_run_url() + if run_url is not None: + print(f"Langsmith Run URL: [{run_url}]({run_url})", print_type="markdown") + else: + result = func(*args, **kwargs) + return result + return wrapper diff --git a/creator/utils/skill_doc.py b/creator/utils/skill_doc.py index aef4f57..e35c97c 100644 --- a/creator/utils/skill_doc.py +++ b/creator/utils/skill_doc.py @@ -39,4 +39,3 @@ def format_return(ret): doc += format_return(skill.skill_return) + "\n" return doc.strip() - diff --git a/creator/utils/uuid_generator.py 
b/creator/utils/uuid_generator.py new file mode 100644 index 0000000..307a7ff --- /dev/null +++ b/creator/utils/uuid_generator.py @@ -0,0 +1,23 @@ +import hashlib + + +def generate_uuid_like_string(text): + """ + Generates a UUID-like string based on the given text. + + The function uses the SHA-256 hash function to hash the input text. + It then extracts 32 characters from the hash result and formats + them to mimic the structure of a UUID. + + Parameters: + text (str): The input text to be hashed and converted into a UUID-like string. + + Returns: + str: A UUID-like string derived from the input text. + """ + # Use the SHA-256 hash function to hash the input text + hash_object = hashlib.sha256(text.encode()) + hex_dig = hash_object.hexdigest() + + # Extract 32 characters from the hash result and add separators to mimic the UUID format + return f"{hex_dig[:8]}-{hex_dig[8:12]}-{hex_dig[12:16]}-{hex_dig[16:20]}-{hex_dig[20:32]}" diff --git a/docs/api_doc.md b/docs/api_doc.md index eca7baa..628a610 100644 --- a/docs/api_doc.md +++ b/docs/api_doc.md @@ -1,7 +1,7 @@ ## Open-Creator API Documentation -### Function: `create` -Generates a `CodeSkill` instance using different input sources. +### Function: `#!python create` +Generates a `#!python CodeSkill` instance using different input sources. #### Parameters: - `request`: String detailing the skill functionality. @@ -12,46 +12,46 @@ Generates a `CodeSkill` instance using different input sources. - `huggingface_skill_path`: Path to the skill within the Huggingface repository. #### Returns: -- `CodeSkill`: The created skill. +- `#!python CodeSkill`: The created skill. #### Usage: 1. Creating Skill using a Request String: -```python +``` py skill = create(request="filter how many prime numbers are in 201") ``` 2. 
Creating Skill using Messages: - Directly: -```python +``` py skill = create(messages=[{"role": "user", "content": "write a program..."}]) ``` - Via JSON Path: -```python +``` py skill = create(messages_json_path="./messages_example.json") ``` 3. Creating Skill using File Content or File Path: - Direct Content: -```python +``` py skill = create(file_content="def example_function(): pass") ``` - File Path: -```python +``` py skill = create(file_path="../creator/utils/example.py") ``` 4. Creating Skill using Skill Path or Skill JSON Path: - JSON Path: -```python +``` py skill = create(skill_json_path="~/.cache/open_creator/skill_library/create/skill.json") ``` - Skill Path: -```python +``` py skill = create(skill_path="~/.cache/open_creator/skill_library/create") ``` 5. Creating Skill using Huggingface Repository ID and Skill Path: If a skill is hosted in a Huggingface repository, you can create it by specifying the repository ID and the skill path within the repository. -```python +``` py skill = create(huggingface_repo_id="YourRepo/skill-library", huggingface_skill_path="specific_skill") ``` @@ -64,7 +64,7 @@ skill = create(huggingface_repo_id="YourRepo/skill-library", huggingface_skill_p ### Function: `save` -Stores a `CodeSkill` instance either to a local path or a Huggingface repository. In default just use `save(skill)` and it will store the skill into the default path. Only save the skill when the user asks to do so. +Stores a `#!python CodeSkill` instance either to a local path or a Huggingface repository. In default just use `save(skill)` and it will store the skill into the default path. Only save the skill when the user asks to do so. #### Parameters: - `skill` (CodeSkill): The skill instance to be saved. 
@@ -75,15 +75,15 @@ Stores a `CodeSkill` instance either to a local path or a Huggingface repository - None #### Usage: -The `save` function allows for the persistent storage of a `CodeSkill` instance by saving it either locally or to a specified Huggingface repository. +The `save` function allows for the persistent storage of a `#!python CodeSkill` instance by saving it either locally or to a specified Huggingface repository. 1. **Save to Huggingface Repository:** -```python +``` py save(skill=skill, huggingface_repo_id="YourRepo/skill_library") ``` 2. **Save Locally:** -```python +``` py save(skill=skill, skill_path="/path/to/save") ``` @@ -101,18 +101,18 @@ Retrieve skills related to a specified query from the available pool of skills. - `threshold` (Optional[float]): Minimum similarity score to return a skill. Default is 0.8. #### Returns: -- List[CodeSkill]: A list of retrieved `CodeSkill` objects that match the query. +- List[CodeSkill]: A list of retrieved `#!python CodeSkill` objects that match the query. #### Usage: The `search` function allows users to locate skills related to a particular query string. This is particularly useful for identifying pre-existing skills within a skill library that may fulfill a requirement or for exploring available functionalities. 1. **Basic Search:** -```python +``` py skills = search("extract pages from a pdf") ``` 2. **Refined Search:** -```python +``` py skills = search("extract pages from a pdf", top_k=3, threshold=0.85) ``` @@ -131,7 +131,7 @@ Execute a skill with provided arguments or request. - **Example Usage**: -```python +``` py linenums="1" skills = search("pdf extract section") if skills: skill = skills[0] @@ -149,7 +149,7 @@ Validate a skill using a tester agent. 
- **Example Usage**: -```python +``` py linenums="1" skill = create(request="filter prime numbers in a range, e.g., filter_prime_numbers(2, 201)") test_summary = skill.test() print(test_summary) @@ -161,17 +161,17 @@ Modify and refine skills using operator overloading. 1. **Combining Skills**: Utilize the `+` operator to chain or execute skills in parallel, detailing the coordination with the `>` operator. -```python +``` py new_skill = skillA + skillB > "Explanation of how skills A and B operate together" ``` 2. **Refactoring Skills**: Employ the `>` operator to enhance or modify existing skills. - ```python + ``` py refactored_skill = skill > "Descriptive alterations or enhancements" ``` 3. **Decomposing Skills**: Use the `<` operator to break down a skill into simpler components. - ```python + ``` py simpler_skills = skill < "Description of how the skill should be decomposed" ``` diff --git a/docs/commands.md b/docs/commands.md index a8f1fa7..d5e8f32 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -13,8 +13,8 @@ creator -h show this help message and exit - `-c, --config` open config.yaml file in text editor -- `-i, --interactive` - Enter interactive mode +- `-i, --interpreter` + Enter interpreter mode - COMMANDS `{create,save,search,server,ui}` @@ -146,7 +146,7 @@ creator or ```shell -creator [-i] [--interactive] [-q] [--quiet] +creator [-i] [--interpreter] [-q] [--quiet] ``` - `q, --quiet` Quiet mode to enter interactive mode and not rich_print LOGO and help diff --git a/docs/configurations.md b/docs/configurations.md index 18f11c7..04cb5b8 100644 --- a/docs/configurations.md +++ b/docs/configurations.md @@ -1,17 +1,21 @@ # Configurations +```shell +creator -c +``` + ```yaml LOCAL_SKILL_LIBRARY_PATH: .cache/open_creator/skill_library REMOTE_SKILL_LIBRARY_PATH: .cache/open_creator/remote -LOCAL_SKILL_LIBRARY_VECTORD_PATH: .cache/open_creator/vectordb/ PROMPT_CACHE_HISTORY_PATH: .cache/open_creator/prompt_cache/ +VECTORD_PATH: 
.cache/open_creator/vectordb/ LOGGER_CACHE_PATH: .cache/open_creator/logs/ -SKILL_EXTRACT_AGENT_CACHE_PATH: .cache/open_creator/llm_cache +LLM_CACHE_PATH: .cache/open_creator/llm_cache +EMBEDDING_CACHE_PATH: .cache/open_creator/embeddings/ OFFICIAL_SKILL_LIBRARY_PATH: timedomain/skill-library OFFICIAL_SKILL_LIBRARY_TEMPLATE_PATH: timedomain/skill-library-template -BUILD_IN_SKILL_LIBRARY_DIR: skill_library/open-creator/ # for AZURE, it is your_deployment_id # for ANTHROPIC, it is claude-2 @@ -19,7 +23,7 @@ BUILD_IN_SKILL_LIBRARY_DIR: skill_library/open-creator/ # for huggingface, it is huggingface/WizardLM/WizardCoder-Python-34B-V1.0 model path # for ollama, it is like ollama/llama2 # the default is openai/gpt-3.5 -MODEL_NAME: gpt-3.5-turbo-16k +MODEL_NAME: gpt-4 TEMPERATURE: 0 # only 0 can use llm_cache USE_AZURE: false @@ -37,4 +41,41 @@ VERTEX_LOCATION: "" HUGGINGFACE_API_KEY: "" HUGGINGFACE_API_BASE: "" -``` \ No newline at end of file + +# for langsmith trace +LANGCHAIN_ENDPOINT: +LANGCHAIN_API_KEY: +LANGCHAIN_TRACING_V2: true +LANGCHAIN_PROJECT: "open-creator" + +# for memgpt +MEMGPT_CONFIG: + MEMORY_PATH: .cache/open_creator/memory + PERSONA: | + The following is a blank slate starter persona, I need to expand this to develop my own personality. + + My name is MemGPT. + I am kind, thoughtful, and inquisitive. + + HUMAN: | + This is what I know so far about the user, I should expand this as I learn more about them. + + First name: Chad + Last name: ? + Gender: Male + Age: ? + Nationality: ? 
+ Occupation: Computer science PhD student at UC Berkeley + Interests: Formula 1, Sailing, Taste of the Himalayas Restaurant in Berkeley, CSGO + + AGENT_SUBTASKS: | + - create/save/search skill + - run/test/refactor skill + - show skill + + SUMMARY_WARNING_TOKENS: 6000 + CORE_MEMORY_PERSONA_CHAR_LIMIT: 2000 + CORE_MEMORY_HUMAN_CHAR_LIMIT: 2000 + PAGE_SIZE: 5 + USE_VECTOR_SEARCH: true +``` diff --git a/docs/examples/01_skills_create.ipynb b/docs/examples/01_skills_create.ipynb index a1acc44..e82b1be 100644 --- a/docs/examples/01_skills_create.ipynb +++ b/docs/examples/01_skills_create.ipynb @@ -62,6 +62,7 @@ "\u001b[0;34m\u001b[0m \u001b[0mfile_path\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mOptional\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", "\u001b[0;34m\u001b[0m \u001b[0mhuggingface_repo_id\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mOptional\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", "\u001b[0;34m\u001b[0m \u001b[0mhuggingface_skill_path\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mOptional\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0msave\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mcreator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mskill\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCodeSkill\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mDocstring:\u001b[0m Main method to create a new skill.\n", "\u001b[0;31mFile:\u001b[0m 
~/miniconda3/envs/open_creator_online/lib/python3.10/site-packages/creator/core/core.py\n", @@ -93,7 +94,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "480c4325950f46d9a9802e3952ef5ddb", + "model_id": "59eeaa5d83ec4cf9bdea0dbead71929c", "version_major": 2, "version_minor": 0 }, @@ -107,7 +108,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "00c9080c652f4b26b163fd80933c8bf3", + "model_id": "b49892b54f2547d2a74ae02760d79ab0", "version_major": 2, "version_minor": 0 }, @@ -141,7 +142,55 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "74f96ccdc008481d92dd54ad13aecde2", + "model_id": "c1fc6b0e407b48fd9709e1fb878a6b3a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2dfb101217d84fff944e06c72db7eee7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "bdd53aed57ca41e7a6b945d3e17f4908",
        "version_major": 2,
        "version_minor": 0
       },
@@ -175,7 +224,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "3792eb41434a4afab72082018131f2e9",
+       "model_id": "8978e8e0ba4b4955a0b97b3ba2a549e0",
        "version_major": 2,
        "version_minor": 0
       },
@@ -205,6 +254,23 @@
      },
      "metadata": {},
      "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/c2491d5f-\n",
+       "20de-4067-af80-134cafb3a449?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=641371;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/c2491d5f-20de-4067-af80-134cafb3a449?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/c2491d5f-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=641371;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/c2491d5f-20de-4067-af80-134cafb3a449?poll=true\u001b\\\u001b[4;34m20de-4067-af80-134cafb3a449?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -223,21 +289,20 @@ " Skill Details: \n", "\n", "Name: count_prime_numbers \n", - "Description: This skill counts the number of prime numbers in a given range. \n", + "Description: This skill counts the number of prime numbers within a given range. A prime number is a natural \n", + " number greater than 1 that has no positive divisors other than 1 and itself. \n", "Version: 1.0.0 \n", "Usage: \n", "\n", " \n", - " count_prime_numbers(2, 201) \n", + " count_prime_numbers(201) # returns 46 \n", " \n", "\n", "Parameters: \n", - "start (integer): The starting number of the range. \n", - "Required: True \n", - "end (integer): The ending number of the range. \n", + "n (integer): The upper limit of the range within which to count prime numbers. \n", "Required: True \n", "Returns: \n", - "count (integer): The number of prime numbers in the given range. \n", + "prime_count (integer): The number of prime numbers within the given range. \n", "\n" ], "text/plain": [ @@ -245,21 +310,20 @@ " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: count_prime_numbers \n", - "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill counts the number of prime numbers in a given range. 
\n", + "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill counts the number of prime numbers within a given range. A prime number is a natural \n", + "\u001b[1;33m \u001b[0mnumber greater than 1 that has no positive divisors other than 1 and itself. \n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcount_prime_numbers\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m2\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m201\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcount_prime_numbers\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m201\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;149;144;119;48;2;39;40;34m# returns 46\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mstart\u001b[0m (integer): The starting number of the range. \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mend\u001b[0m (integer): The ending number of the range. \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mn\u001b[0m (integer): The upper limit of the range within which to count prime numbers. 
\n", "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mcount\u001b[0m (integer): The number of prime numbers in the given range. \n" + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mprime_count\u001b[0m (integer): The number of prime numbers within the given range. \n" ] }, "metadata": {}, @@ -310,7 +374,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "1409e75812d1493a8885087f03ac6f25", + "model_id": "91d14a513cff4b68ac9cba04dc1a6db4", "version_major": 2, "version_minor": 0 }, @@ -340,6 +404,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/1e9ecd99-\n",
+       "be29-4eee-8b21-449f279b7f4b?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=254594;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/1e9ecd99-be29-4eee-8b21-449f279b7f4b?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/1e9ecd99-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=254594;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/1e9ecd99-be29-4eee-8b21-449f279b7f4b?poll=true\u001b\\\u001b[4;34mbe29-4eee-8b21-449f279b7f4b?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -357,17 +438,19 @@ "
\n",
        "                                                  Skill Details:                                                   \n",
        "\n",
-       "Name: list_python_functions                                                                                     \n",
-       "Description: This skill lists all the Python functions and their docstrings in a specified directory.           \n",
+       "Name: list_python_functions_and_docstrings                                                                      \n",
+       "Description: This skill lists all the Python functions and their docstrings in a specified directory. It first  \n",
+       "   gets a list of all Python files in the directory, then parses each file to find all function definitions. For   \n",
+       "   each function definition, it extracts the function's name and its docstring.                                    \n",
        "Version: 1.0.0                                                                                                  \n",
        "Usage:                                                                                                          \n",
        "\n",
        "                                                                                                                   \n",
-       " list_python_functions('/path/to/directory')                                                                       \n",
+       " list_python_functions_and_docstrings('/path/to/directory')                                                        \n",
        "                                                                                                                   \n",
        "\n",
        "Parameters:                                                                                                     \n",
-       "directory (string): The directory path where the Python files are located.                                   \n",
+       "directory (string): The directory to search for Python files.                                                \n",
        "Required: True                                                                                            \n",
        "Returns:                                                                                                        \n",
        "
\n" @@ -376,17 +459,19 @@ "\n", " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: list_python_functions \n", - "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill lists all the Python functions and their docstrings in a specified directory. \n", + "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: list_python_functions_and_docstrings \n", + "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill lists all the Python functions and their docstrings in a specified directory. It first \n", + "\u001b[1;33m \u001b[0mgets a list of all Python files in the directory, then parses each file to find all function definitions. For \n", + "\u001b[1;33m \u001b[0meach function definition, it extracts the function's name and its docstring. \n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mlist_python_functions\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m/path/to/directory\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mlist_python_functions_and_docstrings\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m/path/to/directory\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • 
\u001b[0m\u001b[1mdirectory\u001b[0m (string): The directory path where the Python files are located. \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mdirectory\u001b[0m (string): The directory to search for Python files. \n", "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n" ] @@ -415,7 +500,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "ef6cc0331df844629f215b0e657c58e9", + "model_id": "72229d73f3cc4980bb7bcac6009fe5b9", "version_major": 2, "version_minor": 0 }, @@ -445,6 +530,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/c94c0ec7-\n",
+       "ce23-44f5-91d5-6bd5ec1f1e39?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=672460;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/c94c0ec7-ce23-44f5-91d5-6bd5ec1f1e39?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/c94c0ec7-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=672460;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/c94c0ec7-ce23-44f5-91d5-6bd5ec1f1e39?poll=true\u001b\\\u001b[4;34mce23-44f5-91d5-6bd5ec1f1e39?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -462,62 +564,56 @@ "
\n",
        "                                                  Skill Details:                                                   \n",
        "\n",
-       "Name: extract_pdf_pages                                                                                         \n",
-       "Description: This skill extracts a specified section from a PDF file and saves it as a new PDF.                 \n",
+       "Name: extract_pages_from_pdf                                                                                    \n",
+       "Description: This skill extracts a specified range of pages from a PDF file and saves them as a new PDF file.   \n",
+       "   The user needs to provide the path to the original PDF file and the range of pages to be extracted. The         \n",
+       "   extracted pages are saved in a new PDF file in the current working directory.                                   \n",
        "Version: 1.0.0                                                                                                  \n",
        "Usage:                                                                                                          \n",
        "\n",
        "                                                                                                                   \n",
-       " pdf_path = '~/Downloads/voyager.pdf'                                                                              \n",
-       " start_page = 2                                                                                                    \n",
-       " end_page = 5                                                                                                      \n",
-       " output_path = 'extracted_pages.pdf'                                                                               \n",
-       "                                                                                                                   \n",
-       " extract_pdf_pages(pdf_path, start_page, end_page, output_path)                                                    \n",
+       " extract_pages_from_pdf('~/Downloads/voyager.pdf', 2, 5, 'voyager_extracted.pdf')                                  \n",
        "                                                                                                                   \n",
        "\n",
        "Parameters:                                                                                                     \n",
-       "pdf_path (string): The path to the PDF file.                                                                 \n",
-       "Required: True                                                                                            \n",
-       "start_page (integer): The starting page number to extract.                                                   \n",
+       "pdf_path (string): Path to the original PDF file.                                                            \n",
        "Required: True                                                                                            \n",
-       "end_page (integer): The ending page number to extract.                                                       \n",
+       "start_page (integer): The first page to be extracted. Page numbers start from 1.                             \n",
        "Required: True                                                                                            \n",
-       "output_path (string): The path to save the extracted pages as a new PDF file.                                \n",
+       "end_page (integer): The last page to be extracted. This page is included in the extraction.                  \n",
        "Required: True                                                                                            \n",
+       "output_file (string): Name of the output file where the extracted pages will be saved.                       \n",
+       "Default: 'extracted_pages.pdf'                                                                            \n",
        "Returns:                                                                                                        \n",
-       "output_path (string): The path to the extracted pages PDF file.                                              \n",
+       "output_file (string): Name of the output file where the extracted pages were saved.                          \n",
        "
\n" ], "text/plain": [ "\n", " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: extract_pdf_pages \n", - "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill extracts a specified section from a PDF file and saves it as a new PDF. \n", + "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: extract_pages_from_pdf \n", + "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill extracts a specified range of pages from a PDF file and saves them as a new PDF file. \n", + "\u001b[1;33m \u001b[0mThe user needs to provide the path to the original PDF file and the range of pages to be extracted. The \n", + "\u001b[1;33m \u001b[0mextracted pages are saved in a new PDF file in the current working directory. \n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpdf_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m~/Downloads/voyager.pdf\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mstart_page\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m2\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mend_page\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m5\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34moutput_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mextracted_pages.pdf\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mextract_pdf_pages\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpdf_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mstart_page\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mend_page\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34moutput_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mextract_pages_from_pdf\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m~/Downloads/voyager.pdf\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m2\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m5\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mvoyager_extracted.pdf\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mpdf_path\u001b[0m (string): The path to the PDF file. \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mstart_page\u001b[0m (integer): The starting page number to extract. \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mpdf_path\u001b[0m (string): Path to the original PDF file. \n", "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mend_page\u001b[0m (integer): The ending page number to extract. \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mstart_page\u001b[0m (integer): The first page to be extracted. Page numbers start from 1. \n", "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1moutput_path\u001b[0m (string): The path to save the extracted pages as a new PDF file. \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mend_page\u001b[0m (integer): The last page to be extracted. This page is included in the extraction. 
\n", "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1moutput_file\u001b[0m (string): Name of the output file where the extracted pages will be saved. \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mDefault: 'extracted_pages.pdf' \n", "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1moutput_path\u001b[0m (string): The path to the extracted pages PDF file. \n" + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1moutput_file\u001b[0m (string): Name of the output file where the extracted pages were saved. \n" ] }, "metadata": {}, @@ -571,13 +667,13 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "1025bc88f4c2439dbf1a24a060c7db54", + "model_id": "25767f6c233d4d3a986d5859754d2fed", "version_major": 2, "version_minor": 0 }, @@ -607,6 +703,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/e71ac5c0-\n",
+       "685f-4f2b-9690-b77fb264eaec?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=90796;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/e71ac5c0-685f-4f2b-9690-b77fb264eaec?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/e71ac5c0-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=90796;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/e71ac5c0-685f-4f2b-9690-b77fb264eaec?poll=true\u001b\\\u001b[4;34m685f-4f2b-9690-b77fb264eaec?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -615,7 +728,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -625,25 +738,13 @@ " Skill Details: \n", "\n", "Name: display_markdown_message \n", - "Description: This skill is used to display a markdown message in a formatted way. It takes a multiline string as\n", - " input and prints the message with proper formatting. It supports markdown syntax and can handle indentation and \n", - " multiline strings. \n", + "Description: This skill is used to display a markdown message. It works with multiline strings with lots of \n", + " indentation and will automatically make single line > tags beautiful. 
\n", "Version: 1.0.0 \n", "Usage: \n", "\n", " \n", - " message = \"\"\" \n", - " # Heading \n", - " \n", - " - List item 1 \n", - " - List item 2 \n", - " \n", - " --- \n", - " \n", - " > Blockquote \n", - " \n", - " \"\"\" \n", - " display_markdown_message(message) \n", + " display_markdown_message('> This is a markdown message.') \n", " \n", "\n", "Parameters: \n", @@ -657,25 +758,13 @@ " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: display_markdown_message \n", - "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is used to display a markdown message in a formatted way. It takes a multiline string as\n", - "\u001b[1;33m \u001b[0minput and prints the message with proper formatting. It supports markdown syntax and can handle indentation and \n", - "\u001b[1;33m \u001b[0mmultiline strings. \n", + "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is used to display a markdown message. It works with multiline strings with lots of \n", + "\u001b[1;33m \u001b[0mindentation and will automatically make single line > tags beautiful. 
\n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessage\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"\"\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m# Heading\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m- List item 1\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m- List item 2\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m---\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m> Blockquote\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"\"\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m 
\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdisplay_markdown_message\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessage\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdisplay_markdown_message\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m> This is a markdown message.\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", @@ -702,13 +791,13 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "541ad5ff7e6146bbb08bde2c69149fe2", + "model_id": "4efa90f0f2004cd9beb5895cd879b702", "version_major": 2, "version_minor": 0 }, @@ -738,6 +827,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/f3a428b6-\n",
+       "e8ef-4d61-b9a1-503afe355eaa?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=429238;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/f3a428b6-e8ef-4d61-b9a1-503afe355eaa?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/f3a428b6-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=429238;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/f3a428b6-e8ef-4d61-b9a1-503afe355eaa?poll=true\u001b\\\u001b[4;34me8ef-4d61-b9a1-503afe355eaa?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -746,7 +852,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -755,20 +861,24 @@ "
\n",
        "                                                  Skill Details:                                                   \n",
        "\n",
-       "Name: create_api                                                                                                \n",
-       "Description: Generates a CodeSkill instance using different input sources.                                      \n",
+       "Name: create                                                                                                    \n",
+       "Description: This function generates a CodeSkill instance using different input sources. It can take in a       \n",
+       "   request string detailing the skill functionality, messages or a path to a JSON file containing messages, a      \n",
+       "   string of file content or path to a code/API doc file, a directory path with skill name as stem or file path    \n",
+       "   with skill.json as stem, an identifier for a Huggingface repository, or a path to the skill within the          \n",
+       "   Huggingface repository.                                                                                         \n",
        "Version: 1.0.0                                                                                                  \n",
        "Usage:                                                                                                          \n",
        "\n",
        "                                                                                                                   \n",
-       " create_api(request, messages=messages, messages_json_path=messages_json_path, file_content=file_content,          \n",
-       " file_path=file_path, skill_path=skill_path, skill_json_path=skill_json_path,                                      \n",
-       " huggingface_repo_id=huggingface_repo_id, huggingface_skill_path=huggingface_skill_path)                           \n",
+       " from creator import create                                                                                        \n",
+       "                                                                                                                   \n",
+       " skill = create(request='...', messages=[...], file_content='...', file_path='...', skill_path='...',              \n",
+       " skill_json_path='...', huggingface_repo_id='...', huggingface_skill_path='...')                                   \n",
        "                                                                                                                   \n",
        "\n",
        "Parameters:                                                                                                     \n",
        "request (string): String detailing the skill functionality.                                                  \n",
-       "Required: True                                                                                            \n",
        "messages (array): Messages as a list of dictionaries.                                                        \n",
        "messages_json_path (string): Path to a JSON file containing messages.                                        \n",
        "file_content (string): String of file content.                                                               \n",
@@ -785,20 +895,24 @@
        "\n",
        "                                                  \u001b[1;4mSkill Details:\u001b[0m                                                   \n",
        "\n",
-       "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: create_api                                                                                                \n",
-       "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: Generates a \u001b[1;36;40mCodeSkill\u001b[0m instance using different input sources.                                      \n",
+       "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: create                                                                                                    \n",
+       "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This function generates a \u001b[1;36;40mCodeSkill\u001b[0m instance using different input sources. It can take in a       \n",
+       "\u001b[1;33m   \u001b[0mrequest string detailing the skill functionality, messages or a path to a JSON file containing messages, a      \n",
+       "\u001b[1;33m   \u001b[0mstring of file content or path to a code/API doc file, a directory path with skill name as stem or file path    \n",
+       "\u001b[1;33m   \u001b[0mwith \u001b[1;36;40mskill.json\u001b[0m as stem, an identifier for a Huggingface repository, or a path to the skill within the          \n",
+       "\u001b[1;33m   \u001b[0mHuggingface repository.                                                                                         \n",
        "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0                                                                                                  \n",
        "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m:                                                                                                          \n",
        "\n",
        "\u001b[48;2;39;40;34m                                                                                                                   \u001b[0m\n",
-       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcreate_api\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mrequest\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages_json_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages_json_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_content\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_content\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m        \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
-       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_json_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_json_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m                                    \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
-       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_repo_id\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_repo_id\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_skill_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_skill_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m                          \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
+       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34mfrom\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcreator\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34mimport\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcreate\u001b[0m\u001b[48;2;39;40;34m                                                                                       \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
+       "\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m                                                                                                                 \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
+       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcreate\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mrequest\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m.\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m.\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m.\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_content\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m            \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
+       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_json_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_repo_id\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_skill_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m                                  \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
        "\u001b[48;2;39;40;34m                                                                                                                   \u001b[0m\n",
        "\n",
        "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m:                                                                                                     \n",
        "\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mrequest\u001b[0m (string): String detailing the skill functionality.                                                  \n",
-       "\u001b[1;33m   \u001b[0m\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0mRequired: True                                                                                            \n",
        "\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mmessages\u001b[0m (array): Messages as a list of dictionaries.                                                        \n",
        "\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mmessages_json_path\u001b[0m (string): Path to a JSON file containing messages.                                        \n",
        "\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mfile_content\u001b[0m (string): String of file content.                                                               \n",
@@ -833,19 +947,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 15,
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/html": [
        "
\n",
-       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/create_api                                        \n",
+       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/create                                            \n",
        "
\n" ], "text/plain": [ "\n", - "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/create_api\u001b[0m\u001b[35m \u001b[0m\n" + "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/create\u001b[0m\u001b[35m \u001b[0m\n" ] }, "metadata": {}, @@ -862,7 +976,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 16, "metadata": {}, "outputs": [ { diff --git a/docs/examples/02_skills_library.ipynb b/docs/examples/02_skills_library.ipynb index f88ff36..7d3aec6 100644 --- a/docs/examples/02_skills_library.ipynb +++ b/docs/examples/02_skills_library.ipynb @@ -59,7 +59,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "d19fad8112b548a18fc1a1a0e05a246d", + "model_id": "0fad0f77899147108a9a42beeab4bfc9", "version_major": 2, "version_minor": 0 }, @@ -89,6 +89,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/b898dfdb-\n",
+       "c596-4538-b4c9-d88136c3fb4f?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=81787;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/b898dfdb-c596-4538-b4c9-d88136c3fb4f?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/b898dfdb-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=81787;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/b898dfdb-c596-4538-b4c9-d88136c3fb4f?poll=true\u001b\\\u001b[4;34mc596-4538-b4c9-d88136c3fb4f?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -106,20 +123,24 @@ "
\n",
        "                                                  Skill Details:                                                   \n",
        "\n",
-       "Name: create_api                                                                                                \n",
-       "Description: Generates a CodeSkill instance using different input sources.                                      \n",
+       "Name: create                                                                                                    \n",
+       "Description: This function generates a CodeSkill instance using different input sources. It can take in a       \n",
+       "   request string detailing the skill functionality, messages or a path to a JSON file containing messages, a      \n",
+       "   string of file content or path to a code/API doc file, a directory path with skill name as stem or file path    \n",
+       "   with skill.json as stem, an identifier for a Huggingface repository, or a path to the skill within the          \n",
+       "   Huggingface repository.                                                                                         \n",
        "Version: 1.0.0                                                                                                  \n",
        "Usage:                                                                                                          \n",
        "\n",
        "                                                                                                                   \n",
-       " create_api(request, messages=messages, messages_json_path=messages_json_path, file_content=file_content,          \n",
-       " file_path=file_path, skill_path=skill_path, skill_json_path=skill_json_path,                                      \n",
-       " huggingface_repo_id=huggingface_repo_id, huggingface_skill_path=huggingface_skill_path)                           \n",
+       " from creator import create                                                                                        \n",
+       "                                                                                                                   \n",
+       " skill = create(request='...', messages=[...], file_content='...', file_path='...', skill_path='...',              \n",
+       " skill_json_path='...', huggingface_repo_id='...', huggingface_skill_path='...')                                   \n",
        "                                                                                                                   \n",
        "\n",
        "Parameters:                                                                                                     \n",
        "request (string): String detailing the skill functionality.                                                  \n",
-       "Required: True                                                                                            \n",
        "messages (array): Messages as a list of dictionaries.                                                        \n",
        "messages_json_path (string): Path to a JSON file containing messages.                                        \n",
        "file_content (string): String of file content.                                                               \n",
@@ -136,20 +157,24 @@
        "\n",
        "                                                  \u001b[1;4mSkill Details:\u001b[0m                                                   \n",
        "\n",
-       "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: create_api                                                                                                \n",
-       "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: Generates a \u001b[1;36;40mCodeSkill\u001b[0m instance using different input sources.                                      \n",
+       "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: create                                                                                                    \n",
+       "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This function generates a \u001b[1;36;40mCodeSkill\u001b[0m instance using different input sources. It can take in a       \n",
+       "\u001b[1;33m   \u001b[0mrequest string detailing the skill functionality, messages or a path to a JSON file containing messages, a      \n",
+       "\u001b[1;33m   \u001b[0mstring of file content or path to a code/API doc file, a directory path with skill name as stem or file path    \n",
+       "\u001b[1;33m   \u001b[0mwith \u001b[1;36;40mskill.json\u001b[0m as stem, an identifier for a Huggingface repository, or a path to the skill within the          \n",
+       "\u001b[1;33m   \u001b[0mHuggingface repository.                                                                                         \n",
        "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0                                                                                                  \n",
        "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m:                                                                                                          \n",
        "\n",
        "\u001b[48;2;39;40;34m                                                                                                                   \u001b[0m\n",
-       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcreate_api\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mrequest\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages_json_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages_json_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_content\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_content\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m        \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
-       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_json_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_json_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m                                    \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
-       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_repo_id\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_repo_id\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_skill_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_skill_path\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m                          \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
+       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34mfrom\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcreator\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34mimport\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcreate\u001b[0m\u001b[48;2;39;40;34m                                                                                       \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
+       "\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m                                                                                                                 \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
+       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcreate\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mrequest\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mmessages\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m.\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m.\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m.\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_content\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mfile_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m            \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
+       "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mskill_json_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_repo_id\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mhuggingface_skill_path\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m...\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m                                  \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n",
        "\u001b[48;2;39;40;34m                                                                                                                   \u001b[0m\n",
        "\n",
        "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m:                                                                                                     \n",
        "\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mrequest\u001b[0m (string): String detailing the skill functionality.                                                  \n",
-       "\u001b[1;33m   \u001b[0m\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0mRequired: True                                                                                            \n",
        "\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mmessages\u001b[0m (array): Messages as a list of dictionaries.                                                        \n",
        "\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mmessages_json_path\u001b[0m (string): Path to a JSON file containing messages.                                        \n",
        "\u001b[1;33m   \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mfile_content\u001b[0m (string): String of file content.                                                               \n",
@@ -179,12 +204,12 @@
      "data": {
       "text/html": [
        "
\n",
-       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/create_api                                        \n",
+       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/create                                            \n",
        "
\n" ], "text/plain": [ "\n", - "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/create_api\u001b[0m\u001b[35m \u001b[0m\n" + "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/create\u001b[0m\u001b[35m \u001b[0m\n" ] }, "metadata": {}, @@ -204,19 +229,19 @@ "name": "stdout", "output_type": "stream", "text": [ - "/Users/gongjunmin/.cache/open_creator/remote/ChuxiJ/skill_library/create_api\n" + "/Users/gongjunmin/.cache/open_creator/remote/ChuxiJ/skill_library/create\n" ] }, { "data": { "text/html": [ "
\n",
-       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/create_api                                        \n",
+       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/create                                            \n",
        "
\n" ], "text/plain": [ "\n", - "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/create_api\u001b[0m\u001b[35m \u001b[0m\n" + "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/create\u001b[0m\u001b[35m \u001b[0m\n" ] }, "metadata": {}, @@ -236,11 +261,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[1m\u001b[36mask_run_code_confirm\u001b[m\u001b[m \u001b[1m\u001b[36mextract_pdf_section\u001b[m\u001b[m \u001b[1m\u001b[36mlist_python_functions\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcount_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36mextract_section_from_pdf\u001b[m\u001b[m \u001b[1m\u001b[36msolve_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcreate\u001b[m\u001b[m \u001b[1m\u001b[36mfilter_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36msolve_game_of_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcreate_api\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mdisplay_markdown_message\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24_solver\u001b[m\u001b[m\n" + "\u001b[1m\u001b[36mask_run_code_confirm\u001b[m\u001b[m \u001b[1m\u001b[36mextract_pdf_section\u001b[m\u001b[m \u001b[1m\u001b[36msolve_24\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcount_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36mextract_section_from_pdf\u001b[m\u001b[m \u001b[1m\u001b[36msolve_game_of_24\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate\u001b[m\u001b[m \u001b[1m\u001b[36mfilter_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36msolve_quadratic_equation\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate_api\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24\u001b[m\u001b[m \u001b[1m\u001b[36msolve_random_maze\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate_scatter_plot\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24_solver\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mdisplay_markdown_message\u001b[m\u001b[m 
\u001b[1m\u001b[36mlist_python_functions\u001b[m\u001b[m\n" ] } ], @@ -265,12 +291,12 @@ "data": { "text/html": [ "
\n",
-       "▌ saved to /Users/gongjunmin/.cache/open_creator/remote/ChuxiJ/skill_library/create_api/create_api               \n",
+       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/create_api                                        \n",
        "
\n" ], "text/plain": [ "\n", - "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/remote/ChuxiJ/skill_library/create_api/create_api\u001b[0m\u001b[35m \u001b[0m\n" + "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/create_api\u001b[0m\u001b[35m \u001b[0m\n" ] }, "metadata": {}, @@ -388,14 +414,12 @@ "data": { "text/html": [ "
\n",
-       "▌ saved to                                                                                                       \n",
-       "▌ /Users/gongjunmin/.cache/open_creator/remote/Sayoyo/skill-library/extract_pdf_section/extract_pdf_section      \n",
+       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/extract_pdf_section                               \n",
        "
\n" ], "text/plain": [ "\n", - "\u001b[35m▌ \u001b[0m\u001b[35msaved to \u001b[0m\u001b[35m \u001b[0m\n", - "\u001b[35m▌ \u001b[0m\u001b[35m/Users/gongjunmin/.cache/open_creator/remote/Sayoyo/skill-library/extract_pdf_section/extract_pdf_section\u001b[0m\u001b[35m \u001b[0m\n" + "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/extract_pdf_section\u001b[0m\u001b[35m \u001b[0m\n" ] }, "metadata": {}, diff --git a/docs/examples/03_skills_search.ipynb b/docs/examples/03_skills_search.ipynb index 72b2a7b..de5ffca 100644 --- a/docs/examples/03_skills_search.ipynb +++ b/docs/examples/03_skills_search.ipynb @@ -52,11 +52,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[1m\u001b[36mask_run_code_confirm\u001b[m\u001b[m \u001b[1m\u001b[36mextract_pdf_section\u001b[m\u001b[m \u001b[1m\u001b[36mlist_python_functions\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcount_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36mextract_section_from_pdf\u001b[m\u001b[m \u001b[1m\u001b[36msolve_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcreate\u001b[m\u001b[m \u001b[1m\u001b[36mfilter_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36msolve_game_of_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcreate_api\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mdisplay_markdown_message\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24_solver\u001b[m\u001b[m\n" + "\u001b[1m\u001b[36mask_run_code_confirm\u001b[m\u001b[m \u001b[1m\u001b[36mextract_pdf_section\u001b[m\u001b[m \u001b[1m\u001b[36msolve_24\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcount_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36mextract_section_from_pdf\u001b[m\u001b[m \u001b[1m\u001b[36msolve_game_of_24\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate\u001b[m\u001b[m \u001b[1m\u001b[36mfilter_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36msolve_quadratic_equation\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate_api\u001b[m\u001b[m 
\u001b[1m\u001b[36mgame_of_24\u001b[m\u001b[m \u001b[1m\u001b[36msolve_random_maze\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate_scatter_plot\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24_solver\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mdisplay_markdown_message\u001b[m\u001b[m \u001b[1m\u001b[36mlist_python_functions\u001b[m\u001b[m\n" ] } ], diff --git a/docs/examples/04_skills_run.ipynb b/docs/examples/04_skills_run.ipynb index a4d26d0..0a6aff5 100644 --- a/docs/examples/04_skills_run.ipynb +++ b/docs/examples/04_skills_run.ipynb @@ -38,7 +38,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "../docs/tech_report/open-creator.pdf\n" + "ls: ./tech_report/open-creator.pdf: No such file or directory\n" ] } ], @@ -48,9 +48,24 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, "metadata": {}, "outputs": [ + { + "data": { + "text/html": [ + "
\n",
+       "▌ loading vector database...                                                                                     \n",
+       "
\n" + ], + "text/plain": [ + "\n", + "\u001b[35m▌ \u001b[0m\u001b[35mloading vector database...\u001b[0m\u001b[35m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -118,7 +133,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -184,7 +199,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -211,7 +226,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -225,7 +240,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -234,7 +249,7 @@ "'./data/open-creator2-5.pdf'" ] }, - "execution_count": 10, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -252,19 +267,19 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n",
-       "▌ loading vector database...                                                                                     \n",
+       "▌ Installing dependencies                                                                                        \n",
        "
\n" ], "text/plain": [ "\n", - "\u001b[35m▌ \u001b[0m\u001b[35mloading vector database...\u001b[0m\u001b[35m \u001b[0m\n" + "\u001b[35m▌ \u001b[0m\u001b[35mInstalling dependencies\u001b[0m\u001b[35m \u001b[0m\n" ] }, "metadata": {}, @@ -273,19 +288,15 @@ { "data": { "text/html": [ - "
{\n",
-       "  \"status\": \"success\",\n",
-       "  \"stdout\": \"\",\n",
-       "  \"stderr\": \"\"\n",
-       "}\n",
+       "
                                                                                                                   \n",
+       " pip show PyPDF2 || pip install \"PyPDF2>=1.26.0\"                                                                   \n",
+       "                                                                                                                   \n",
        "
\n" ], "text/plain": [ - "\u001b[1m{\u001b[0m\n", - " \u001b[1;34m\"status\"\u001b[0m: \u001b[32m\"success\"\u001b[0m,\n", - " \u001b[1;34m\"stdout\"\u001b[0m: \u001b[32m\"\"\u001b[0m,\n", - " \u001b[1;34m\"stderr\"\u001b[0m: \u001b[32m\"\"\u001b[0m\n", - "\u001b[1m}\u001b[0m\n" + "\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpip\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mshow\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mPyPDF2\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m||\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpip\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minstall\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"PyPDF2>=1.26.0\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\n" ] }, "metadata": {}, @@ -293,13 +304,22 @@ }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "decbe223ddf0462b99a8819c2891d198", - "version_major": 2, - "version_minor": 0 - }, + "text/html": [ + "
\n",
+       "▌ Install dependencies result: {'status': 'success', 'stdout': 'Name: PyPDF2\\nVersion: 3.0.1\\nSummary: A         \n",
+       "▌ pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files\\nHome-page:        \n",
+       "▌ \\nAuthor: \\nAuthor-email: Mathieu Fenniak biziqe@mathieu.fenniak.net\\nLicense: \\nLocation:                     \n",
+       "▌ /Users/gongjunmin/miniconda3/envs/open_creator_online/lib/python3.10/site-packages\\nRequires: \\nRequired-by:   \n",
+       "▌ \\n', 'stderr': ''}                                                                                             \n",
+       "
\n" + ], "text/plain": [ - "Output()" + "\n", + "\u001b[35m▌ \u001b[0m\u001b[35mInstall dependencies result: {'status': 'success', 'stdout': 'Name: PyPDF2\\nVersion: 3.0.1\\nSummary: A \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35mpure-python PDF library capable of splitting, merging, cropping, and transforming PDF files\\nHome-page: \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35m\\nAuthor: \\nAuthor-email: Mathieu Fenniak \u001b[0m\u001b]8;id=463524;mailto:biziqe@mathieu.fenniak.net\u001b\\\u001b[4;34mbiziqe@mathieu.fenniak.net\u001b[0m\u001b]8;;\u001b\\\u001b[35m\\nLicense: \\nLocation: \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35m/Users/gongjunmin/miniconda3/envs/open_creator_online/lib/python3.10/site-packages\\nRequires: \\nRequired-by: \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35m\\n', 'stderr': ''}\u001b[0m\u001b[35m \u001b[0m\n" ] }, "metadata": {}, @@ -308,7 +328,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "6b9f6a7914a541268042529481a0af39", + "model_id": "3b2d13db34d543c29e28eca4539ff644", "version_major": 2, "version_minor": 0 }, @@ -342,7 +362,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "1b5d53466fbf4132938cb62f06f948ee", + "model_id": "72d9c1b7db8449c899744345865925f4", "version_major": 2, "version_minor": 0 }, @@ -373,6 +393,23 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/8e9f879c-\n",
+       "1112-4a54-aa62-e1430ccddad6?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=340957;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/8e9f879c-1112-4a54-aa62-e1430ccddad6?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/8e9f879c-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=340957;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/8e9f879c-1112-4a54-aa62-e1430ccddad6?poll=true\u001b\\\u001b[4;34m1112-4a54-aa62-e1430ccddad6?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -397,30 +434,29 @@ " },\n", " {\n", " \"role\": \"user\",\n", - " \"content\": \"{\\\"pdf_path\\\": \\\"../docs/tech_report/open-creator.pdf\\\", \\\"start_page\\\": 2, \\\"end_page\\\": 5, \n", + " \"content\": \"{\\\"pdf_path\\\": \\\"../tech_report/open-creator.pdf\\\", \\\"start_page\\\": 2, \\\"end_page\\\": 5, \n", "\\\"output_path\\\": \\\"./data/open-creator2-5.pdf\\\"}\"\n", " },\n", " {\n", " \"role\": \"assistant\",\n", - " \"content\": \"To extract pages 2 to 5 from the PDF file \\\"../docs/tech_report/open-creator.pdf\\\" and save them as\n", - "a new file at \\\"./data/open-creator2-5.pdf\\\", I will use the `extract_pdf_section` function.\",\n", + " \"content\": null,\n", " \"function_call\": {\n", " \"name\": \"run_code\",\n", " \"arguments\": \"{\\\"language\\\": \\\"python\\\", \\\"code\\\": \\\"{\\\\\\\"language\\\\\\\": \\\\\\\"python\\\\\\\", \\\\\\\"code\\\\\\\": \n", - "\\\\\\\"extract_pdf_section('../docs/tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\"}\"\n", + "\\\\\\\"extract_pdf_section('../tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\"}\"\n", " }\n", " },\n", " {\n", " \"role\": \"function\",\n", " \"content\": \"{\\\"status\\\": 
\\\"success\\\", \\\"stdout\\\": \\\"{'language': 'python', 'code': \n", - "\\\\\\\"extract_pdf_section('../docs/tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\\n\\\", \n", - "\\\"stderr\\\": \\\"\\\"}\",\n", + "\\\\\\\"extract_pdf_section('../tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\", \\\"stderr\\\": \n", + "\\\"\\\"}\",\n", " \"name\": \"run_code\"\n", " },\n", " {\n", " \"role\": \"assistant\",\n", - " \"content\": \"The pages 2 to 5 have been extracted from the PDF file and saved as a new file at \n", - "\\\"./data/open-creator2-5.pdf\\\".\"\n", + " \"content\": \"The code has been executed successfully. The PDF section has been extracted and saved to the \n", + "specified output path. You can now access the extracted section from the output file.\"\n", " }\n", "]\n", "
\n" @@ -447,30 +483,29 @@ " \u001b[1m}\u001b[0m,\n", " \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"role\"\u001b[0m: \u001b[32m\"user\"\u001b[0m,\n", - " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"{\\\"pdf_path\\\": \\\"../docs/tech_report/open-creator.pdf\\\", \\\"start_page\\\": 2, \\\"end_page\\\": 5, \u001b[0m\n", + " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"{\\\"pdf_path\\\": \\\"../tech_report/open-creator.pdf\\\", \\\"start_page\\\": 2, \\\"end_page\\\": 5, \u001b[0m\n", "\u001b[32m\\\"output_path\\\": \\\"./data/open-creator2-5.pdf\\\"}\"\u001b[0m\n", " \u001b[1m}\u001b[0m,\n", " \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"role\"\u001b[0m: \u001b[32m\"assistant\"\u001b[0m,\n", - " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"To extract pages 2 to 5 from the PDF file \\\"../docs/tech_report/open-creator.pdf\\\" and save them as\u001b[0m\n", - "\u001b[32ma new file at \\\"./data/open-creator2-5.pdf\\\", I will use the `extract_pdf_section` function.\"\u001b[0m,\n", + " \u001b[1;34m\"content\"\u001b[0m: \u001b[3;35mnull\u001b[0m,\n", " \u001b[1;34m\"function_call\"\u001b[0m: \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"name\"\u001b[0m: \u001b[32m\"run_code\"\u001b[0m,\n", " \u001b[1;34m\"arguments\"\u001b[0m: \u001b[32m\"{\\\"language\\\": \\\"python\\\", \\\"code\\\": \\\"{\\\\\\\"language\\\\\\\": \\\\\\\"python\\\\\\\", \\\\\\\"code\\\\\\\": \u001b[0m\n", - "\u001b[32m\\\\\\\"extract_pdf_section('../docs/tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\"}\"\u001b[0m\n", + "\u001b[32m\\\\\\\"extract_pdf_section('../tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\"}\"\u001b[0m\n", " \u001b[1m}\u001b[0m\n", " \u001b[1m}\u001b[0m,\n", " \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"role\"\u001b[0m: \u001b[32m\"function\"\u001b[0m,\n", " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"{\\\"status\\\": \\\"success\\\", \\\"stdout\\\": \\\"{'language': 'python', 'code': \u001b[0m\n", - 
"\u001b[32m\\\\\\\"extract_pdf_section('../docs/tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\\n\\\", \u001b[0m\n", - "\u001b[32m\\\"stderr\\\": \\\"\\\"}\"\u001b[0m,\n", + "\u001b[32m\\\\\\\"extract_pdf_section('../tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\", \\\"stderr\\\": \u001b[0m\n", + "\u001b[32m\\\"\\\"}\"\u001b[0m,\n", " \u001b[1;34m\"name\"\u001b[0m: \u001b[32m\"run_code\"\u001b[0m\n", " \u001b[1m}\u001b[0m,\n", " \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"role\"\u001b[0m: \u001b[32m\"assistant\"\u001b[0m,\n", - " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"The pages 2 to 5 have been extracted from the PDF file and saved as a new file at \u001b[0m\n", - "\u001b[32m\\\"./data/open-creator2-5.pdf\\\".\"\u001b[0m\n", + " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"The code has been executed successfully. The PDF section has been extracted and saved to the \u001b[0m\n", + "\u001b[32mspecified output path. You can now access the extracted section from the output file.\"\u001b[0m\n", " \u001b[1m}\u001b[0m\n", "\u001b[1m]\u001b[0m\n" ] @@ -503,25 +538,59 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/html": [ - "
{\n",
-       "  \"status\": \"success\",\n",
-       "  \"stdout\": \"\",\n",
-       "  \"stderr\": \"\"\n",
-       "}\n",
+       "
\n",
+       "▌ Installing dependencies                                                                                        \n",
+       "
\n" + ], + "text/plain": [ + "\n", + "\u001b[35m▌ \u001b[0m\u001b[35mInstalling dependencies\u001b[0m\u001b[35m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
                                                                                                                   \n",
+       " pip show PyPDF2 || pip install \"PyPDF2>=1.26.0\"                                                                   \n",
+       "                                                                                                                   \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpip\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mshow\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mPyPDF2\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m||\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpip\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minstall\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"PyPDF2>=1.26.0\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "▌ Install dependencies result: {'status': 'success', 'stdout': 'Name: PyPDF2\\nVersion: 3.0.1\\nSummary: A         \n",
+       "▌ pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files\\nHome-page:        \n",
+       "▌ \\nAuthor: \\nAuthor-email: Mathieu Fenniak biziqe@mathieu.fenniak.net\\nLicense: \\nLocation:                     \n",
+       "▌ /Users/gongjunmin/miniconda3/envs/open_creator_online/lib/python3.10/site-packages\\nRequires: \\nRequired-by:   \n",
+       "▌ \\n', 'stderr': ''}                                                                                             \n",
        "
\n" ], "text/plain": [ - "\u001b[1m{\u001b[0m\n", - " \u001b[1;34m\"status\"\u001b[0m: \u001b[32m\"success\"\u001b[0m,\n", - " \u001b[1;34m\"stdout\"\u001b[0m: \u001b[32m\"\"\u001b[0m,\n", - " \u001b[1;34m\"stderr\"\u001b[0m: \u001b[32m\"\"\u001b[0m\n", - "\u001b[1m}\u001b[0m\n" + "\n", + "\u001b[35m▌ \u001b[0m\u001b[35mInstall dependencies result: {'status': 'success', 'stdout': 'Name: PyPDF2\\nVersion: 3.0.1\\nSummary: A \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35mpure-python PDF library capable of splitting, merging, cropping, and transforming PDF files\\nHome-page: \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35m\\nAuthor: \\nAuthor-email: Mathieu Fenniak \u001b[0m\u001b]8;id=265924;mailto:biziqe@mathieu.fenniak.net\u001b\\\u001b[4;34mbiziqe@mathieu.fenniak.net\u001b[0m\u001b]8;;\u001b\\\u001b[35m\\nLicense: \\nLocation: \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35m/Users/gongjunmin/miniconda3/envs/open_creator_online/lib/python3.10/site-packages\\nRequires: \\nRequired-by: \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35m\\n', 'stderr': ''}\u001b[0m\u001b[35m \u001b[0m\n" ] }, "metadata": {}, @@ -530,7 +599,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "fc4e1f4dd91642c5aef19c8022ffb169", + "model_id": "42950aa643bc4ef68fa89b28c76938b0", "version_major": 2, "version_minor": 0 }, @@ -564,7 +633,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "8f0cd6de709744018cf3f00241094e0f", + "model_id": "89e7e660999d4305bc57ae5575a1c250", "version_major": 2, "version_minor": 0 }, @@ -595,6 +664,23 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/6d48421f-\n",
+       "b99e-41f3-b794-14d4a553fd1b?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=124701;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/6d48421f-b99e-41f3-b794-14d4a553fd1b?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/6d48421f-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=124701;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/6d48421f-b99e-41f3-b794-14d4a553fd1b?poll=true\u001b\\\u001b[4;34mb99e-41f3-b794-14d4a553fd1b?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -619,7 +705,7 @@ " },\n", " {\n", " \"role\": \"user\",\n", - " \"content\": \"extract 2-5 pages section from pdf path '../docs/tech_report/open-creator.pdf' to \n", + " \"content\": \"extract 2-5 pages section from pdf path '../tech_report/open-creator.pdf' to \n", "'./data/open-creator2-5.pdf'\"\n", " },\n", " {\n", @@ -627,21 +713,19 @@ " \"content\": null,\n", " \"function_call\": {\n", " \"name\": \"run_code\",\n", - " \"arguments\": \"{\\\"language\\\": \\\"python\\\", \\\"code\\\": \\\"{\\\\\\\"language\\\\\\\": \\\\\\\"python\\\\\\\", \\\\\\\"code\\\\\\\": \n", - "\\\\\\\"extract_pdf_section('../docs/tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\"}\"\n", + " \"arguments\": \"{\\n\\\"language\\\": \\\"python\\\", \\n\\\"code\\\": \n", + "\\\"extract_pdf_section('../tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\"\\n}\"\n", " }\n", " },\n", " {\n", " \"role\": \"function\",\n", - " \"content\": \"{\\\"status\\\": \\\"success\\\", \\\"stdout\\\": \\\"{'language': 'python', 'code': \n", - "\\\\\\\"extract_pdf_section('../docs/tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\\n\\\", \n", - "\\\"stderr\\\": \\\"\\\"}\",\n", + " \"content\": 
\"{\\\"status\\\": \\\"success\\\", \\\"stdout\\\": \\\"./data/open-creator2-5.pdf\\\", \\\"stderr\\\": \\\"\\\"}\",\n", " \"name\": \"run_code\"\n", " },\n", " {\n", " \"role\": \"assistant\",\n", - " \"content\": \"The PDF section has been extracted successfully. You can download the extracted section from \n", - "[here](sandbox:/Users/gongjunmin/LLM/open_creator_dev/open-creator/examples/data/open-creator2-5.pdf).\"\n", + " \"content\": \"The extraction of pages 2-5 from the PDF at '../tech_report/open-creator.pdf' has been successful. \n", + "The extracted section has been saved to './data/open-creator2-5.pdf'.\"\n", " }\n", "]\n", "
\n" @@ -668,7 +752,7 @@ " \u001b[1m}\u001b[0m,\n", " \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"role\"\u001b[0m: \u001b[32m\"user\"\u001b[0m,\n", - " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"extract 2-5 pages section from pdf path '../docs/tech_report/open-creator.pdf' to \u001b[0m\n", + " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"extract 2-5 pages section from pdf path '../tech_report/open-creator.pdf' to \u001b[0m\n", "\u001b[32m'./data/open-creator2-5.pdf'\"\u001b[0m\n", " \u001b[1m}\u001b[0m,\n", " \u001b[1m{\u001b[0m\n", @@ -676,21 +760,19 @@ " \u001b[1;34m\"content\"\u001b[0m: \u001b[3;35mnull\u001b[0m,\n", " \u001b[1;34m\"function_call\"\u001b[0m: \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"name\"\u001b[0m: \u001b[32m\"run_code\"\u001b[0m,\n", - " \u001b[1;34m\"arguments\"\u001b[0m: \u001b[32m\"{\\\"language\\\": \\\"python\\\", \\\"code\\\": \\\"{\\\\\\\"language\\\\\\\": \\\\\\\"python\\\\\\\", \\\\\\\"code\\\\\\\": \u001b[0m\n", - "\u001b[32m\\\\\\\"extract_pdf_section('../docs/tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\"}\"\u001b[0m\n", + " \u001b[1;34m\"arguments\"\u001b[0m: \u001b[32m\"{\\n\\\"language\\\": \\\"python\\\", \\n\\\"code\\\": \u001b[0m\n", + "\u001b[32m\\\"extract_pdf_section('../tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\"\\n}\"\u001b[0m\n", " \u001b[1m}\u001b[0m\n", " \u001b[1m}\u001b[0m,\n", " \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"role\"\u001b[0m: \u001b[32m\"function\"\u001b[0m,\n", - " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"{\\\"status\\\": \\\"success\\\", \\\"stdout\\\": \\\"{'language': 'python', 'code': \u001b[0m\n", - "\u001b[32m\\\\\\\"extract_pdf_section('../docs/tech_report/open-creator.pdf', 2, 5, './data/open-creator2-5.pdf')\\\\\\\"}\\\\n\\\", \u001b[0m\n", - "\u001b[32m\\\"stderr\\\": \\\"\\\"}\"\u001b[0m,\n", + " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"{\\\"status\\\": \\\"success\\\", \\\"stdout\\\": \\\"./data/open-creator2-5.pdf\\\", 
\\\"stderr\\\": \\\"\\\"}\"\u001b[0m,\n", " \u001b[1;34m\"name\"\u001b[0m: \u001b[32m\"run_code\"\u001b[0m\n", " \u001b[1m}\u001b[0m,\n", " \u001b[1m{\u001b[0m\n", " \u001b[1;34m\"role\"\u001b[0m: \u001b[32m\"assistant\"\u001b[0m,\n", - " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"The PDF section has been extracted successfully. You can download the extracted section from \u001b[0m\n", - "\u001b[32m[here](sandbox:/Users/gongjunmin/LLM/open_creator_dev/open-creator/examples/data/open-creator2-5.pdf).\"\u001b[0m\n", + " \u001b[1;34m\"content\"\u001b[0m: \u001b[32m\"The extraction of pages 2-5 from the PDF at '../tech_report/open-creator.pdf' has been successful. \u001b[0m\n", + "\u001b[32mThe extracted section has been saved to './data/open-creator2-5.pdf'.\"\u001b[0m\n", " \u001b[1m}\u001b[0m\n", "\u001b[1m]\u001b[0m\n" ] @@ -703,7 +785,7 @@ "skills = search(\"pdf extract section\")\n", "if skills:\n", " skill = skills[0]\n", - " request = \"extract 2-5 pages section from pdf path '../docs/tech_report/open-creator.pdf' to './data/open-creator2-5.pdf'\"\n", + " request = \"extract 2-5 pages section from pdf path '../tech_report/open-creator.pdf' to './data/open-creator2-5.pdf'\"\n", " messages = skill.run(request)\n", " print(messages, print_type=\"json\")" ] diff --git a/docs/examples/05_skills_test.ipynb b/docs/examples/05_skills_test.ipynb index 8429294..aae434c 100644 --- a/docs/examples/05_skills_test.ipynb +++ b/docs/examples/05_skills_test.ipynb @@ -29,11 +29,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[1m\u001b[36mask_run_code_confirm\u001b[m\u001b[m \u001b[1m\u001b[36mextract_pdf_section\u001b[m\u001b[m \u001b[1m\u001b[36mlist_python_functions\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcount_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36mextract_section_from_pdf\u001b[m\u001b[m \u001b[1m\u001b[36msolve_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcreate\u001b[m\u001b[m \u001b[1m\u001b[36mfilter_prime_numbers\u001b[m\u001b[m 
\u001b[1m\u001b[36msolve_game_of_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mcreate_api\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24\u001b[m\u001b[m\n", - "\u001b[1m\u001b[36mdisplay_markdown_message\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24_solver\u001b[m\u001b[m\n" + "\u001b[1m\u001b[36mask_run_code_confirm\u001b[m\u001b[m \u001b[1m\u001b[36mextract_pdf_section\u001b[m\u001b[m \u001b[1m\u001b[36msolve_24\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcount_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36mextract_section_from_pdf\u001b[m\u001b[m \u001b[1m\u001b[36msolve_game_of_24\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate\u001b[m\u001b[m \u001b[1m\u001b[36mfilter_prime_numbers\u001b[m\u001b[m \u001b[1m\u001b[36msolve_quadratic_equation\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate_api\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24\u001b[m\u001b[m \u001b[1m\u001b[36msolve_random_maze\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mcreate_scatter_plot\u001b[m\u001b[m \u001b[1m\u001b[36mgame_of_24_solver\u001b[m\u001b[m\n", + "\u001b[1m\u001b[36mdisplay_markdown_message\u001b[m\u001b[m \u001b[1m\u001b[36mlist_python_functions\u001b[m\u001b[m\n" ] } ], @@ -113,13 +114,13 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "b6ada793fc744a30934de8526909bab7", + "model_id": "747b76bb1aba4f8c930caeb9a40797de", "version_major": 2, "version_minor": 0 }, @@ -133,7 +134,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "b612d423a0b149aa950a0a0d0dd70aa6", + "model_id": "3c838524ec9042ce85d51eca088af2f4", "version_major": 2, "version_minor": 0 }, @@ -167,7 +168,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "25fcc1f76d314cce9a7ad35776b64989", + "model_id": "edfeec012c7d42d9b2d69cf9252f2fba", "version_major": 2, "version_minor": 0 }, @@ -197,6 +198,23 @@ }, "metadata": {}, "output_type": 
"display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/2bff98db-\n",
+       "7d63-4608-a487-ea7ec12f05a2?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=883877;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/2bff98db-7d63-4608-a487-ea7ec12f05a2?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/2bff98db-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=883877;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/2bff98db-7d63-4608-a487-ea7ec12f05a2?poll=true\u001b\\\u001b[4;34m7d63-4608-a487-ea7ec12f05a2?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -206,7 +224,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -217,46 +235,46 @@ "\n", " Test Case 0 \n", "\n", - "Test Input: (2, 2) \n", - "Run Command: filter_prime_numbers(2, 2) \n", - "Expected Result: 1 \n", - "Actual Result: 1 \n", + "Test Input: (2, 10) \n", + "Run Command: filter_prime_numbers(2, 10) \n", + "Expected Result: 4 \n", + "Actual Result: 4 \n", "Is Passed: Yes \n", "\n", "───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n", " Test Case 1 \n", "\n", - "Test Input: (2, 3) \n", - "Run Command: filter_prime_numbers(2, 3) \n", - "Expected Result: 2 \n", - "Actual Result: 2 \n", + "Test Input: (11, 20) \n", + "Run Command: filter_prime_numbers(11, 20) \n", + "Expected Result: 4 \n", + "Actual Result: 4 \n", "Is Passed: Yes \n", "\n", "───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n", " Test Case 2 \n", "\n", - "Test Input: (2, 10) \n", - "Run Command: filter_prime_numbers(2, 10) \n", - "Expected Result: 4 \n", - "Actual Result: 4 \n", + "Test Input: (21, 30) \n", + "Run Command: filter_prime_numbers(21, 30) \n", + "Expected Result: 
2 \n", + "Actual Result: 2 \n", "Is Passed: Yes \n", "\n", "───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n", " Test Case 3 \n", "\n", - "Test Input: (2, 20) \n", - "Run Command: filter_prime_numbers(2, 20) \n", - "Expected Result: 8 \n", - "Actual Result: 8 \n", + "Test Input: (31, 40) \n", + "Run Command: filter_prime_numbers(31, 40) \n", + "Expected Result: 2 \n", + "Actual Result: 2 \n", "Is Passed: Yes \n", "\n", "───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n", " Test Case 4 \n", "\n", - "Test Input: (2, 100) \n", - "Run Command: filter_prime_numbers(2, 100) \n", - "Expected Result: 25 \n", - "Actual Result: 25 \n", + "Test Input: (41, 50) \n", + "Run Command: filter_prime_numbers(41, 50) \n", + "Expected Result: 3 \n", + "Actual Result: 3 \n", "Is Passed: Yes \n", "\n", "───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n", @@ -268,46 +286,46 @@ "\n", " \u001b[1mTest Case 0\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mTest Input:\u001b[0m (2, 2) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(2, 2) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 1 \n", - "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 1 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mTest Input:\u001b[0m (2, 10) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(2, 10) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 4 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 4 \n", "\u001b[1;33m • \u001b[0m\u001b[1mIs Passed:\u001b[0m Yes \n", "\n", "\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n", " \u001b[1mTest Case 1\u001b[0m \n", "\n", - "\u001b[1;33m • 
\u001b[0m\u001b[1mTest Input:\u001b[0m (2, 3) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(2, 3) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 2 \n", - "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 2 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mTest Input:\u001b[0m (11, 20) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(11, 20) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 4 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 4 \n", "\u001b[1;33m • \u001b[0m\u001b[1mIs Passed:\u001b[0m Yes \n", "\n", "\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n", " \u001b[1mTest Case 2\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mTest Input:\u001b[0m (2, 10) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(2, 10) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 4 \n", - "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 4 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mTest Input:\u001b[0m (21, 30) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(21, 30) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 2 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 2 \n", "\u001b[1;33m • \u001b[0m\u001b[1mIs Passed:\u001b[0m Yes \n", "\n", "\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n", " \u001b[1mTest Case 3\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mTest Input:\u001b[0m (2, 20) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(2, 20) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 8 \n", - "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 8 \n", + "\u001b[1;33m • 
\u001b[0m\u001b[1mTest Input:\u001b[0m (31, 40) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(31, 40) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 2 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 2 \n", "\u001b[1;33m • \u001b[0m\u001b[1mIs Passed:\u001b[0m Yes \n", "\n", "\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n", " \u001b[1mTest Case 4\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mTest Input:\u001b[0m (2, 100) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(2, 100) \n", - "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 25 \n", - "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 25 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mTest Input:\u001b[0m (41, 50) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mRun Command:\u001b[0m filter_prime_numbers(41, 50) \n", + "\u001b[1;33m • \u001b[0m\u001b[1mExpected Result:\u001b[0m 3 \n", + "\u001b[1;33m • \u001b[0m\u001b[1mActual Result:\u001b[0m 3 \n", "\u001b[1;33m • \u001b[0m\u001b[1mIs Passed:\u001b[0m Yes \n", "\n", "\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n" @@ -323,7 +341,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -345,7 +363,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ diff --git a/docs/examples/06_skills_refactor.ipynb b/docs/examples/06_skills_refactor.ipynb index 7c18ed8..44f246b 100644 --- a/docs/examples/06_skills_refactor.ipynb +++ b/docs/examples/06_skills_refactor.ipynb @@ -180,7 +180,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "9f7b8af45abf4ef598efa08de48ddc01", + "model_id": 
"8f80adfafef544098b2f5312b9b605b6", "version_major": 2, "version_minor": 0 }, @@ -210,6 +210,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/3753f345-\n",
+       "ceed-4928-9099-327d12fa31c8?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=323673;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/3753f345-ceed-4928-9099-327d12fa31c8?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/3753f345-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=323673;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/3753f345-ceed-4928-9099-327d12fa31c8?poll=true\u001b\\\u001b[4;34mceed-4928-9099-327d12fa31c8?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -295,7 +312,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "58dae241e0e5474bb2e974701ddc6021", + "model_id": "2465596d3dff439d9fce6d4ab1b46e74", "version_major": 2, "version_minor": 0 }, @@ -325,6 +342,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/9b5d62a4-\n",
+       "109d-4d56-a996-a9cbf928c840?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=180123;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/9b5d62a4-109d-4d56-a996-a9cbf928c840?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/9b5d62a4-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=180123;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/9b5d62a4-109d-4d56-a996-a9cbf928c840?poll=true\u001b\\\u001b[4;34m109d-4d56-a996-a9cbf928c840?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -350,7 +384,7 @@ "Usage: \n", "\n", " \n", - " cleaned_data, null_count, duplicate_count = data_cleaning_with_stats(input_data, remove_duplicates=True) \n", + " cleaned_data, stats = data_cleaning_with_stats(input_data, remove_duplicates=True) \n", " \n", "\n", "Parameters: \n", @@ -362,8 +396,7 @@ "Returns: \n", "cleaned_data (array): The cleaned dataset with string 'null'/'NaN' values converted to actual nulls, and \n", " nulls and duplicates removed based on specified parameters. \n", - "null_count (integer): The number of null values removed from the dataset. \n", - "duplicate_count (integer): The number of duplicate values removed from the dataset. \n", + "stats (dictionary): A dictionary containing the count of null and duplicate values removed from the dataset. \n", "
\n" ], "text/plain": [ @@ -378,7 +411,7 @@ "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcleaned_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mnull_count\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mduplicate_count\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdata_cleaning_with_stats\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mremove_duplicates\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mTrue\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcleaned_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mstats\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdata_cleaning_with_stats\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mremove_duplicates\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mTrue\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", @@ -390,8 +423,7 @@ "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n", "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mcleaned_data\u001b[0m (array): The cleaned dataset with string 'null'/'NaN' values converted to actual nulls, and \n", "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0mnulls and duplicates removed based on specified parameters. \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mnull_count\u001b[0m (integer): The number of null values removed from the dataset. \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mduplicate_count\u001b[0m (integer): The number of duplicate values removed from the dataset. \n" + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mstats\u001b[0m (dictionary): A dictionary containing the count of null and duplicate values removed from the dataset. \n" ] }, "metadata": {}, @@ -410,7 +442,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "58332991cad240af85d751bd13a5bc0c", + "model_id": "787dc04257b046fe96b96b80d630360f", "version_major": 2, "version_minor": 0 }, @@ -440,6 +472,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/a3c50ed0-\n",
+       "21e1-4d5d-8324-9a247f29c2f7?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=438953;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/a3c50ed0-21e1-4d5d-8324-9a247f29c2f7?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/a3c50ed0-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=438953;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/a3c50ed0-21e1-4d5d-8324-9a247f29c2f7?poll=true\u001b\\\u001b[4;34m21e1-4d5d-8324-9a247f29c2f7?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -812,7 +861,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "6a0e86dd85914422a1d9e70baf3e4119", + "model_id": "482c2f1f5ef64aec803292ea4137421a", "version_major": 2, "version_minor": 0 }, @@ -842,6 +891,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/02f47717-\n",
+       "dc2e-4908-af17-7403f9732e2d?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=890260;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/02f47717-dc2e-4908-af17-7403f9732e2d?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/02f47717-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=890260;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/02f47717-dc2e-4908-af17-7403f9732e2d?poll=true\u001b\\\u001b[4;34mdc2e-4908-af17-7403f9732e2d?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -859,42 +925,42 @@ "
\n",
        "                                                  Skill Details:                                                   \n",
        "\n",
-       "Name: data_cleaning_and_visualization                                                                           \n",
+       "Name: clean_and_visualize_data                                                                                  \n",
        "Description: This skill is responsible for cleaning the input data by removing empty values and then visualizing\n",
-       "   it by generating a bar chart. It provides a simple way to preprocess and understand data distribution and       \n",
-       "   patterns.                                                                                                       \n",
+       "   it by generating a bar chart. It provides a simple way to preprocess and understand data.                       \n",
        "Version: 1.0.0                                                                                                  \n",
        "Usage:                                                                                                          \n",
        "\n",
        "                                                                                                                   \n",
-       " data_cleaning_and_visualization([1, 2, None, 4, 5, None])                                                         \n",
+       " clean_and_visualize_data(input_data)                                                                              \n",
        "                                                                                                                   \n",
        "\n",
        "Parameters:                                                                                                     \n",
        "data (array): The input data that needs cleaning and visualization. It should be a list of values.           \n",
        "Required: True                                                                                            \n",
        "Returns:                                                                                                        \n",
+       "cleaned_data (array): The cleaned data after removing empty values.                                          \n",
        "
\n" ], "text/plain": [ "\n", " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: data_cleaning_and_visualization \n", + "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: clean_and_visualize_data \n", "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for cleaning the input data by removing empty values and then visualizing\n", - "\u001b[1;33m \u001b[0mit by generating a bar chart. It provides a simple way to preprocess and understand data distribution and \n", - "\u001b[1;33m \u001b[0mpatterns. \n", + "\u001b[1;33m \u001b[0mit by generating a bar chart. It provides a simple way to preprocess and understand data. \n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdata_cleaning_and_visualization\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m1\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m2\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mNone\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m4\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m5\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mNone\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m 
\u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mclean_and_visualize_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mdata\u001b[0m (array): The input data that needs cleaning and visualization. It should be a list of values. \n", "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", - "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n" + "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mcleaned_data\u001b[0m (array): The cleaned data after removing empty values. \n" ] }, "metadata": {}, @@ -913,7 +979,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "985e68fa93734802a01ad6ebab432db7", + "model_id": "41f38ee4fe9244149b127558f8ed2992", "version_major": 2, "version_minor": 0 }, @@ -943,6 +1009,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/6c71660e-\n",
+       "f9fc-441c-a399-589ba14ef6d7?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=569225;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/6c71660e-f9fc-441c-a399-589ba14ef6d7?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/6c71660e-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=569225;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/6c71660e-f9fc-441c-a399-589ba14ef6d7?poll=true\u001b\\\u001b[4;34mf9fc-441c-a399-589ba14ef6d7?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -960,15 +1043,14 @@ "
\n",
        "                                                  Skill Details:                                                   \n",
        "\n",
-       "Name: data_cleaning_visualization_statistics                                                                    \n",
-       "Description: This skill is responsible for cleaning the input data by removing empty values, visualizing the    \n",
-       "   data by generating a bar chart, and calculating the average value of the data. It provides a comprehensive way  \n",
-       "   to preprocess, understand, and analyze data.                                                                    \n",
+       "Name: data_analysis                                                                                             \n",
+       "Description: This skill is responsible for cleaning the input data, visualizing it by generating a bar chart,   \n",
+       "   and calculating its average. It provides a comprehensive way to preprocess, understand, and analyze data.       \n",
        "Version: 1.0.0                                                                                                  \n",
        "Usage:                                                                                                          \n",
        "\n",
        "                                                                                                                   \n",
-       " cleaned_data, average = data_cleaning_visualization_statistics(input_data)                                        \n",
+       " data_analysis(input_data)                                                                                         \n",
        "                                                                                                                   \n",
        "\n",
        "Parameters:                                                                                                     \n",
@@ -977,22 +1059,22 @@
        "Required: True                                                                                            \n",
        "Returns:                                                                                                        \n",
        "cleaned_data (array): The cleaned data after removing empty values.                                          \n",
-       "the_average_value_of (float): The average value of the cleaned data.                                         \n",
+       "visualization (object): The visualization of the data.                                                       \n",
+       "average (float): The average value of the input data.                                                        \n",
        "
\n" ], "text/plain": [ "\n", " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: data_cleaning_visualization_statistics \n", - "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for cleaning the input data by removing empty values, visualizing the \n", - "\u001b[1;33m \u001b[0mdata by generating a bar chart, and calculating the average value of the data. It provides a comprehensive way \n", - "\u001b[1;33m \u001b[0mto preprocess, understand, and analyze data. \n", + "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: data_analysis \n", + "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for cleaning the input data, visualizing it by generating a bar chart, \n", + "\u001b[1;33m \u001b[0mand calculating its average. It provides a comprehensive way to preprocess, understand, and analyze data. \n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcleaned_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34maverage\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdata_cleaning_visualization_statistics\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m 
\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdata_analysis\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", @@ -1001,7 +1083,8 @@ "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n", "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mcleaned_data\u001b[0m (array): The cleaned data after removing empty values. \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mthe_average_value_of\u001b[0m (float): The average value of the cleaned data. \n" + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mvisualization\u001b[0m (object): The visualization of the data. \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1maverage\u001b[0m (float): The average value of the input data. \n" ] }, "metadata": {}, @@ -1020,7 +1103,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "92d9faa66589490da6353c3930db7e89", + "model_id": "b5b2aeed12e648d8bf9151fe5c6bf67f", "version_major": 2, "version_minor": 0 }, @@ -1050,6 +1133,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/4e02c38f-\n",
+       "ebda-4e18-949a-bc8667454564?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=469878;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/4e02c38f-ebda-4e18-949a-bc8667454564?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/4e02c38f-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=469878;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/4e02c38f-ebda-4e18-949a-bc8667454564?poll=true\u001b\\\u001b[4;34mebda-4e18-949a-bc8667454564?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -1067,15 +1167,14 @@ "
\n",
        "                                                  Skill Details:                                                   \n",
        "\n",
-       "Name: data_cleaning_visualization_statistics                                                                    \n",
-       "Description: This skill is responsible for cleaning the input data by removing empty values, visualizing the    \n",
-       "   data by generating a bar chart, and calculating the average value of the data. It provides a comprehensive way  \n",
-       "   to preprocess, understand, and analyze data.                                                                    \n",
+       "Name: data_analysis                                                                                             \n",
+       "Description: This skill is responsible for cleaning the input data, visualizing it by generating a bar chart,   \n",
+       "   and calculating its average. It provides a comprehensive way to preprocess, understand, and analyze data.       \n",
        "Version: 1.0.0                                                                                                  \n",
        "Usage:                                                                                                          \n",
        "\n",
        "                                                                                                                   \n",
-       " cleaned_data, average = data_cleaning_visualization_statistics(input_data)                                        \n",
+       " data_analysis(input_data)                                                                                         \n",
        "                                                                                                                   \n",
        "\n",
        "Parameters:                                                                                                     \n",
@@ -1084,22 +1183,22 @@
        "Required: True                                                                                            \n",
        "Returns:                                                                                                        \n",
        "cleaned_data (array): The cleaned data after removing empty values.                                          \n",
-       "the_average_value_of (float): The average value of the cleaned data.                                         \n",
+       "visualization (object): The visualization of the data.                                                       \n",
+       "average (float): The average value of the input data.                                                        \n",
        "
\n" ], "text/plain": [ "\n", " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", - "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: data_cleaning_visualization_statistics \n", - "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for cleaning the input data by removing empty values, visualizing the \n", - "\u001b[1;33m \u001b[0mdata by generating a bar chart, and calculating the average value of the data. It provides a comprehensive way \n", - "\u001b[1;33m \u001b[0mto preprocess, understand, and analyze data. \n", + "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: data_analysis \n", + "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for cleaning the input data, visualizing it by generating a bar chart, \n", + "\u001b[1;33m \u001b[0mand calculating its average. It provides a comprehensive way to preprocess, understand, and analyze data. \n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcleaned_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34maverage\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdata_cleaning_visualization_statistics\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m 
\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mdata_analysis\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", @@ -1108,7 +1207,8 @@ "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n", "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mcleaned_data\u001b[0m (array): The cleaned data after removing empty values. \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mthe_average_value_of\u001b[0m (float): The average value of the cleaned data. \n" + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mvisualization\u001b[0m (object): The visualization of the data. \n", + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1maverage\u001b[0m (float): The average value of the input data. \n" ] }, "metadata": {}, @@ -1176,7 +1276,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "5dcc82dbf2f44092920cd322f1ddb061", + "model_id": "2d5d50adfa434150a37aa177f2a14c25", "version_major": 2, "version_minor": 0 }, @@ -1206,6 +1306,23 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/b0de9ace-\n",
+       "2d39-4f25-80c8-19b8c36ec3bc?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=320681;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/b0de9ace-2d39-4f25-80c8-19b8c36ec3bc?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/b0de9ace-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=320681;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/b0de9ace-2d39-4f25-80c8-19b8c36ec3bc?poll=true\u001b\\\u001b[4;34m2d39-4f25-80c8-19b8c36ec3bc?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -1244,8 +1361,8 @@ " Skill Details: \n", "\n", "Name: visualize_data \n", - "Description: This skill is responsible for visualizing the input data using a bar chart. It provides a \n", - " comprehensive overview of the dataset. \n", + "Description: This skill is responsible for visualizing the input data using a bar chart. It provides a visual \n", + " overview of the dataset. \n", "Version: 1.0.0 \n", "Usage: \n", "\n", @@ -1264,8 +1381,8 @@ " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: visualize_data \n", - "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for visualizing the input data using a bar chart. It provides a \n", - "\u001b[1;33m \u001b[0mcomprehensive overview of the dataset. \n", + "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for visualizing the input data using a bar chart. It provides a visual \n", + "\u001b[1;33m \u001b[0moverview of the dataset. 
\n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", @@ -1289,19 +1406,20 @@ " Skill Details: \n", "\n", "Name: calculate_average \n", - "Description: This skill is responsible for calculating the average of the input data. \n", + "Description: This skill is responsible for calculating the average of the input data. It provides a statistical \n", + " analysis of the dataset. \n", "Version: 1.0.0 \n", "Usage: \n", "\n", " \n", - " calculate_average(input_data) \n", + " average = calculate_average(input_data) \n", " \n", "\n", "Parameters: \n", "input_data (any): The input dataset to be analyzed. \n", "Required: True \n", "Returns: \n", - "this_function_return (float): This function returns the average of the input data. \n", + "average (float): The average of the input data. \n", "
\n" ], "text/plain": [ @@ -1309,19 +1427,20 @@ " \u001b[1;4mSkill Details:\u001b[0m \n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mName\u001b[0m: calculate_average \n", - "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for calculating the average of the input data. \n", + "\u001b[1;33m • \u001b[0m\u001b[1mDescription\u001b[0m: This skill is responsible for calculating the average of the input data. It provides a statistical \n", + "\u001b[1;33m \u001b[0manalysis of the dataset. \n", "\u001b[1;33m • \u001b[0m\u001b[1mVersion\u001b[0m: 1.0.0 \n", "\u001b[1;33m • \u001b[0m\u001b[1mUsage\u001b[0m: \n", "\n", "\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcalculate_average\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34maverage\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mcalculate_average\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minput_data\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\n", "\n", "\u001b[1;33m • \u001b[0m\u001b[1mParameters\u001b[0m: \n", "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1minput_data\u001b[0m (any): The input dataset to be analyzed. 
\n", "\u001b[1;33m \u001b[0m\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0mRequired: True \n", "\u001b[1;33m • \u001b[0m\u001b[1mReturns\u001b[0m: \n", - "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1mthis_function_return\u001b[0m (float): This function returns the average of the input data. \n" + "\u001b[1;33m \u001b[0m\u001b[1;33m • \u001b[0m\u001b[1maverage\u001b[0m (float): The average of the input data. \n" ] }, "metadata": {}, diff --git a/docs/examples/07_skills_auto_optimize.ipynb b/docs/examples/07_skills_auto_optimize.ipynb index 075140c..92a3103 100644 --- a/docs/examples/07_skills_auto_optimize.ipynb +++ b/docs/examples/07_skills_auto_optimize.ipynb @@ -196,28 +196,106 @@ { "data": { "text/html": [ - "
{\n",
-       "  \"status\": \"success\",\n",
-       "  \"stdout\": \"\",\n",
-       "  \"stderr\": \"\\u001b[33mWARNING: Package(s) not found: itertools\\u001b[0m\\u001b[33m\\n\"\n",
-       "}\n",
+       "
\n",
+       "▌ Installing dependencies                                                                                        \n",
+       "
\n" + ], + "text/plain": [ + "\n", + "\u001b[35m▌ \u001b[0m\u001b[35mInstalling dependencies\u001b[0m\u001b[35m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
                                                                                                                   \n",
+       " pip show itertools || pip install \"itertools\"                                                                     \n",
+       "                                                                                                                   \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpip\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mshow\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mitertools\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m||\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpip\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34minstall\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"itertools\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "▌ Install dependencies result: {'status': 'success', 'stdout': '', 'stderr': '\\x1b[33mWARNING: Package(s) not    \n",
+       "▌ found: itertools\\x1b[0m\\x1b[33m\\n\\x1b[0m\\x1b[31mERROR: Could not find a version that satisfies the requirement \n",
+       "▌ itertools (from versions: none)\\x1b[0m\\x1b[31m\\n'}                                                             \n",
        "
\n" ], "text/plain": [ - "\u001b[1m{\u001b[0m\n", - " \u001b[1;34m\"status\"\u001b[0m: \u001b[32m\"success\"\u001b[0m,\n", - " \u001b[1;34m\"stdout\"\u001b[0m: \u001b[32m\"\"\u001b[0m,\n", - " \u001b[1;34m\"stderr\"\u001b[0m: \u001b[32m\"\\u001b[33mWARNING: Package(s) not found: itertools\\u001b[0m\\u001b[33m\\n\"\u001b[0m\n", - "\u001b[1m}\u001b[0m\n" + "\n", + "\u001b[35m▌ \u001b[0m\u001b[35mInstall dependencies result: {'status': 'success', 'stdout': '', 'stderr': '\\x1b[33mWARNING: Package(s) not \u001b[0m\u001b[35m \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35mfound: itertools\\x1b[0m\\x1b[33m\\n\\x1b[0m\\x1b[31mERROR: Could not find a version that satisfies the requirement \u001b[0m\n", + "\u001b[35m▌ \u001b[0m\u001b[35mitertools (from versions: none)\\x1b[0m\\x1b[31m\\n'}\u001b[0m\u001b[35m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c772f6a3b2e74b279099a57efef602f6", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "725ae410049c4c9c857d330e4e385640", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" ] }, "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "02dba3e12d8446fe9435a3213dff13d2",
+       "model_id": "a2504e3fc79b43e688dce661b35dd81b",
        "version_major": 2,
        "version_minor": 0
       },
@@ -231,7 +309,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "7ef28624ec87490d934ca62661618de1",
+       "model_id": "0924ad029b6247b3a47cb76c584c16dc",
        "version_major": 2,
        "version_minor": 0
       },
@@ -265,7 +343,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "de9bd6adeea544949701f07d6454a9df",
+       "model_id": "e999daf7a6e1492e8c546b899b741426",
        "version_major": 2,
        "version_minor": 0
       },
@@ -314,7 +392,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "62ed1213deb543d8a0ff18b06e8396ce",
+       "model_id": "44cef59cfedd4730818910c8218ddccb",
        "version_major": 2,
        "version_minor": 0
       },
@@ -344,6 +422,40 @@
      },
      "metadata": {},
      "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/61f07104-\n",
+       "29a1-420c-b831-cbda8df3668e?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=734838;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/61f07104-29a1-420c-b831-cbda8df3668e?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/61f07104-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=734838;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/61f07104-29a1-420c-b831-cbda8df3668e?poll=true\u001b\\\u001b[4;34m29a1-420c-b831-cbda8df3668e?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/7f305c8e-\n",
+       "9bb6-45fc-9035-5af496184866?poll=true                                                                              \n",
+       "
\n" + ], + "text/plain": [ + "Langsmith Run URL: \n", + "\u001b]8;id=99153;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/7f305c8e-9bb6-45fc-9035-5af496184866?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/7f305c8e-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=99153;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/7f305c8e-9bb6-45fc-9035-5af496184866?poll=true\u001b\\\u001b[4;34m9bb6-45fc-9035-5af496184866?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -425,9 +537,10 @@ " \n", " def solve_game_of_24(numbers): \n", " for permutation in permutations(numbers): \n", + " a, b, c, d = permutation \n", + " # Try all possible combinations of arithmetic operations \n", " for ops in product(['+', '-', '*', '/'], repeat=3): \n", - " expression = f'(({permutation[0]} {ops[0]} {permutation[1]}) {ops[1]} {permutation[2]}) {ops[2]} \n", - " {permutation[3]}' \n", + " expression = f'(({a} {ops[0]} {b}) {ops[1]} {c}) {ops[2]} {d}' \n", " try: \n", " result = eval(expression) \n", " if result == 24: \n", @@ -445,9 +558,10 @@ "\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mdef\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;166;226;46;48;2;39;40;34msolve_game_of_24\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mnumbers\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mfor\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpermutation\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34min\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpermutations\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mnumbers\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34ma\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mb\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mc\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34md\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpermutation\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;149;144;119;48;2;39;40;34m# Try all possible combinations of arithmetic operations\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mfor\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mops\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34min\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mproduct\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m+\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m-\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m*\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m/\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mrepeat\u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m3\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mexpression\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mf\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m((\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpermutation\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m0\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mops\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m0\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpermutation\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m1\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m) \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mops\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m1\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpermutation\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m2\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m) 
\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mops\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m2\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", - "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mpermutation\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m3\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", + "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mexpression\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mf\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m((\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34ma\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mops\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m0\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m 
\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mb\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m) \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mops\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m1\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mc\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m) \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mops\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m[\u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m2\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m]\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m{\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34md\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m}\u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mtry\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mresult\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m=\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m 
\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34meval\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m(\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mexpression\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m)\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", "\u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mif\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34mresult\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m==\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m24\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\n", diff --git a/docs/examples/08_creator_agent.ipynb b/docs/examples/08_creator_agent.ipynb index 71b1f35..9ef93d5 100644 --- a/docs/examples/08_creator_agent.ipynb +++ b/docs/examples/08_creator_agent.ipynb @@ -16,7 +16,8 @@ "metadata": {}, "outputs": [], "source": [ - "from creator.agents.creator_agent import open_creator_agent" + "from creator.agents import create_creator_agent\n", + "from creator import config" ] }, { @@ -24,6 +25,15 @@ "execution_count": 2, "metadata": {}, "outputs": [], + "source": [ + "open_creator_agent = create_creator_agent(config)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], "source": [ "messages = [\n", " {\n", @@ -35,13 +45,13 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "6e7dca82c2dd437d943dba009b57d9a9", + "model_id": "9f536b3686f94b1dab2dfd584f4851cb", "version_major": 2, "version_minor": 0 }, @@ -79,7 +89,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -93,13 +103,13 @@ }, { 
"cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "ba05f0dbad2748379194af32d2818c1d", + "model_id": "a59a5d9a43ad4ab59827fbca51396623", "version_major": 2, "version_minor": 0 }, @@ -113,7 +123,21 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "d66203c2d2104397837cc61c9763f2dc", + "model_id": "1e4ddee0f8614a0d8d5829b3c08261b0", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "031a74ab9b284404814fb41b31920d5a", "version_major": 2, "version_minor": 0 }, @@ -147,7 +171,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "c6d74112eaa7438ea8dad9bfcfab4764", + "model_id": "8c67158044da4b96aa45366119e7ffb1", "version_major": 2, "version_minor": 0 }, @@ -161,7 +185,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "28463b69814b46d5bd286b47fe3898cf", + "model_id": "185103ec05594133a317a8f4fe1768fa", "version_major": 2, "version_minor": 0 }, @@ -195,7 +219,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "abf238be7b554d81bd845596e39e33d1", + "model_id": "65425fd5707c40159b5bd51d4fbd412a", "version_major": 2, "version_minor": 0 }, @@ -209,7 +233,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "536ad56670e545b785b3aab24246c8dd", + "model_id": "9d680c3af9c54b849aaf07b0a9a9a95b", "version_major": 2, "version_minor": 0 }, @@ -220,6 +244,16 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYUAAAGFCAYAAAASI+9IAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAJ/0lEQVR4nO3db4hldR3H8e9ZZ3N3zXSXzV20VUv6a5hUK5tgbUUUEmtSSUWYBdGjHoc9UXwQZBARJD0IinqUPqgINoiCRDG1IiPLLdTNajVdtd0YzNxtTw/u+pkZ0+XOzL3nzjnzesHA3Nkzc38cztn3/O6c87tN27ZtAUBVbZj1AABYO0QBgBAFAEIUAAhRACBEAYAQBQBCFACIuXE3bJppDmOa+jlw9xQyrqa/J2dvDfn8NFMAIEQBgBAFAEIUAAhRACBEAYAQBQBCFACIsW9em6gLf1F18a1V591bddYjVZuOVp04req5M6uOnl/11OuqDu2u+usVVY++rfp6AxpA3zTjvh3nRG6a3P5A1VWfqdp19/jf88TFVbfcv4on7WdQhnzHJJPljubuDfn87G6msPO3VZ96T9XmIwtfm99R9ejbq+Z3VrVN1Zanqs65v2rbg1XNyZ2+6ciL/TQApqCbKGw4VvXhTywE4V/nVu3/RtWf9lW1L/JnjS2Hq97wo6pLvle19eFOhghAVy8fvem2qmuuGX1+bHPVN+8b/d1gHFsfqvrnRat48n5OrYc8PWWyvHzUvSGfn91cfXTRTxc+P3DV+EGoWmUQAFiObqLwikMLnx+9oJOnBGD5uonC4r8bnH2wk6cEYPm6icLTi14Cev2Pq175x06eFoDl6SYKBz608PnGf1d9+p1Vl3+l6sxDL/ktAHSvu5vXPr5vNEtYrG1O3r182eh+hb/vqXrsrVUnJnmlbD+vzBjy1Q1MlquPujfk87O7KLxsvurqa6ve+INTb/fcGVV//mDVrz9X9Zd3r/JJq0SBoROF7g35/Ox2mYuqqtfur9rztapX/7xqw4lTb3tgX9UPv1P17NZVPGE/T5ghH3RMlih0b8jnZ/dReN6Ww6OF8XbdVXXub0bLYJw+///bPXFx1bd+OVosb0X6ecIM+aBjskShe0M+P2cXhRfacLzqVXdXXfrtqrd8t+q04wv/ds/nq37y9RX+4H6eMEM+6JgsUejekM/PtROFxXbdVfXJ9y/MHI5tqvry01XHN6/gh/XzhBnyQcdkiUL3hnx+rs032fnb5VV3fHHh8cZnq8771ezGA7BOrM0oVFU9+IGlj1/+2GzGAbCOrN0oHN+09PF/T5/NOADWkbUbhZ2/W/r46PmzGQfAOtJNFN7x1arX/Gz87Tc+U3XFlxYez++o+selEx8WAEt1E4Xz7q269n1Vn91dtfuWqjMeP8W291Rd966qHb9f+NqdX3jxd2gDYKK6uST1Ix+revP3l37t6YtGN6Y9s3201tEZh6t23le19QVLaz9wddVtt65iPaR+Xq435EvemCyXpHZvyOdnN+/R/PB7R7OFxf/hb3to9PFSjm2uuuP6qjuvn/ACeQC8lG5vXjvn/qoLbh/dubz9QNXZj1SdfrSqaav+c2bV/M6qxy8ZLYT3h4+ucs2j5/Xzt6gh/ybCZJkpdG/I5+favKN5ovo58CEfdEyWKHRvyOenv94CEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAsYzlR62v0qU+r2fT13Vh+rrP+7q/+2zIx4qZAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCADE37oZt205zHLxA0zSzHgKwDpkpABCiAECIAgAhCgCEKAAQogB
AiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQMzNegCwVrRtO+shrEjTNLMewor0dX8PnZkCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAMTfuhk3TTHMcU9O27ayHsO44VrrV13GzNpkpABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEHPjbti27TTHMTVN08x6COtOX4+VvnKMM0lmCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAhCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAMTcrAcwbW3bznoI607TNLMeAkzVkP9fMVMAIEQBgBAFAEIUAAhRACBEAYAQBQBCFAAIUQAgRAGAEAUAQhQACFEAIEQBgBAFAEIUAAhRACBEAYAQBQBCFAAIUQAgRAGAEAUAQhQACFEAIEQBgBAFAEIUAAhRACBEAYAQBQBCFAAIUQAgRAGAEAUAQhQACFEAIEQBgBAFAEIUAAhRACBEAYAQBQBCFAAIUQAgRAGAmBt3w6ZppjmOqWnbdtZDWJG+7u8q+xz6zEwBgBAFAEIUAAhRACBEAYAQBQBCFAAIUQDo0t69VU2zso/rrpv68EQBgBj7jmYAJmz37qrLLht/+z17pjeWk0QBYFauvLLqxhtnPYolvHwEQIgCACEKAIQoABCiAECIAgDhklSAWdm/v+rJJ8ff/qabqrZtm954qqppx3zvxL6+VaG3huyefc7QreoY37u36vbbV/a9Bw9WXXjhyp97DF4+AiBEAWBWbrihqm3H/5jyLKFKFABYRBQACFEAIEQBgBAFAEIUAAhRACBEAYCw9hHArCx37aMtW6puvnl64ylrH61Zfd3fVfY5wzeztY/OOqvqyJGVP/cYvHwEQJgprFF93d9V9jnD19djfBxmCgCEKAAQogBAiAIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgAx9oJ4AAyfmQIAIQoAhCgAEKIAQIgCACEKAIQoABCiAECIAgDxP9+DpGehN8qKAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -243,7 +277,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "4ccc78982c0d4cbd906b8d9a0ace7306", + "model_id": "262ed48cde63468ca3f695f67131d00d", "version_major": 2, "version_minor": 0 }, @@ -277,7 +311,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "5a2e9a34523b4d57ada811f712f751d8", + "model_id": "4b74f781084e4861b0d3cffa933762da", "version_major": 2, "version_minor": 0 }, @@ -310,13 +344,16 @@ }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "de0d200823d047e8bd963b6c3321e194", - "version_major": 2, - "version_minor": 0 - }, + "text/html": [ + "
Langsmith Run URL:                                                                                                 \n",
+       "http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/a790ec8f-\n",
+       "71ad-4de2-b81c-74f1575ebb0b?poll=true                                                                              \n",
+       "
\n" + ], "text/plain": [ - "Output()" + "Langsmith Run URL: \n", + "\u001b]8;id=656281;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/a790ec8f-71ad-4de2-b81c-74f1575ebb0b?poll=true\u001b\\\u001b[4;34mhttp://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/a790ec8f-\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b]8;id=656281;http://localhost/o/00000000-0000-0000-0000-000000000000/projects/p/856d5024-48fe-4a9c-b1f9-5236f8d3aebd/r/a790ec8f-71ad-4de2-b81c-74f1575ebb0b?poll=true\u001b\\\u001b[4;34m71ad-4de2-b81c-74f1575ebb0b?poll=true\u001b[0m\u001b]8;;\u001b\\ \n" ] }, "metadata": {}, @@ -324,14 +361,13 @@ }, { "data": { - "text/html": [ - "
\n",
-       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/solve_random_maze                                 \n",
-       "
\n" - ], + "application/vnd.jupyter.widget-view+json": { + "model_id": "671359bd229c41a3b2c69a96b05faefe", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "\n", - "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/solve_random_maze\u001b[0m\u001b[35m \u001b[0m\n" + "Output()" ] }, "metadata": {}, @@ -340,7 +376,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "be609bd2309e41e99329d856893b39df", + "model_id": "d3e46fda387046bc816af54504b016ee", "version_major": 2, "version_minor": 0 }, @@ -351,6 +387,21 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n",
+       "▌ saved to /Users/gongjunmin/.cache/open_creator/skill_library/solve_maze                                        \n",
+       "
\n" + ], + "text/plain": [ + "\n", + "\u001b[35m▌ \u001b[0m\u001b[35msaved to /Users/gongjunmin/.cache/open_creator/skill_library/solve_maze\u001b[0m\u001b[35m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -374,7 +425,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "48a0f6cb1f6d4311bd0f7db3e7dd3994", + "model_id": "51ec3eacd34c40688129654e96f9fb31", "version_major": 2, "version_minor": 0 }, @@ -412,7 +463,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -426,9 +477,37 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "3f6bd9ad7125466381d2e546d0a76338", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "548c2d4a340e4f1ab798cbc2361152e2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -558,34 +637,6 @@ "metadata": {}, "output_type": "display_data" }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "418c6e79ab4047b5bcac823778e0659c", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Output()" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "9b2ce948f375427d9d80e5e0372fec1e", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Output()" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, { "data": { "text/html": [ @@ -609,7 +660,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": 
"4b137fec8d4a46b3a1f2fa49f77517da", + "model_id": "41340c9f71fa45e3b306ec07eee7ca56", "version_major": 2, "version_minor": 0 }, diff --git a/docs/examples/09_memgpt.ipynb b/docs/examples/09_memgpt.ipynb new file mode 100644 index 0000000..20c6fc9 --- /dev/null +++ b/docs/examples/09_memgpt.ipynb @@ -0,0 +1,78 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from creator import config\n", + "from creator.memgpt import create_memgpt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session_id = \"test_session\"\n", + "config.memgpt_config.session_id = session_id\n", + "memgpt = create_memgpt(config)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "memgpt.memory_manager.clear()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "memgpt.memory_manager.messages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "while 1:\n", + " user_request = input(\"> \")\n", + " if user_request.startswith(\"/exit\"):\n", + " break\n", + " session_id = await memgpt.arun({\"user_request\": user_request, \"session_id\": session_id})" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "open_creator_online", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/index.md b/docs/index.md index 53e5c88..79f9c45 100644 --- a/docs/index.md +++ b/docs/index.md @@ -33,6 +33,3 @@ ## Framework ![](tech_report/figures/framework.png) - 
---- - diff --git a/docs/pics/logo.png b/docs/pics/logo.png new file mode 100644 index 0000000..f1ab997 Binary files /dev/null and b/docs/pics/logo.png differ diff --git a/mkdocs.yml b/mkdocs.yml index 5b2d929..1b40141 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,5 +1,7 @@ -site_name: Open-Creator +site_name: Open Creator site_url: https://open-creator.github.io +repo_url: https://github.com/timedomain-tech/open-creator +repo_name: open-creator nav: - Getting Start: - Overview: index.md @@ -18,7 +20,56 @@ nav: - Commands: commands.md - Configurations: configurations.md -theme: readthedocs +theme: + name: material + logo: pics/logo.png + icon: + repo: fontawesome/brands/github + palette: + # Palette toggle for automatic mode + - media: "(prefers-color-scheme)" + toggle: + icon: material/brightness-auto + name: Switch to light mode + + # Palette toggle for light mode + - media: "(prefers-color-scheme: light)" + scheme: default + toggle: + icon: material/brightness-7 + name: Switch to dark mode + + # Palette toggle for dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + primary: black + toggle: + icon: material/brightness-4 + name: Switch to system preference + + font: + text: Roboto + code: Roboto Mono + features: + - content.code.copy + - content.code.select + - content.code.annotate + - content.tabs.link + - header.autohide + - announce.dismiss + - navigation.instant + - navigation.instant.prefetch + - navigation.instant.progress + - navigation.tracking + - navigation.tabs + - navigation.tabs.sticky + - navigation.path + - navigation.expand + - search + - search.suggest + - search.highlight + - search.share + - navigation.footer plugins: - search @@ -26,3 +77,24 @@ plugins: kernel_name: python3 ignore_h1_titles: true include_requirejs: true + - git-revision-date-localized: + enable_creation_date: true + - git-authors + +markdown_extensions: + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true + - 
pymdownx.inlinehilite + - pymdownx.snippets + - pymdownx.superfences + +extra: + social: + - icon: fontawesome/brands/twitter + link: https://twitter.com/UseOpenCreator + - icon: fontawesome/brands/github + link: https://github.com/timedomain-tech/open-creator + - icon: fontawesome/brands/discord + link: https://discord.gg/eEraZEry53 diff --git a/pyproject.toml b/pyproject.toml index 289108f..073df81 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "open-creator" packages = [ {include = "creator"}, ] -version = "0.1.2" +version = "0.1.3" description = "Build your costomized skill library" authors = ["JunminGONG "] readme = "README.md" @@ -12,23 +12,23 @@ include = ["creator/config.yaml"] [tool.poetry.dependencies] python = "^3.10" rich = "^13.5.2" -langchain = ">=0.0.317" +langchain = ">=0.0.323" huggingface_hub = "^0.17.2" loguru = "^0.7.2" pydantic = "^2.0.3" python-dotenv = "^1.0.0" openai = "^0.28.1" tiktoken = "^0.5.1" -prompt_toolkit = "^3.0.39" +prompt_toolkit = ">=3.0.36" inquirer = "^3.1.3" pyyaml = "^6.0.1" appdirs = "^1.4.4" -urllib3 = "^2.0.6" fastapi = "^0.103.1" uvicorn = "^0.23.2" streamlit = "^1.27.2" - - +questionary = "^2.0.1" +langsmith = "^0.0.43" +qdrant-client = "^1.6.4" [tool.poetry.dependencies.pyreadline3] version = "^3.4.1"