diff --git a/python/.gitignore b/python/.gitignore
index 6e128526e14c..d7d27c394883 100644
--- a/python/.gitignore
+++ b/python/.gitignore
@@ -157,7 +157,7 @@ cython_debug/
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
+.idea/
.ruff_cache/
diff --git a/python/packages/autogen-core/docs/src/reference/index.md b/python/packages/autogen-core/docs/src/reference/index.md
index ffa2581ba96d..51c8d40e1791 100644
--- a/python/packages/autogen-core/docs/src/reference/index.md
+++ b/python/packages/autogen-core/docs/src/reference/index.md
@@ -52,6 +52,8 @@ python/autogen_ext.models.openai
python/autogen_ext.models.replay
python/autogen_ext.models.azure
python/autogen_ext.models.semantic_kernel
+python/autogen_ext.task_centric_memory
+python/autogen_ext.task_centric_memory.utils
python/autogen_ext.models.ollama
python/autogen_ext.tools.code_execution
python/autogen_ext.tools.graphrag
diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.task_centric_memory.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.task_centric_memory.rst
new file mode 100644
index 000000000000..8cb37807255d
--- /dev/null
+++ b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.task_centric_memory.rst
@@ -0,0 +1,8 @@
+autogen\_ext.task\_centric\_memory
+==================================
+
+
+.. automodule:: autogen_ext.task_centric_memory
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.task_centric_memory.utils.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.task_centric_memory.utils.rst
new file mode 100644
index 000000000000..805d42ee310c
--- /dev/null
+++ b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.task_centric_memory.utils.rst
@@ -0,0 +1,8 @@
+autogen\_ext.task\_centric\_memory.utils
+========================================
+
+
+.. automodule:: autogen_ext.task_centric_memory.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/python/packages/autogen-ext/imgs/task_centric_memory.png b/python/packages/autogen-ext/imgs/task_centric_memory.png
new file mode 100644
index 000000000000..763b4b3cfbf0
--- /dev/null
+++ b/python/packages/autogen-ext/imgs/task_centric_memory.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9d5d3cdaa77c863ecbeec41ce988c1018d49b2e914a9b3775f6574ea4bbbcee
+size 37076
diff --git a/python/packages/autogen-ext/imgs/task_centric_memory_2.png b/python/packages/autogen-ext/imgs/task_centric_memory_2.png
new file mode 100644
index 000000000000..1aed539683e2
--- /dev/null
+++ b/python/packages/autogen-ext/imgs/task_centric_memory_2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:119f7baf93e71fee417d1a9f9f994f6b3d4fbbc5aae930096a6897e755167e61
+size 28253
diff --git a/python/packages/autogen-ext/imgs/task_centric_memory_3.png b/python/packages/autogen-ext/imgs/task_centric_memory_3.png
new file mode 100644
index 000000000000..7674512390b8
--- /dev/null
+++ b/python/packages/autogen-ext/imgs/task_centric_memory_3.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2af80416085182ba099e5094014f37b7f88daf972dce704d862540566a52bb9
+size 30082
diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml
index 049a3cedbd7e..9b0539cf18ad 100644
--- a/python/packages/autogen-ext/pyproject.toml
+++ b/python/packages/autogen-ext/pyproject.toml
@@ -66,6 +66,8 @@ jupyter-executor = [
"nbclient>=0.10.2",
]
+task-centric-memory = ["chromadb>=0.6.3"]
+
semantic-kernel-core = [
"semantic-kernel>=1.17.1",
]
@@ -140,7 +142,7 @@ dev = [
[tool.ruff]
extend = "../../pyproject.toml"
include = ["src/**", "tests/*.py"]
-exclude = ["src/autogen_ext/agents/web_surfer/*.js", "src/autogen_ext/runtimes/grpc/protos", "tests/protos"]
+exclude = ["src/autogen_ext/agents/web_surfer/*.js", "src/autogen_ext/runtimes/grpc/protos", "tests/protos", "README.md"]
[tool.pyright]
extends = "../../pyproject.toml"
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/README.md b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/README.md
new file mode 100644
index 000000000000..9553245a74f9
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/README.md
@@ -0,0 +1,209 @@
+# Task-Centric Memory
+_(EXPERIMENTAL, RESEARCH IN PROGRESS)_
+
+**Task-Centric Memory** is an active research project aimed at giving AI agents the ability to:
+
+* Accomplish general tasks more effectively by learning quickly and continually beyond context-window limitations.
+* Remember guidance, corrections, plans, and demonstrations provided by users.
+* Learn through the agent's own experience and adapt quickly to changing circumstances.
+* Avoid repeating mistakes on tasks that are similar to those previously encountered.
+
+## Installation
+
+Install AutoGen and its extension package as follows:
+
+```bash
+pip install -U "autogen-agentchat" "autogen-ext[openai]" "autogen-ext[task-centric-memory]"
+```
+
+## Quickstart
+
+
+
+
+
+This first code snippet runs a basic test to verify that the installation was successful,
+as illustrated by the diagram to the right.
+
+```python
+import asyncio
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from autogen_ext.task_centric_memory import TaskCentricMemoryController
+from autogen_ext.task_centric_memory.utils import PageLogger
+
+
+async def main() -> None:
+ client = OpenAIChatCompletionClient(model="gpt-4o")
+ logger = PageLogger(config={"level": "DEBUG", "path": "~/pagelogs/quickstart"}) # Optional, but very useful.
+ memory_controller = TaskCentricMemoryController(reset=True, client=client, logger=logger)
+
+ # Add a few task-insight pairs as memories, where an insight can be any string that may help solve the task.
+ await memory_controller.add_memo(task="What color do I like?", insight="Deep blue is my favorite color")
+ await memory_controller.add_memo(task="What's another color I like?", insight="I really like cyan")
+ await memory_controller.add_memo(task="What's my favorite food?", insight="Halibut is my favorite")
+
+ # Retrieve memories for a new task that's related to only two of the stored memories.
+ memos = await memory_controller.retrieve_relevant_memos(task="What colors do I like most?")
+ print("{} memories retrieved".format(len(memos)))
+ for memo in memos:
+ print("- " + memo.insight)
+
+asyncio.run(main())
+```
+
+
+
+
+
+This second code example shows one way to incorporate task-centric memory directly into an AutoGen agent,
+in this case a subclass of RoutedAgent.
+To keep the code small, only the simplest form of memory retrieval is exercised by this agent.
+
+```python
+import asyncio
+from dataclasses import dataclass
+from typing import List
+
+from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler
+from autogen_core.models import ChatCompletionClient, LLMMessage, SystemMessage, UserMessage
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from autogen_ext.task_centric_memory import TaskCentricMemoryController
+from autogen_ext.task_centric_memory.utils import PageLogger
+
+
+@dataclass
+class Message:
+ content: str
+
+
+class MemoryEnabledAgent(RoutedAgent):
+ def __init__(
+ self, description: str, model_client: ChatCompletionClient, memory_controller: TaskCentricMemoryController
+ ) -> None:
+ super().__init__(description)
+ self._model_client = model_client
+ self._memory_controller = memory_controller
+
+ @message_handler
+ async def handle_message(self, message: Message, context: MessageContext) -> Message:
+ # Retrieve relevant memories for the task.
+ memos = await self._memory_controller.retrieve_relevant_memos(task=message.content)
+
+ # Format the memories for the model.
+ formatted_memos = "Info that may be useful:\n" + "\n".join(["- " + memo.insight for memo in memos])
+ print(f"{'-'*23}Text appended to the user message{'-'*24}\n{formatted_memos}\n{'-'*80}")
+
+ # Create the messages for the model with the retrieved memories.
+ messages: List[LLMMessage] = [
+ SystemMessage(content="You are a helpful assistant."),
+ UserMessage(content=message.content, source="user"),
+ UserMessage(content=formatted_memos, source="user"),
+ ]
+
+ # Call the model with the messages.
+ model_result = await self._model_client.create(messages=messages)
+ assert isinstance(model_result.content, str)
+
+ # Send the model's response to the user.
+ return Message(content=model_result.content)
+
+
+async def main() -> None:
+ client = OpenAIChatCompletionClient(model="gpt-4o")
+ logger = PageLogger(config={"level": "DEBUG", "path": "~/pagelogs/quickstart2"}) # Optional, but very useful.
+ memory_controller = TaskCentricMemoryController(reset=True, client=client, logger=logger)
+
+ # Prepopulate memory to mimic learning from a prior session.
+ await memory_controller.add_memo(task="What color do I like?", insight="Deep blue is my favorite color")
+ await memory_controller.add_memo(task="What's another color I like?", insight="I really like cyan")
+ await memory_controller.add_memo(task="What's my favorite food?", insight="Halibut is my favorite")
+
+ # Create and start an agent runtime.
+ runtime = SingleThreadedAgentRuntime()
+ runtime.start()
+
+ # Register the agent type.
+ await MemoryEnabledAgent.register(
+ runtime,
+ "memory_enabled_agent",
+ lambda: MemoryEnabledAgent(
+ "A agent with memory", model_client=client, memory_controller=memory_controller
+ ),
+ )
+
+ # Send a direct message to the agent.
+ request = "What colors do I like most?"
+ print("User request: " + request)
+ response = await runtime.send_message(
+ Message(content=request), AgentId("memory_enabled_agent", "default")
+ )
+ print("Agent response: " + response.content)
+
+ # Stop the agent runtime.
+ await runtime.stop()
+
+
+asyncio.run(main())
+```
+
+## Sample Code
+
+The example above modifies the agent's code.
+But it's also possible to add task-centric memory to an agent or multi-agent team _without_ modifying any agent code.
+See the [sample code](../../../../../samples/task_centric_memory) for that and other forms of fast, memory-based learning.
+
+
+## Architecture
+
+
+
+
+
+The block diagram to the right outlines the key components of the architecture in the most general form.
+The memory components are shown in blue, and the green blocks represent external components.
+
+The **Task-Centric Memory Controller** implements the fast-learning methods described below,
+and manages communication with a **Task-Centric Memory Bank** containing a vector DB and associated structures.
+
+The **Agent or Team** is the AI agent or team of agents to which memory is being added.
+The sample code shows how to add task-centric memory to a simple AssistantAgent or a MagenticOneGroupChat team.
+
+The **Apprentice, app, or service** represents the code that instantiates the agent and memory controller,
+and routes information between them, effectively wrapping agent and memory into a combined component.
+The term _Apprentice_ connotes that this combination uses memory to learn quickly on the job.
+The Apprentice class is a minimal reference implementation provided as utility code for illustration and testing,
+but most applications will use their own code instead of the Apprentice.
+
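+Since most applications supply their own wrapper code, the memory controller accepts an optional `task_assignment_callback`
+for routing tasks to whatever agent or team the caller manages. Below is a minimal sketch of that wiring;
+the `assign_task_to_agent` stub is a hypothetical placeholder for your own agent-side code, not part of this package.
+
+```python
+import asyncio
+from typing import Tuple
+
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from autogen_ext.task_centric_memory import TaskCentricMemoryController
+
+
+async def assign_task_to_agent(task: str) -> Tuple[str, str]:
+    # Hypothetical placeholder: send the task to your agent or team here,
+    # then return the final response along with the full work history.
+    return "response", "work history"
+
+
+async def main() -> None:
+    client = OpenAIChatCompletionClient(model="gpt-4o")
+    memory_controller = TaskCentricMemoryController(
+        reset=False, client=client, task_assignment_callback=assign_task_to_agent
+    )
+    # assign_task() appends any relevant memories to the task before calling the callback.
+    response = await memory_controller.assign_task("What colors do I like most?")
+    print(response)
+
+
+asyncio.run(main())
+```
+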
+## Memory Creation and Storage
+
+Each stored memory (called a _memo_) contains a text insight and (optionally) a task description.
+The insight is intended to help the agent accomplish future tasks that are similar to a prior task.
+The memory controller provides methods for different types of learning.
+If the user provides advice for solving a given task, the advice is extracted and stored as an insight.
+If the user demonstrates how to perform a task,
+the task and demonstration are stored together as an insight used to solve similar but different tasks.
+If the agent is given a task (free of side-effects) and some means of determining success or failure,
+the memory controller repeats the following learning loop in the background some number of times:
+
+1. Test the agent on the task a few times to check for a failure.
+2. If a failure is found, analyze the agent's response in order to:
+    1. Diagnose the failure of reasoning or missing information,
+    2. Phrase a general piece of advice, such as what a teacher might give to a student,
+    3. Temporarily append this advice to the task description, and return to step 1.
+3. Once a piece of advice succeeds in helping the agent solve the task a number of times, add the advice as an insight to memory.
+4. For each insight to be stored in memory, an LLM is prompted to generate a set of free-form, multi-word topics related to the insight. Each topic is embedded into a fixed-length vector and stored in a vector DB that maps it to the topic's related insight.
+
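+The snippet below sketches these storage paths. It assumes a model client and a task assignment callback
+like those in the Architecture sketch above; `train_on_task` runs the learning loop just described,
+so it is only meaningful when a real agent sits behind the callback.
+
+```python
+import asyncio
+from typing import Tuple
+
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from autogen_ext.task_centric_memory import TaskCentricMemoryController
+
+
+async def assign_task_to_agent(task: str) -> Tuple[str, str]:
+    # Hypothetical placeholder for a real agent or team.
+    return "response", "work history"
+
+
+async def main() -> None:
+    client = OpenAIChatCompletionClient(model="gpt-4o")
+    memory_controller = TaskCentricMemoryController(
+        reset=True, client=client, task_assignment_callback=assign_task_to_agent
+    )
+
+    # Store user advice as an insight, with the related task as context.
+    await memory_controller.add_memo(task="What's my favorite food?", insight="Halibut is my favorite")
+
+    # Store a user demonstration as a combined task-solution insight.
+    await memory_controller.add_task_solution_pair_to_memory(
+        task="Where are my house keys?", solution="Check the hook by the front door first."
+    )
+
+    # Learn from failures on a task with a verifiable answer.
+    await memory_controller.train_on_task(task="What's the capital of France?", expected_answer="Paris")
+
+
+asyncio.run(main())
+```
+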
+## Memory Retrieval and Usage
+
+The memory controller provides methods for different types of memory retrieval.
+When the agent is given a task, the following steps are performed by the controller:
+1. The task is rephrased into a generalized form.
+2. A set of free-form, multi-word query topics is generated from the generalized task.
+3. A potentially large number of previously stored topics, those most similar to each query topic, are retrieved from the vector DB along with the insights they map to.
+4. These candidate memos are filtered by the aggregate similarity of their stored topics to the query topics.
+5. In the final filtering stage, an LLM is prompted to validate only those insights that seem potentially useful in solving the task at hand.
+
+Retrieved insights that pass the filtering steps are listed under a heading like
+"Important insights that may help solve tasks like this", then appended to the task description before it is passed to the agent as usual.
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/__init__.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/__init__.py
new file mode 100644
index 000000000000..2ffa639a9ab7
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/__init__.py
@@ -0,0 +1,3 @@
+from .task_centric_memory_controller import TaskCentricMemoryController
+
+__all__ = ["TaskCentricMemoryController"]
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_prompter.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_prompter.py
new file mode 100644
index 000000000000..2602feeb2c61
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_prompter.py
@@ -0,0 +1,287 @@
+import time
+from typing import List, Union
+
+from autogen_core import Image
+from autogen_core.models import (
+ AssistantMessage,
+ ChatCompletionClient,
+ CreateResult,
+ LLMMessage,
+ SystemMessage,
+ UserMessage,
+)
+
+from .utils._functions import UserContent
+from .utils.page_logger import PageLogger
+
+
+class Prompter:
+ """
+ Centralizes most of the Apprentice prompts sent to the model client.
+
+ Args:
+ client: The client to call the model.
+ logger: An optional logger. If None, no logging will be performed.
+ """
+
+ def __init__(self, client: ChatCompletionClient, logger: PageLogger | None = None) -> None:
+ if logger is None:
+ logger = PageLogger() # Nothing will be logged by this object.
+ self.logger = logger
+
+ self.client = client
+ self.default_system_message_content = "You are a helpful assistant."
+ self.time_spent_in_model_calls = 0.0
+ self.num_model_calls = 0
+ self.start_time = time.time()
+
+ # Create the chat history
+ self._chat_history: List[LLMMessage] = []
+
+ async def call_model(
+ self,
+ summary: str,
+ user_content: UserContent,
+ system_message_content: str | None = None,
+ keep_these_messages: bool = True,
+ ) -> str:
+ """
+ Calls the model client with the given input and returns the response.
+ """
+ # Prepare the input message list
+ if system_message_content is None:
+ system_message_content = self.default_system_message_content
+ system_message: LLMMessage
+ if self.client.model_info["family"] == "o1":
+ # No system message allowed, so pass it as the first user message.
+ system_message = UserMessage(content=system_message_content, source="User")
+ else:
+ # System message allowed.
+ system_message = SystemMessage(content=system_message_content)
+
+ user_message = UserMessage(content=user_content, source="User")
+ input_messages = [system_message] + self._chat_history + [user_message]
+
+ # Double check the types of the input messages.
+ for message in input_messages:
+ for part in message.content:
+ assert isinstance(part, str) or isinstance(part, Image), "Invalid message content type: {}".format(
+ type(part)
+ )
+
+ # Call the model
+ start_time = time.time()
+ response = await self.client.create(input_messages)
+ assert isinstance(response, CreateResult)
+ response_string = response.content
+ assert isinstance(response_string, str)
+        response_message = AssistantMessage(content=response_string, source="Assistant")
+ self.time_spent_in_model_calls += time.time() - start_time
+ self.num_model_calls += 1
+
+ # Log the model call
+ self.logger.log_model_call(summary=summary, input_messages=input_messages, response=response)
+
+ # Manage the chat history
+ if keep_these_messages:
+ self._chat_history.append(user_message)
+ self._chat_history.append(response_message)
+
+ # Return the response as a string for now
+ return response_string
+
+ def _clear_history(self) -> None:
+ """
+ Empties the message list containing the chat history.
+ """
+ self._chat_history = []
+
+ async def learn_from_failure(
+ self, task_description: str, memory_section: str, final_response: str, expected_answer: str, work_history: str
+ ) -> str:
+ """
+ Tries to create an insight to help avoid the given failure in the future.
+ """
+ sys_message = """- You are a patient and thorough teacher.
+- Your job is to review work done by students and help them learn how to do better."""
+
+ user_message: List[Union[str, Image]] = []
+ user_message.append("# A team of students made a mistake on the following task:\n")
+        user_message.append(task_description)
+
+ if len(memory_section) > 0:
+ user_message.append(memory_section)
+
+ user_message.append("# Here's the expected answer, which would have been correct:\n")
+ user_message.append(expected_answer)
+
+ user_message.append("# Here is the students' answer, which was INCORRECT:\n")
+ user_message.append(final_response)
+
+ user_message.append("# Please review the students' work which follows:\n")
+ user_message.append("**----- START OF STUDENTS' WORK -----**\n\n")
+ user_message.append(work_history)
+ user_message.append("\n**----- END OF STUDENTS' WORK -----**\n\n")
+
+ user_message.append(
+ "# Now carefully review the students' work above, explaining in detail what the students did right and what they did wrong.\n"
+ )
+
+ self._clear_history()
+ await self.call_model(
+ summary="Ask the model to learn from this failure",
+ system_message_content=sys_message,
+ user_content=user_message,
+ )
+ user_message = [
+ "Now put yourself in the mind of the students. What misconception led them to their incorrect answer?"
+ ]
+ await self.call_model(
+ summary="Ask the model to state the misconception",
+ system_message_content=sys_message,
+ user_content=user_message,
+ )
+
+ user_message = [
+ "Please express your key insights in the form of short, general advice that will be given to the students. Just one or two sentences, or they won't bother to read it."
+ ]
+ insight = await self.call_model(
+ summary="Ask the model to formulate a concise insight",
+ system_message_content=sys_message,
+ user_content=user_message,
+ )
+ return insight
+
+ async def find_index_topics(self, input_string: str) -> List[str]:
+ """
+ Returns a list of topics related to the given string.
+ """
+ sys_message = """You are an expert at semantic analysis."""
+
+ user_message: List[Union[str, Image]] = []
+ user_message.append("""- My job is to create a thorough index for a book called Task Completion, and I need your help.
+- Every paragraph in the book needs to be indexed by all the topics related to various kinds of tasks and strategies for completing them.
+- Your job is to read the text below and extract the task-completion topics that are covered.
+- The number of topics depends on the length and content of the text. But you should list at least one topic, and potentially many more.
+- Each topic you list should be a meaningful phrase composed of a few words. Don't use whole sentences as topics.
+- Don't include details that are unrelated to the general nature of the task, or a potential strategy for completing tasks.
+- List each topic on a separate line, without any extra text like numbering, or bullets, or any other formatting, because we don't want those things in the index of the book.\n\n""")
+
+ user_message.append("# Text to be indexed\n")
+ user_message.append(input_string)
+
+ self._clear_history()
+ topics = await self.call_model(
+ summary="Ask the model to extract topics", system_message_content=sys_message, user_content=user_message
+ )
+
+ # Parse the topics into a list.
+ topic_list: List[str] = []
+ for line in topics.split("\n"):
+ if len(line) > 0:
+ topic_list.append(line)
+
+ return topic_list
+
+ async def generalize_task(self, task_description: str) -> str:
+ """
+ Attempts to rewrite a task description in a more general form.
+ """
+
+ sys_message = """You are a helpful and thoughtful assistant."""
+
+ user_message: List[Union[str, Image]] = [
+ "We have been given a task description. Our job is not to complete the task, but merely rephrase the task in simpler, more general terms, if possible. Please reach through the following task description, then explain your understanding of the task in detail, as a single, flat list of all the important points."
+ ]
+ user_message.append("\n# Task description")
+ user_message.append(task_description)
+
+ self._clear_history()
+ await self.call_model(
+ summary="Ask the model to rephrase the task in a list of important points",
+ system_message_content=sys_message,
+ user_content=user_message,
+ )
+
+ user_message = [
+ "Do you see any parts of this list that are irrelevant to actually solving the task? If so, explain which items are irrelevant."
+ ]
+ await self.call_model(
+ summary="Ask the model to identify irrelevant points",
+ system_message_content=sys_message,
+ user_content=user_message,
+ )
+
+ user_message = [
+ "Revise your original list to include only the most general terms, those that are critical to solving the task, removing any themes or descriptions that are not essential to the solution. Your final list may be shorter, but do not leave out any part of the task that is needed for solving the task. Do not add any additional commentary either before or after the list."
+ ]
+ generalized_task = await self.call_model(
+ summary="Ask the model to make a final list of general terms",
+ system_message_content=sys_message,
+ user_content=user_message,
+ )
+ return generalized_task
+
+ async def validate_insight(self, insight: str, task_description: str) -> bool:
+ """
+ Judges whether the insight could help solve the task.
+ """
+
+ sys_message = """You are a helpful and thoughtful assistant."""
+
+ user_message: List[Union[str, Image]] = [
+ """We have been given a potential insight that may or may not be useful for solving a given task.
+- First review the following task.
+- Then review the insight that follows, and consider whether it might help solve the given task.
+- Do not attempt to actually solve the task.
+- Reply with a single character, '1' if the insight may be useful, or '0' if it is not."""
+ ]
+ user_message.append("\n# Task description")
+ user_message.append(task_description)
+ user_message.append("\n# Possibly useful insight")
+ user_message.append(insight)
+ self._clear_history()
+ response = await self.call_model(
+ summary="Ask the model to validate the insight",
+ system_message_content=sys_message,
+ user_content=user_message,
+ )
+ return response == "1"
+
+ async def extract_task(self, text: str) -> str | None:
+ """
+ Returns a task found in the given text, or None if not found.
+ """
+ sys_message = """You are a helpful and thoughtful assistant."""
+ user_message: List[Union[str, Image]] = [
+ """Does the following text contain a question or a some task we are being asked to perform?
+- If so, please reply with the full question or task description, along with any supporting information, but without adding extra commentary or formatting.
+- If the task is just to remember something, that doesn't count as a task, so don't include it.
+- If there is no question or task in the text, simply write "None" with no punctuation."""
+ ]
+ user_message.append("\n# Text to analyze")
+ user_message.append(text)
+ self._clear_history()
+ response = await self.call_model(
+ summary="Ask the model to extract a task", system_message_content=sys_message, user_content=user_message
+ )
+ return response if response != "None" else None
+
+ async def extract_advice(self, text: str) -> str | None:
+ """
+ Returns advice from the given text, or None if not found.
+ """
+ sys_message = """You are a helpful and thoughtful assistant."""
+ user_message: List[Union[str, Image]] = [
+ """Does the following text contain any information or advice that might be useful later?
+- If so, please copy the information or advice, adding no extra commentary or formatting.
+- If there is no potentially useful information or advice at all, simply write "None" with no punctuation."""
+ ]
+ user_message.append("\n# Text to analyze")
+ user_message.append(text)
+ self._clear_history()
+ response = await self.call_model(
+ summary="Ask the model to extract advice", system_message_content=sys_message, user_content=user_message
+ )
+ return response if response != "None" else None
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_string_similarity_map.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_string_similarity_map.py
new file mode 100644
index 000000000000..1510c41bc13b
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_string_similarity_map.py
@@ -0,0 +1,124 @@
+import os
+import pickle
+from typing import Dict, List, Tuple, Union
+
+import chromadb
+from chromadb.api.types import (
+ QueryResult,
+)
+from chromadb.config import Settings
+
+from .utils.page_logger import PageLogger
+
+
+class StringSimilarityMap:
+ """
+ Provides storage and similarity-based retrieval of string pairs using a vector database.
+ Each DB entry is a pair of strings: an input string and an output string.
+ The input string is embedded and used as the retrieval key.
+ The output string can be anything, but it's typically used as a dict key.
+ Vector embeddings are currently supplied by Chroma's default Sentence Transformers.
+
+ Args:
+ - reset: True to clear the DB immediately after creation.
+ - path_to_db_dir: Path to the directory where the DB is stored.
+ - logger: An optional logger. If None, no logging will be performed.
+ """
+
+ def __init__(self, reset: bool, path_to_db_dir: str, logger: PageLogger | None = None) -> None:
+ if logger is None:
+ logger = PageLogger() # Nothing will be logged by this object.
+ self.logger = logger
+ self.path_to_db_dir = path_to_db_dir
+
+ # Load or create the vector DB on disk.
+ chromadb_settings = Settings(
+ anonymized_telemetry=False, allow_reset=True, is_persistent=True, persist_directory=path_to_db_dir
+ )
+ self.db_client = chromadb.Client(chromadb_settings)
+ self.vec_db = self.db_client.create_collection("string-pairs", get_or_create=True) # The collection is the DB.
+
+ # Load or create the associated string-pair dict on disk.
+ self.path_to_dict = os.path.join(path_to_db_dir, "uid_text_dict.pkl")
+ self.uid_text_dict: Dict[str, Tuple[str, str]] = {}
+ self.last_string_pair_id = 0
+ if (not reset) and os.path.exists(self.path_to_dict):
+ self.logger.debug("\nLOADING STRING SIMILARITY MAP FROM DISK at {}".format(self.path_to_dict))
+ with open(self.path_to_dict, "rb") as f:
+ self.uid_text_dict = pickle.load(f)
+ self.last_string_pair_id = len(self.uid_text_dict)
+ if len(self.uid_text_dict) > 0:
+ self.logger.debug("\n{} STRING PAIRS LOADED".format(len(self.uid_text_dict)))
+ self._log_string_pairs()
+
+ # Clear the DB if requested.
+ if reset:
+ self.reset_db()
+
+ def _log_string_pairs(self) -> None:
+ """
+ Logs all string pairs currently in the map.
+ """
+ self.logger.debug("LIST OF STRING PAIRS")
+ for uid, text in self.uid_text_dict.items():
+ input_text, output_text = text
+ self.logger.debug(" ID: {}\n INPUT TEXT: {}\n OUTPUT TEXT: {}".format(uid, input_text, output_text))
+
+ def save_string_pairs(self) -> None:
+ """
+ Saves the string-pair dict (self.uid_text_dict) to disk.
+ """
+ self.logger.debug("\nSAVING STRING SIMILARITY MAP TO DISK at {}".format(self.path_to_dict))
+ with open(self.path_to_dict, "wb") as file:
+ pickle.dump(self.uid_text_dict, file)
+
+ def reset_db(self) -> None:
+ """
+ Forces immediate deletion of the DB's contents, in memory and on disk.
+ """
+ self.logger.debug("\nCLEARING STRING-PAIR MAP")
+ self.db_client.delete_collection("string-pairs")
+ self.vec_db = self.db_client.create_collection("string-pairs")
+ self.uid_text_dict = {}
+ self.save_string_pairs()
+
+ def add_input_output_pair(self, input_text: str, output_text: str) -> None:
+ """
+ Adds one input-output string pair to the DB.
+ """
+ self.last_string_pair_id += 1
+ self.vec_db.add(documents=[input_text], ids=[str(self.last_string_pair_id)])
+ self.uid_text_dict[str(self.last_string_pair_id)] = input_text, output_text
+ self.logger.debug(
+ "\nINPUT-OUTPUT PAIR ADDED TO VECTOR DATABASE:\n ID\n {}\n INPUT\n {}\n OUTPUT\n {}\n".format(
+ self.last_string_pair_id, input_text, output_text
+ )
+ )
+ # self._log_string_pairs() # For deeper debugging, uncomment to log all string pairs after each addition.
+
+ def get_related_string_pairs(
+ self, query_text: str, n_results: int, threshold: Union[int, float]
+ ) -> List[Tuple[str, str, float]]:
+ """
+ Retrieves up to n string pairs that are related to the given query text within the specified distance threshold.
+ """
+ string_pairs_with_distances: List[Tuple[str, str, float]] = []
+ if n_results > len(self.uid_text_dict):
+ n_results = len(self.uid_text_dict)
+ if n_results > 0:
+ results: QueryResult = self.vec_db.query(query_texts=[query_text], n_results=n_results)
+ num_results = len(results["ids"][0])
+ for i in range(num_results):
+ uid = results["ids"][0][i]
+ input_text = results["documents"][0][i] if results["documents"] else ""
+ distance = results["distances"][0][i] if results["distances"] else 0.0
+ if distance < threshold:
+ input_text_2, output_text = self.uid_text_dict[uid]
+ assert input_text == input_text_2
+ self.logger.debug(
+ "\nINPUT-OUTPUT PAIR RETRIEVED FROM VECTOR DATABASE:\n INPUT1\n {}\n OUTPUT\n {}\n DISTANCE\n {}".format(
+ input_text, output_text, distance
+ )
+ )
+ string_pairs_with_distances.append((input_text, output_text, distance))
+ return string_pairs_with_distances
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_task_centric_memory_bank.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_task_centric_memory_bank.py
new file mode 100644
index 000000000000..59268de4685a
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/_task_centric_memory_bank.py
@@ -0,0 +1,201 @@
+import os
+import pickle
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple, TypedDict
+
+from ._string_similarity_map import StringSimilarityMap
+from .utils.page_logger import PageLogger
+
+
+@dataclass
+class Memo:
+ """
+ Represents an atomic unit of memory that can be stored in a memory bank and later retrieved.
+ """
+
+ task: str | None # The task description, if any.
+ insight: str # A hint, solution, plan, or any other text that may help solve a similar task.
+
+
+# Following the nested-config pattern, this TypedDict minimizes code changes by encapsulating
+# the settings that change frequently, as when loading many settings from a single YAML file.
+class TaskCentricMemoryBankConfig(TypedDict, total=False):
+ path: str
+ relevance_conversion_threshold: float
+ n_results: int
+ distance_threshold: int
+
+
+class TaskCentricMemoryBank:
+ """
+ Stores task-completion insights as memories in a vector DB for later retrieval.
+
+ Args:
+ reset: True to clear the DB before starting.
+ config: An optional dict that can be used to override the following values:
+
+ - path: The path to the directory where the memory bank files are stored.
+        - relevance_conversion_threshold: The offset used to convert topic-match distances into relevance scores (relevance = threshold - distance).
+ - n_results: The maximum number of most relevant results to return for any given topic.
+ - distance_threshold: The maximum string-pair distance for a memo to be retrieved.
+
+ logger: An optional logger. If None, no logging will be performed.
+ """
+
+ def __init__(
+ self,
+ reset: bool,
+ config: TaskCentricMemoryBankConfig | None = None,
+ logger: PageLogger | None = None,
+ ) -> None:
+ if logger is None:
+ logger = PageLogger() # Nothing will be logged by this object.
+ self.logger = logger
+ self.logger.enter_function()
+
+ # Apply default settings and any config overrides.
+ memory_dir_path = "./memory_bank/default"
+ self.relevance_conversion_threshold = 1.7
+ self.n_results = 25
+ self.distance_threshold = 100
+ if config is not None:
+ memory_dir_path = config.get("path", memory_dir_path)
+ self.relevance_conversion_threshold = config.get(
+ "relevance_conversion_threshold", self.relevance_conversion_threshold
+ )
+ self.n_results = config.get("n_results", self.n_results)
+ self.distance_threshold = config.get("distance_threshold", self.distance_threshold)
+
+ memory_dir_path = os.path.expanduser(memory_dir_path)
+ self.logger.info("\nMEMORY BANK DIRECTORY {}".format(memory_dir_path))
+ path_to_db_dir = os.path.join(memory_dir_path, "string_map")
+ self.path_to_dict = os.path.join(memory_dir_path, "uid_memo_dict.pkl")
+
+ self.string_map = StringSimilarityMap(reset=reset, path_to_db_dir=path_to_db_dir, logger=self.logger)
+
+ # Load or create the associated memo dict on disk.
+ self.uid_memo_dict: Dict[str, Memo] = {}
+ self.last_memo_id = 0
+ if (not reset) and os.path.exists(self.path_to_dict):
+ self.logger.info("\nLOADING MEMOS FROM DISK at {}".format(self.path_to_dict))
+ with open(self.path_to_dict, "rb") as f:
+ self.uid_memo_dict = pickle.load(f)
+ self.last_memo_id = len(self.uid_memo_dict)
+ self.logger.info("\n{} MEMOS LOADED".format(len(self.uid_memo_dict)))
+
+ # Clear the DB if requested.
+ if reset:
+ self._reset_memos()
+
+ self.logger.leave_function()
+
+ def reset(self) -> None:
+ """
+ Forces immediate deletion of all contents, in memory and on disk.
+ """
+ self.string_map.reset_db()
+ self._reset_memos()
+
+ def _reset_memos(self) -> None:
+ """
+ Forces immediate deletion of the memos, in memory and on disk.
+ """
+ self.logger.info("\nCLEARING MEMOS")
+ self.uid_memo_dict = {}
+ self.save_memos()
+
+ def save_memos(self) -> None:
+ """
+ Saves the current memo structures (possibly empty) to disk.
+ """
+ self.string_map.save_string_pairs()
+ with open(self.path_to_dict, "wb") as file:
+ self.logger.info("\nSAVING MEMOS TO DISK at {}".format(self.path_to_dict))
+ pickle.dump(self.uid_memo_dict, file)
+
+ def contains_memos(self) -> bool:
+ """
+ Returns True if the memory bank contains any memo.
+ """
+ return len(self.uid_memo_dict) > 0
+
+ def _map_topics_to_memo(self, topics: List[str], memo_id: str, memo: Memo) -> None:
+ """
+ Adds a mapping in the vec DB from each topic to the memo.
+ """
+ self.logger.enter_function()
+ self.logger.info("\nINSIGHT\n{}".format(memo.insight))
+ for topic in topics:
+ self.logger.info("\n TOPIC = {}".format(topic))
+ self.string_map.add_input_output_pair(topic, memo_id)
+ self.uid_memo_dict[memo_id] = memo
+ self.save_memos()
+ self.logger.leave_function()
+
+ def add_memo(self, insight_str: str, topics: List[str], task_str: Optional[str] = None) -> None:
+ """
+ Adds an insight to the memory bank, given topics related to the insight, and optionally the task.
+ """
+ self.logger.enter_function()
+ self.last_memo_id += 1
+ id_str = str(self.last_memo_id)
+        memo = Memo(insight=insight_str, task=task_str)
+        self._map_topics_to_memo(topics, id_str, memo)
+ self.logger.leave_function()
+
+ def add_task_with_solution(self, task: str, solution: str, topics: List[str]) -> None:
+ """
+ Adds a task-solution pair to the memory bank, to be retrieved together later as a combined insight.
+ This is useful when the insight is a demonstration of how to solve a given type of task.
+ """
+ self.logger.enter_function()
+ self.last_memo_id += 1
+ id_str = str(self.last_memo_id)
+        # Combine the task and its solution into a single insight string.
+ insight_str = "Example task:\n\n{}\n\nExample solution:\n\n{}".format(task, solution)
+ memo = Memo(insight=insight_str, task=task)
+ self._map_topics_to_memo(topics, id_str, memo)
+ self.logger.leave_function()
+
+ def get_relevant_memos(self, topics: List[str]) -> List[Memo]:
+ """
+ Returns any memos from the memory bank that appear sufficiently relevant to the input topics.
+ """
+ self.logger.enter_function()
+
+ # Retrieve all topic matches, and gather them into a single list.
+ matches: List[Tuple[str, str, float]] = [] # Each match is a tuple: (topic, memo_id, distance)
+ for topic in topics:
+ matches.extend(self.string_map.get_related_string_pairs(topic, self.n_results, self.distance_threshold))
+
+ # Build a dict of memo-relevance pairs from the matches.
+ memo_relevance_dict: Dict[str, float] = {}
+ for match in matches:
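+            # Convert each topic-match distance into a relevance score (threshold minus distance).
+            # Scores accumulate across all matched topics that map to the same memo.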
+ relevance = self.relevance_conversion_threshold - match[2]
+ memo_id = match[1]
+ if memo_id in memo_relevance_dict:
+ memo_relevance_dict[memo_id] += relevance
+ else:
+ memo_relevance_dict[memo_id] = relevance
+
+ # Log the details of all the retrieved memos.
+ self.logger.info("\n{} POTENTIALLY RELEVANT MEMOS".format(len(memo_relevance_dict)))
+ for memo_id, relevance in memo_relevance_dict.items():
+ memo = self.uid_memo_dict[memo_id]
+ details = ""
+ if memo.task is not None:
+ details += "\n TASK: {}\n".format(memo.task)
+ details += "\n INSIGHT: {}\n\n RELEVANCE: {:.3f}\n".format(memo.insight, relevance)
+ self.logger.info(details)
+
+ # Sort the memo-relevance pairs by relevance, in descending order.
+ memo_relevance_dict = dict(sorted(memo_relevance_dict.items(), key=lambda item: item[1], reverse=True))
+
+ # Compose the list of sufficiently relevant memos to return.
+ memo_list: List[Memo] = []
+ for memo_id in memo_relevance_dict:
+ if memo_relevance_dict[memo_id] >= 0:
+ memo_list.append(self.uid_memo_dict[memo_id])
+
+ self.logger.leave_function()
+ return memo_list
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/task_centric_memory_controller.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/task_centric_memory_controller.py
new file mode 100644
index 000000000000..a7d26dcba4fa
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/task_centric_memory_controller.py
@@ -0,0 +1,458 @@
+from typing import TYPE_CHECKING, Awaitable, Callable, List, Tuple, TypedDict
+
+from autogen_core.models import (
+ ChatCompletionClient,
+)
+
+from ._prompter import Prompter
+from ._task_centric_memory_bank import Memo, TaskCentricMemoryBank
+
+if TYPE_CHECKING:
+ from ._task_centric_memory_bank import TaskCentricMemoryBankConfig
+from .utils.grader import Grader
+from .utils.page_logger import PageLogger
+
+
+# Following the nested-config pattern, this TypedDict minimizes code changes by encapsulating
+# the settings that change frequently, as when loading many settings from a single YAML file.
+class TaskCentricMemoryControllerConfig(TypedDict, total=False):
+ max_train_trials: int
+ max_test_trials: int
+ TaskCentricMemoryBank: "TaskCentricMemoryBankConfig"
+
+
+class TaskCentricMemoryController:
+ """
+ (EXPERIMENTAL, RESEARCH IN PROGRESS)
+
+ Implements fast, memory-based learning, and manages the flow of information to and from a memory bank.
+
+ Args:
+ reset: True to empty the memory bank before starting.
+ client: The model client to use internally.
+ task_assignment_callback: An optional callback used to assign a task to any agent managed by the caller.
+ config: An optional dict that can be used to override the following values:
+
+ - max_train_trials: The maximum number of learning iterations to attempt when training on a task.
+ - max_test_trials: The total number of attempts made when testing for failure on a task.
+ - TaskCentricMemoryBank: A config dict passed to TaskCentricMemoryBank.
+
+ logger: An optional logger. If None, a default logger will be created.
+
+ Example:
+
+ The `task-centric-memory` extra first needs to be installed:
+
+ .. code-block:: bash
+
+ pip install "autogen-ext[task-centric-memory]"
+
+    The following code snippet shows how to use this class for the most basic storage and retrieval of memories:
+
+ .. code-block:: python
+
+ import asyncio
+ from autogen_ext.models.openai import OpenAIChatCompletionClient
+ from autogen_ext.task_centric_memory import TaskCentricMemoryController
+ from autogen_ext.task_centric_memory.utils import PageLogger
+
+
+ async def main() -> None:
+ client = OpenAIChatCompletionClient(model="gpt-4o")
+ logger = PageLogger(config={"level": "DEBUG", "path": "~/pagelogs/quickstart"}) # Optional, but very useful.
+ memory_controller = TaskCentricMemoryController(reset=True, client=client, logger=logger)
+
+ # Add a few task-insight pairs as memories, where an insight can be any string that may help solve the task.
+ await memory_controller.add_memo(task="What color do I like?", insight="Deep blue is my favorite color")
+ await memory_controller.add_memo(task="What's another color I like?", insight="I really like cyan")
+ await memory_controller.add_memo(task="What's my favorite food?", insight="Halibut is my favorite")
+
+ # Retrieve memories for a new task that's related to only two of the stored memories.
+ memos = await memory_controller.retrieve_relevant_memos(task="What colors do I like most?")
+ print("{} memories retrieved".format(len(memos)))
+ for memo in memos:
+ print("- " + memo.insight)
+
+
+ asyncio.run(main())
+ """
+
+ def __init__(
+ self,
+ reset: bool,
+ client: ChatCompletionClient,
+ task_assignment_callback: Callable[[str], Awaitable[Tuple[str, str]]] | None = None,
+ config: TaskCentricMemoryControllerConfig | None = None,
+ logger: PageLogger | None = None,
+ ) -> None:
+ if logger is None:
+ logger = PageLogger({"level": "DEBUG"})
+ self.logger = logger
+ self.logger.enter_function()
+
+ # Apply default settings and any config overrides.
+ self.max_train_trials = 10
+ self.max_test_trials = 3
+ memory_bank_config = None
+ if config is not None:
+ self.max_train_trials = config.get("max_train_trials", self.max_train_trials)
+ self.max_test_trials = config.get("max_test_trials", self.max_test_trials)
+ memory_bank_config = config.get("TaskCentricMemoryBank", memory_bank_config)
+
+ self.client = client
+ self.task_assignment_callback = task_assignment_callback
+ self.prompter = Prompter(client, logger)
+ self.memory_bank = TaskCentricMemoryBank(reset=reset, config=memory_bank_config, logger=logger)
+ self.grader = Grader(client, logger)
+ self.logger.leave_function()
+
+ def reset_memory(self) -> None:
+ """
+ Empties the memory bank in RAM and on disk.
+ """
+ self.memory_bank.reset()
+
+ async def train_on_task(self, task: str, expected_answer: str) -> None:
+ """
+ Repeatedly assigns a task to the agent, and tries to learn from failures by creating useful insights as memories.
+ """
+ self.logger.enter_function()
+ self.logger.info("Iterate on the task, possibly discovering a useful new insight.\n")
+ _, insight = await self._iterate_on_task(task, expected_answer)
+ if insight is None:
+ self.logger.info("No useful insight was discovered.\n")
+ else:
+ self.logger.info("A new insight was created:\n{}".format(insight))
+ await self.add_memo(insight, task)
+ self.logger.leave_function()
+
+ async def test_on_task(self, task: str, expected_answer: str, num_trials: int = 1) -> Tuple[str, int, int]:
+ """
+ Assigns a task to the agent, along with any relevant memos retrieved from memory.
+ """
+ self.logger.enter_function()
+ assert self.task_assignment_callback is not None
+ response = ""
+ num_successes = 0
+
+ for trial in range(num_trials):
+ self.logger.info("\n----- TRIAL {} -----\n".format(trial + 1))
+ task_plus_insights = task
+
+ # Try to retrieve any relevant memories from the DB.
+ filtered_memos = await self.retrieve_relevant_memos(task)
+ filtered_insights = [memo.insight for memo in filtered_memos]
+ if len(filtered_insights) > 0:
+ self.logger.info("Relevant insights were retrieved from memory.\n")
+ memory_section = self._format_memory_section(filtered_insights)
+ if len(memory_section) > 0:
+ task_plus_insights = task + "\n\n" + memory_section
+
+ # Attempt to solve the task.
+ self.logger.info("Try to solve the task.\n")
+ response, _ = await self.task_assignment_callback(task_plus_insights)
+
+ # Check if the response is correct.
+ response_is_correct, extracted_answer = await self.grader.is_response_correct(
+ task, response, expected_answer
+ )
+ self.logger.info("Extracted answer: {}".format(extracted_answer))
+ if response_is_correct:
+ self.logger.info("Answer is CORRECT.\n")
+ num_successes += 1
+ else:
+ self.logger.info("Answer is INCORRECT.\n")
+
+ # Calculate the success rate as a percentage, rounded to the nearest whole number.
+ self.logger.info("\nSuccess rate: {}%\n".format(round((num_successes / num_trials) * 100)))
+ self.logger.leave_function()
+ return response, num_successes, num_trials
+
+ async def add_memo(self, insight: str, task: None | str = None, index_on_both: bool = True) -> None:
+ """
+ Adds one insight to the memory bank, using the task (if provided) as context.
+ """
+ self.logger.enter_function()
+
+ generalized_task = ""
+ if task is not None:
+ self.logger.info("\nGIVEN TASK:")
+ self.logger.info(task)
+ # Generalize the task.
+ generalized_task = await self.prompter.generalize_task(task)
+
+ self.logger.info("\nGIVEN INSIGHT:")
+ self.logger.info(insight)
+
+ # Get a list of topics from the insight and the task (if provided).
+ if task is None:
+ text_to_index = insight
+ self.logger.info("\nTOPICS EXTRACTED FROM INSIGHT:")
+ else:
+ if index_on_both:
+ text_to_index = generalized_task.strip() + "\n(Hint: " + insight + ")"
+ self.logger.info("\nTOPICS EXTRACTED FROM TASK AND INSIGHT COMBINED:")
+ else:
+ text_to_index = task
+ self.logger.info("\nTOPICS EXTRACTED FROM TASK:")
+
+ topics = await self.prompter.find_index_topics(text_to_index)
+ self.logger.info("\n".join(topics))
+ self.logger.info("")
+
+ # Add the insight to the memory bank.
+ self.memory_bank.add_memo(insight, topics, task)
+ self.logger.leave_function()
+
+ async def add_task_solution_pair_to_memory(self, task: str, solution: str) -> None:
+ """
+ Adds a task-solution pair to the memory bank, to be retrieved together later as a combined insight.
+ This is useful when the task-solution pair is an exemplar of solving a task related to some other task.
+ """
+ self.logger.enter_function()
+
+ self.logger.info("\nEXAMPLE TASK:")
+ self.logger.info(task)
+
+ self.logger.info("\nEXAMPLE SOLUTION:")
+ self.logger.info(solution)
+
+ # Get a list of topics from the task.
+ topics = await self.prompter.find_index_topics(task.strip())
+ self.logger.info("\nTOPICS EXTRACTED FROM TASK:")
+ self.logger.info("\n".join(topics))
+ self.logger.info("")
+
+ # Add the task and solution (as a combined insight) to the memory bank.
+ self.memory_bank.add_task_with_solution(task=task, solution=solution, topics=topics)
+ self.logger.leave_function()
+
+ async def retrieve_relevant_memos(self, task: str) -> List[Memo]:
+ """
+ Retrieves any memos from memory that seem relevant to the task.
+ """
+ self.logger.enter_function()
+
+ if self.memory_bank.contains_memos():
+ self.logger.info("\nCURRENT TASK:")
+ self.logger.info(task)
+
+ # Get a list of topics from the generalized task.
+ generalized_task = await self.prompter.generalize_task(task)
+ task_topics = await self.prompter.find_index_topics(generalized_task)
+ self.logger.info("\nTOPICS EXTRACTED FROM TASK:")
+ self.logger.info("\n".join(task_topics))
+ self.logger.info("")
+
+ # Retrieve relevant memos from the memory bank.
+ memo_list = self.memory_bank.get_relevant_memos(topics=task_topics)
+
+ # Apply a final validation stage to keep only the memos that the LLM concludes are sufficiently relevant.
+ validated_memos: List[Memo] = []
+ for memo in memo_list:
+ if await self.prompter.validate_insight(memo.insight, task):
+ validated_memos.append(memo)
+
+ self.logger.info("\n{} VALIDATED MEMOS".format(len(validated_memos)))
+ for memo in validated_memos:
+ if memo.task is not None:
+ self.logger.info("\n TASK: {}".format(memo.task))
+ self.logger.info("\n INSIGHT: {}".format(memo.insight))
+ else:
+ self.logger.info("\nNO SUFFICIENTLY RELEVANT MEMOS WERE FOUND IN MEMORY")
+ validated_memos = []
+
+ self.logger.leave_function()
+ return validated_memos
+
+ def _format_memory_section(self, memories: List[str]) -> str:
+ """
+ Formats a list of memories as a section for appending to a task description.
+ """
+ memory_section = ""
+ if len(memories) > 0:
+ memory_section = "## Important insights that may help solve tasks like this\n"
+ for mem in memories:
+ memory_section += "- " + mem + "\n"
+ return memory_section
+
+ async def _test_for_failure(
+ self, task: str, task_plus_insights: str, expected_answer: str
+ ) -> Tuple[bool, str, str]:
+ """
+ Attempts to solve the given task multiple times to find a failure case to learn from.
+ """
+ self.logger.enter_function()
+ self.logger.info("\nTask description, including any insights: {}".format(task_plus_insights))
+ self.logger.info("\nExpected answer: {}\n".format(expected_answer))
+
+ assert self.task_assignment_callback is not None
+ failure_found = False
+ response, work_history = "", ""
+
+ for trial in range(self.max_test_trials):
+ self.logger.info("\n----- TRIAL {} -----\n".format(trial + 1))
+
+ # Attempt to solve the task.
+ self.logger.info("Try to solve the task.")
+ response, work_history = await self.task_assignment_callback(task_plus_insights)
+
+ response_is_correct, extracted_answer = await self.grader.is_response_correct(
+ task, response, expected_answer
+ )
+ self.logger.info("Extracted answer: {}".format(extracted_answer))
+ if response_is_correct:
+ self.logger.info("Answer is CORRECT.\n")
+ else:
+ self.logger.info("Answer is INCORRECT.\n Stop testing, and return the details of the failure.\n")
+ failure_found = True
+ break
+
+ self.logger.leave_function()
+ return failure_found, response, work_history
+
+ async def _iterate_on_task(self, task: str, expected_answer: str) -> Tuple[str, None | str]:
+ """
+ Repeatedly assigns a task to the agent, and tries to learn from failures by creating useful insights as memories.
+ """
+ self.logger.enter_function()
+ self.logger.info("\nTask description: {}".format(task))
+ self.logger.info("\nExpected answer: {}\n".format(expected_answer))
+
+ final_response = ""
+ old_memos = await self.retrieve_relevant_memos(task)
+ old_insights = [memo.insight for memo in old_memos]
+ new_insights: List[str] = []
+ last_insight = None
+ insight = None
+ successful_insight = None
+
+ # Loop until success (or timeout) while learning from failures.
+ for trial in range(1, self.max_train_trials + 1):
+ self.logger.info("\n----- TRAIN TRIAL {} -----\n".format(trial))
+ task_plus_insights = task
+
+ # Add any new insights we've accumulated so far.
+ if last_insight is not None:
+ memory_section = self._format_memory_section(old_insights + [last_insight])
+ else:
+ memory_section = self._format_memory_section(old_insights)
+ if len(memory_section) > 0:
+ task_plus_insights += "\n\n" + memory_section
+
+ # Can we find a failure case to learn from?
+ failure_found, response, work_history = await self._test_for_failure(
+ task, task_plus_insights, expected_answer
+ )
+ if not failure_found:
+ # No. Time to exit the loop.
+ self.logger.info("\nResponse is CORRECT.\n Stop looking for insights.\n")
+ # Was this the first trial?
+ if trial == 1:
+ # Yes. We should return the successful response, and no insight.
+ final_response = response
+ else:
+ # No. We learned a successful insight, which should be returned.
+ successful_insight = insight
+ break
+
+ # Will we try again?
+ if trial == self.max_train_trials:
+ # No. We're out of training trials.
+ self.logger.info("\nNo more trials will be attempted.\n")
+ break
+
+ # Try to learn from this failure.
+ self.logger.info("\nResponse is INCORRECT. Try to learn from this failure.\n")
+ insight = await self.prompter.learn_from_failure(
+ task, memory_section, response, expected_answer, work_history
+ )
+ self.logger.info("\nInsight: {}\n".format(insight))
+ new_insights.append(insight)
+ last_insight = insight
+
+        # Log and return the response from the final trial.
+ self.logger.info("\n{}\n".format(final_response))
+ self.logger.leave_function()
+ return final_response, successful_insight
+
+ async def _append_any_relevant_memories(self, task: str) -> str:
+ """
+ Appends any relevant memories to the task description.
+ """
+ self.logger.enter_function()
+
+ filtered_memos = await self.retrieve_relevant_memos(task)
+ filtered_insights = [memo.insight for memo in filtered_memos]
+ if len(filtered_insights) > 0:
+ self.logger.info("Relevant insights were retrieved from memory.\n")
+ memory_section = self._format_memory_section(filtered_insights)
+ if len(memory_section) > 0:
+ task = task + "\n\n" + memory_section
+
+ self.logger.leave_function()
+ return task
+
+ async def assign_task(self, task: str, use_memory: bool = True, should_await: bool = True) -> str:
+ """
+ Assigns a task to some agent through the task_assignment_callback, along with any relevant memories.
+ """
+ self.logger.enter_function()
+
+ assert self.task_assignment_callback is not None
+
+ if use_memory:
+ task = await self._append_any_relevant_memories(task)
+
+ # Attempt to solve the task.
+ self.logger.info("Try to solve the task.\n")
+        assert should_await  # Only awaited task assignment is currently supported.
+ response, _ = await self.task_assignment_callback(task)
+
+ self.logger.leave_function()
+ return response
+
+ async def consider_memo_storage(self, text: str) -> str | None:
+ """
+ Tries to extract any advice from the given text and add it to memory.
+ """
+ self.logger.enter_function()
+
+ advice = await self.prompter.extract_advice(text)
+ self.logger.info("Advice: {}".format(advice))
+ if advice is not None:
+ await self.add_memo(insight=advice)
+
+ self.logger.leave_function()
+ return advice
+
+ async def handle_user_message(self, text: str, should_await: bool = True) -> str:
+ """
+ Handles a user message by extracting any advice as an insight to be stored in memory, and then calling assign_task().
+ """
+ self.logger.enter_function()
+
+ # Check for advice.
+ advice = await self.consider_memo_storage(text)
+
+ # Assign the task through the task_assignment_callback, using memory only if no advice was just provided.
+ response = await self.assign_task(text, use_memory=(advice is None), should_await=should_await)
+
+ self.logger.leave_function()
+ return response
+
+ async def process_user_message(self, text: str) -> str:
+ """
+ Processes a user message by extracting any advice as an insight to be stored in memory,
+ and returns the original user message with any other relevant memories appended.
+ """
+ self.logger.enter_function()
+
+ # Append any relevant memories to the user message.
+ expanded_text = await self._append_any_relevant_memories(text)
+
+ # Check for advice to add to memory for later turns.
+ await self.consider_memo_storage(text)
+
+ self.logger.leave_function()
+ return expanded_text
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/__init__.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/__init__.py
new file mode 100644
index 000000000000..e6cc092ba6e8
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/__init__.py
@@ -0,0 +1,6 @@
+from .apprentice import Apprentice
+from .grader import Grader
+from .page_logger import PageLogger
+from .teachability import Teachability
+
+__all__ = ["Apprentice", "PageLogger", "Grader", "Teachability"]
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/_functions.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/_functions.py
new file mode 100644
index 000000000000..f1c9aed6f41c
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/_functions.py
@@ -0,0 +1,96 @@
+import hashlib
+import os
+from typing import List, Tuple, Union
+
+from autogen_core import FunctionCall, Image
+from autogen_core.models import FunctionExecutionResult
+
+# Convenience types
+UserContent = Union[str, List[Union[str, Image]]]
+AssistantContent = Union[str, List[FunctionCall]]
+FunctionExecutionContent = List[FunctionExecutionResult]
+SystemContent = str
+MessageContent = UserContent | AssistantContent | SystemContent | FunctionExecutionContent
+
+
+def message_content_to_str(message_content: MessageContent | None) -> str:
+ """
+ Converts the message content to a string.
+ """
+ if message_content is None:
+ return ""
+ elif isinstance(message_content, str):
+ return message_content
+ elif isinstance(message_content, List):
+ converted: List[str] = list()
+ for item in message_content:
+ if isinstance(item, str):
+ converted.append(item)
+ elif isinstance(item, Image):
+ converted.append("")
+ else:
+ converted.append(str(item).rstrip())
+ return "\n".join(converted)
+ else:
+ raise AssertionError("Unexpected response type.")
+
+
+def text_from_user_content(user_content: UserContent) -> str:
+ """
+ Extracts just the text from the user content.
+ """
+ if isinstance(user_content, str):
+ return user_content
+ elif isinstance(user_content, List):
+ text_list: List[str] = list()
+ for item in user_content:
+ if isinstance(item, str):
+ text_list.append(item.rstrip())
+ return "\n\n".join(text_list)
+ else:
+ raise AssertionError("Unexpected response type.")
+
+
+def single_image_from_user_content(user_content: UserContent) -> Union[Image, None]:
+ """
+ Extracts a single image from the user content.
+ """
+ image_to_return = None
+ if isinstance(user_content, str):
+ return None
+ elif isinstance(user_content, List):
+ for item in user_content:
+ if isinstance(item, Image):
+ assert image_to_return is None, "Only one image is currently allowed in the user content."
+ image_to_return = item
+ else:
+ raise AssertionError("Unexpected response type.")
+ return image_to_return
+
+
+def hash_directory(directory: str, hash_algo: str = "sha256") -> Tuple[str, int, int]:
+ """Computes a hash representing the state of a directory, including its structure and file contents."""
+ hash_func = hashlib.new(hash_algo)
+
+ # Also count the number of files and sub-directories
+ num_files = 0
+ num_subdirs = 0
+
+ for root, dirs, files in sorted(os.walk(directory)): # Ensure order for consistent hashing
+ num_files += len(files)
+ num_subdirs += len(dirs)
+ for dir_name in sorted(dirs):
+ hash_func.update(dir_name.encode()) # Hash directory names
+
+ for file_name in sorted(files):
+ file_path = os.path.join(root, file_name)
+ hash_func.update(file_name.encode()) # Hash file names
+
+ try:
+ with open(file_path, "rb") as f:
+ while chunk := f.read(4096): # Read in chunks
+ hash_func.update(chunk)
+ except Exception:
+ pass
+
+ return hash_func.hexdigest(), num_files, num_subdirs
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/apprentice.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/apprentice.py
new file mode 100644
index 000000000000..221b685584e8
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/apprentice.py
@@ -0,0 +1,262 @@
+import random
+import time
+from typing import TYPE_CHECKING, Any, List, Sequence, Tuple, TypedDict
+
+from autogen_agentchat.agents import AssistantAgent
+from autogen_agentchat.base import TaskResult
+from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage
+from autogen_core.models import (
+ ChatCompletionClient,
+ LLMMessage,
+ SystemMessage,
+ UserMessage,
+)
+
+from .page_logger import PageLogger
+
+if TYPE_CHECKING:
+ from ..task_centric_memory_controller import TaskCentricMemoryControllerConfig
+
+
+# Following the nested-config pattern, this TypedDict minimizes code changes by encapsulating
+# the settings that change frequently, as when loading many settings from a single YAML file.
+class ApprenticeConfig(TypedDict, total=False):
+ name_of_agent_or_team: str
+ disable_prefix_caching: bool
+ TaskCentricMemoryController: "TaskCentricMemoryControllerConfig"
+
+
+class Apprentice:
+ """
+ A minimal wrapper combining task-centric memory with an agent or team.
+    Applications may use the Apprentice class, or they may directly instantiate
+    and call the TaskCentricMemoryController, using this class as an example.
+
+ Args:
+ client: The client to call the model.
+ config: An optional dict that can be used to override the following values:
+
+ - name_of_agent_or_team: The name of the target agent or team for assigning tasks to.
+ - disable_prefix_caching: True to disable prefix caching by prepending random ints to the first message.
+ - TaskCentricMemoryController: A config dict passed to TaskCentricMemoryController.
+
+ logger: An optional logger. If None, a default logger will be created.
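+
+    Example:
+
+        A minimal usage sketch (assuming an OpenAI key is set in the environment):
+
+        .. code-block:: python
+
+            import asyncio
+
+            from autogen_ext.models.openai import OpenAIChatCompletionClient
+            from autogen_ext.task_centric_memory.utils import Apprentice
+
+
+            async def main() -> None:
+                client = OpenAIChatCompletionClient(model="gpt-4o-2024-08-06")
+                apprentice = Apprentice(client)
+                response = await apprentice.assign_task("What is the capital of France?")
+                print(response)
+
+
+            asyncio.run(main())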
+ """
+
+ def __init__(
+ self,
+ client: ChatCompletionClient,
+ config: ApprenticeConfig | None = None,
+ logger: PageLogger | None = None,
+ ) -> None:
+ if logger is None:
+ logger = PageLogger({"level": "DEBUG"})
+ self.logger = logger
+
+ # Apply default settings and any config overrides.
+ self.name_of_agent_or_team = "AssistantAgent"
+ self.disable_prefix_caching = False
+ memory_controller_config = None
+ if config is not None:
+ self.name_of_agent_or_team = config.get("name_of_agent_or_team", self.name_of_agent_or_team)
+ self.disable_prefix_caching = config.get("disable_prefix_caching", self.disable_prefix_caching)
+ memory_controller_config = config.get("TaskCentricMemoryController", memory_controller_config)
+
+ self.client = client
+ if self.disable_prefix_caching:
+ self.rand = random.Random()
+ self.rand.seed(int(time.time() * 1000))
+
+ # Create the TaskCentricMemoryController, which creates the TaskCentricMemoryBank.
+ from ..task_centric_memory_controller import TaskCentricMemoryController
+
+ self.memory_controller = TaskCentricMemoryController(
+ reset=True,
+ client=self.client,
+ task_assignment_callback=self.assign_task_to_agent_or_team,
+ config=memory_controller_config,
+ logger=self.logger,
+ )
+
+ def reset_memory(self) -> None:
+ """
+ Resets the memory bank.
+ """
+ self.memory_controller.reset_memory()
+
+ async def handle_user_message(self, text: str, should_await: bool = True) -> str:
+ """
+ Handles a user message, extracting any advice and assigning a task to the agent.
+ """
+ self.logger.enter_function()
+
+ # Pass the user message through to the memory controller.
+ response = await self.memory_controller.handle_user_message(text, should_await)
+
+ self.logger.leave_function()
+ return response
+
+ async def add_task_solution_pair_to_memory(self, task: str, solution: str) -> None:
+ """
+ Adds a task-solution pair to the memory bank, to be retrieved together later as a combined insight.
+ This is useful when the insight is a demonstration of how to solve a given type of task.
+ """
+ self.logger.enter_function()
+
+ # Pass the task and solution through to the memory controller.
+ await self.memory_controller.add_task_solution_pair_to_memory(task, solution)
+
+ self.logger.leave_function()
+
+ async def assign_task(self, task: str, use_memory: bool = True, should_await: bool = True) -> str:
+ """
+ Assigns a task to the agent, along with any relevant insights/memories.
+ """
+ self.logger.enter_function()
+
+ # Pass the task through to the memory controller.
+ response = await self.memory_controller.assign_task(task, use_memory, should_await)
+
+ self.logger.leave_function()
+ return response
+
+ async def train_on_task(self, task: str, expected_answer: str) -> None:
+ """
+ Repeatedly assigns a task to the completion agent, and tries to learn from failures by creating useful insights as memories.
+ """
+ self.logger.enter_function()
+
+ # Pass the task through to the memory controller.
+ await self.memory_controller.train_on_task(task, expected_answer)
+
+ self.logger.leave_function()
+
+ async def assign_task_to_agent_or_team(self, task: str) -> Tuple[str, str]:
+ """
+ Passes the given task to the target agent or team.
+ """
+ self.logger.enter_function()
+
+ # Pass the task through.
+ if self.name_of_agent_or_team == "MagenticOneGroupChat":
+ response, work_history = await self._assign_task_to_magentic_one(task)
+ elif self.name_of_agent_or_team == "AssistantAgent":
+ response, work_history = await self._assign_task_to_assistant_agent(task)
+ else:
+ raise AssertionError("Invalid base agent")
+
+ self.logger.leave_function()
+ return response, work_history
+
+ async def _assign_task_to_assistant_agent(self, task: str) -> Tuple[Any, Any]:
+ """
+ Passes the given task to a newly created AssistantAgent with a generic 6-step system prompt.
+ """
+ self.logger.enter_function()
+ self.logger.info(task)
+
+ system_message_content = """You are a helpful and thoughtful assistant.
+In responding to every user message, you follow the same multi-step process given here:
+1. Explain your understanding of the user message in detail, covering all the important points.
+2. List as many possible responses as you can think of.
+3. Carefully list and weigh the pros and cons (if any) of each possible response.
+4. Critique the pros and cons above, looking for any flaws in your reasoning. But don't make up flaws that don't exist.
+5. Decide on the best response, looping back to step 1 if none of the responses are satisfactory.
+6. Finish by providing your final response in the particular format requested by the user."""
+
+ if self.disable_prefix_caching:
+ # Prepend a random int to disable prefix caching.
+ random_str = "({})\n\n".format(self.rand.randint(0, 1000000))
+ system_message_content = random_str + system_message_content
+
+ system_message: LLMMessage
+ if self.client.model_info["family"] == "o1":
+ # No system message allowed, so pass it as the first user message.
+ system_message = UserMessage(content=system_message_content, source="User")
+ else:
+ # System message allowed.
+ system_message = SystemMessage(content=system_message_content)
+
+ user_message: LLMMessage = UserMessage(content=task, source="User")
+ system_message_list: List[LLMMessage] = [system_message]
+ user_message_list: List[LLMMessage] = [user_message]
+ input_messages: List[LLMMessage] = system_message_list + user_message_list
+
+ assistant_agent = AssistantAgent(
+ "assistant_agent",
+ self.client,
+ system_message=system_message_content,
+ )
+
+ # Get the agent's response to the task.
+ task_result: TaskResult = await assistant_agent.run(task=TextMessage(content=task, source="User"))
+ messages: Sequence[AgentEvent | ChatMessage] = task_result.messages
+ message: AgentEvent | ChatMessage = messages[-1]
+ response_str = message.content
+
+ # Log the model call
+ self.logger.log_model_task(
+ summary="Ask the model to complete the task", input_messages=input_messages, task_result=task_result
+ )
+ self.logger.info("\n----- RESPONSE -----\n\n{}\n".format(response_str))
+
+ # Use the response as the work history as well.
+ work_history = response_str
+
+ self.logger.leave_function()
+ return response_str, work_history
+
+ async def _assign_task_to_magentic_one(self, task: str) -> Tuple[str, str]:
+ """
+ Instantiates a MagenticOneGroupChat team, and passes the given task to it.
+ """
+ self.logger.enter_function()
+ self.logger.info(task)
+
+ general_agent = AssistantAgent(
+ "general_agent",
+ self.client,
+ description="A general GPT-4o AI assistant capable of performing a variety of tasks.",
+ )
+
+ from autogen_ext.agents.web_surfer import MultimodalWebSurfer
+
+ web_surfer = MultimodalWebSurfer(
+ name="web_surfer",
+ model_client=self.client,
+ downloads_folder="logs",
+ debug_dir="logs",
+ to_save_screenshots=True,
+ )
+
+ from autogen_agentchat.teams import MagenticOneGroupChat
+
+ team = MagenticOneGroupChat(
+ [general_agent, web_surfer],
+ model_client=self.client,
+ max_turns=20,
+ )
+
+ # Get the team's response to the task.
+ task_result: TaskResult = await team.run(task=task)
+
+ assert isinstance(task_result, TaskResult)
+ messages = task_result.messages
+
+ response_str_list: List[str] = []
+ for message in messages:
+ content = message.content
+ if isinstance(content, str):
+ content_str = content
+ else:
+ content_str = "Not a string."
+ response_str_list.append(content_str)
+ response_str = "\n".join(response_str_list)
+
+ self.logger.info("\n----- RESPONSE -----\n\n{}\n".format(response_str))
+
+ # MagenticOne's response is the chat history, which we use here as the work history.
+ work_history = response_str
+
+ self.logger.leave_function()
+ return response_str, work_history
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/grader.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/grader.py
new file mode 100644
index 000000000000..7bf0c1cb279c
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/grader.py
@@ -0,0 +1,182 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+from autogen_core import Image
+from autogen_core.models import (
+ AssistantMessage,
+ ChatCompletionClient,
+ CreateResult,
+ LLMMessage,
+ SystemMessage,
+ UserMessage,
+)
+
+from ._functions import UserContent
+from .page_logger import PageLogger
+
+if TYPE_CHECKING:
+ from .apprentice import Apprentice
+
+
+class Grader:
+ """
+    Runs basic tests, and determines task success without being limited to exact string matching.
+
+ Args:
+ client: The client to call the model.
+ logger: An optional logger. If None, no logging will be performed.
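+
+    Example:
+
+        A minimal sketch of grading a single response (assuming an OpenAI key is set in the environment):
+
+        .. code-block:: python
+
+            import asyncio
+
+            from autogen_ext.models.openai import OpenAIChatCompletionClient
+            from autogen_ext.task_centric_memory.utils import Grader
+
+
+            async def main() -> None:
+                client = OpenAIChatCompletionClient(model="gpt-4o-2024-08-06")
+                grader = Grader(client)
+                is_correct, extracted_answer = await grader.is_response_correct(
+                    task_description="What is the capital of France?",
+                    response_to_be_graded="After some reflection, the answer is Paris.",
+                    correct_answer="Paris",
+                )
+                print(is_correct, extracted_answer)
+
+
+            asyncio.run(main())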
+ """
+
+ def __init__(self, client: ChatCompletionClient, logger: PageLogger | None = None) -> None:
+ if logger is None:
+ logger = PageLogger() # Nothing will be logged by this object.
+ self.logger = logger
+ self.client = client
+
+ # Check whether to report results to the client.
+ self.report_results = hasattr(self.client, "report_result")
+
+ # Create the chat history
+ self._chat_history: List[LLMMessage] = []
+
+ async def test_apprentice(
+ self,
+ apprentice: Apprentice,
+ task_description: str,
+ expected_answer: str,
+ num_trials: int,
+ use_memory: bool,
+ client: ChatCompletionClient,
+ ) -> Tuple[int, int]:
+ self.logger.enter_function()
+
+ self.logger.info("Testing the apprentice on the given task.\n")
+
+ num_successes = 0
+
+ for trial in range(num_trials):
+ self.logger.info("\n----- TRIAL {} -----\n".format(trial + 1))
+ self.logger.info("Try to solve the task.\n")
+ response = await apprentice.assign_task(task_description, use_memory=use_memory)
+ response_is_correct, extracted_answer = await self.is_response_correct(
+ task_description, response, expected_answer
+ )
+ self.logger.info("Extracted answer: {}".format(extracted_answer))
+ if response_is_correct:
+ self.logger.info("Answer is CORRECT.\n")
+ num_successes += 1
+ else:
+ self.logger.info("Answer is INCORRECT.\n")
+
+ self.logger.info("\nSuccess rate: {}%\n".format(round((num_successes / num_trials) * 100)))
+ self.logger.leave_function()
+ return num_successes, num_trials
+
+ async def call_model(
+ self,
+ summary: str,
+ user_content: UserContent,
+ system_message_content: str | None = None,
+ keep_these_messages: bool = True,
+ ) -> str:
+ """
+ Calls the model client with the given input and returns the response.
+ """
+ # Prepare the input message list
+ if system_message_content is None:
+ system_message_content = "You are a helpful assistant."
+ system_message: LLMMessage
+ if self.client.model_info["family"] == "o1":
+ # No system message allowed, so pass it as the first user message.
+ system_message = UserMessage(content=system_message_content, source="User")
+ else:
+ # System message allowed.
+ system_message = SystemMessage(content=system_message_content)
+ user_message = UserMessage(content=user_content, source="User")
+ input_messages = [system_message] + self._chat_history + [user_message]
+
+ # Call the model.
+ response = await self.client.create(input_messages)
+ assert isinstance(response, CreateResult)
+ response_string = response.content
+ assert isinstance(response_string, str)
+ response_message = AssistantMessage(content=response_string, source="Assistant")
+ assert isinstance(response_message, AssistantMessage)
+
+ # Log the model call
+ self.logger.log_model_call(summary=summary, input_messages=input_messages, response=response)
+
+ # Manage the chat history
+ if keep_these_messages:
+ self._chat_history.append(user_message)
+ self._chat_history.append(response_message)
+
+ # Return the response as a string
+ return response_string
+
+ def _clear_history(self) -> None:
+ """
+ Empties the message list containing the chat history.
+ """
+ self._chat_history = []
+
+ async def is_response_correct(
+ self, task_description: str, response_to_be_graded: str, correct_answer: str
+ ) -> Tuple[bool, str]:
+ """
+ Determines whether the response is equivalent to the task's correct answer.
+ """
+ self.logger.enter_function()
+
+ sys_message = """You are a helpful and thoughtful assistant."""
+
+ # Ask the model to extract the answer from the response.
+ user_message: List[Union[str, Image]] = []
+ user_message.append("""Your job is to extract a possible answer to the following question from the given text.
+- First review the following task.
+- Then review the text that follows, which may contain an answer, plus reasoning that led to the answer.
+- Do not attempt to actually solve the task yourself.
+- Don't try to judge whether the reasoning steps were correct.
+- Simply respond by summarizing the answer described in the text, omitting any other parts of the text.
+- If no answer can be extracted from the text, simply reply "None".""")
+ user_message.append("\n# Task description")
+ user_message.append(task_description)
+ user_message.append("\n# Text that may contain an answer")
+ user_message.append(response_to_be_graded)
+ user_message_arg: UserContent = user_message
+ self._clear_history()
+ extracted_answer = await self.call_model(
+ summary="Ask the model to extract the answer",
+ system_message_content=sys_message,
+ user_content=user_message_arg,
+ )
+ self.logger.info("Extracted answer: " + extracted_answer)
+
+ # Ask the model to check the answer for correctness.
+ user_message = [
+ """Your job is to decide whether a given answer to a task is correct or not.
+- You will be given the task description and the correct, gold-standard answer, along with the answer to be graded.
+- In general, an answer is correct if it is equivalent to the correct answer.
+- Specifically, the given answer must contain the important information from the correct answer, and must not in any way contradict the correct answer.
+- Ignore any differences of grammar, spelling mistakes, punctuation, capitalization, formatting, or extra commentary.
+- An answer should be considered correct if it omits information that is clearly inferred.
+ - For instance, if the correct answer is "Paris, France", the answer "Paris" should be considered correct.
+- Respond with a single character: '1' if the answer to be graded is correct, '0' if not."""
+ ]
+ user_message.append("\n# Task description")
+ user_message.append(task_description)
+ user_message.append("\n# Correct answer")
+ user_message.append(correct_answer)
+ user_message.append("\n# Answer to be graded")
+ user_message.append(extracted_answer)
+ self._clear_history()
+ decision = await self.call_model(
+ summary="Ask the model to check the answer for correctness",
+ system_message_content=sys_message,
+ user_content=user_message,
+ )
+ self.logger.info("Decision: " + decision)
+
+ self.logger.leave_function()
+ return decision == "1", extracted_answer
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/page_logger.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/page_logger.py
new file mode 100644
index 000000000000..57cbd207d36a
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/page_logger.py
@@ -0,0 +1,522 @@
+import inspect
+import json
+import os
+import shutil
+from typing import Dict, List, Optional, Sequence, TypedDict
+
+from autogen_agentchat.base import TaskResult
+from autogen_agentchat.messages import AgentEvent, ChatMessage
+from autogen_core import Image
+from autogen_core.models import (
+ AssistantMessage,
+ CreateResult,
+ FunctionExecutionResultMessage,
+ LLMMessage,
+ RequestUsage,
+ SystemMessage,
+ UserMessage,
+)
+
+from ._functions import MessageContent, hash_directory
+
+
+def _html_opening(file_title: str, finished: bool = False) -> str:
+ """
+ Returns the opening text of a simple HTML file.
+ """
+    # A meta refresh tag keeps unfinished pages auto-reloading in the browser. (The 2-second interval is an assumed value.)
+    refresh_tag = '<meta http-equiv="refresh" content="2">' if not finished else ""
+    st = f"""
+    <!DOCTYPE html>
+    <html>
+    <head>
+        {refresh_tag}
+        <title>{file_title}</title>
+    </head>
+    <body>
+    """
+    return st
+
+
+def _html_closing() -> str:
+ """
+    Returns the closing text of a simple HTML file.
+    """
+    return """</body></html>"""
+
+
+# Following the nested-config pattern, this TypedDict minimizes code changes by encapsulating
+# the settings that change frequently, as when loading many settings from a single YAML file.
+class PageLoggerConfig(TypedDict, total=False):
+ level: str
+ path: str
+
+
+class PageLogger:
+ """
+ Logs text and images to a set of HTML pages, one per function/method, linked to each other in a call tree.
+
+ Args:
+ config: An optional dict that can be used to override the following values:
+
+ - level: The logging level, one of DEBUG, INFO, WARNING, ERROR, CRITICAL, or NONE.
+ - path: The path to the directory where the log files will be written.
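+
+    Example:
+
+        A minimal sketch; the resulting pages can then be viewed by opening
+        ``~/pagelogs/example/0 Call Tree.html`` in a browser:
+
+        .. code-block:: python
+
+            from autogen_ext.task_centric_memory.utils import PageLogger
+
+            logger = PageLogger({"level": "INFO", "path": "~/pagelogs/example"})
+            logger.enter_function()
+            logger.info("Hello from the first logged function.")
+            logger.leave_function()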
+ """
+
+ def __init__(self, config: PageLoggerConfig | None = None) -> None:
+ self.levels = {
+ "DEBUG": 10,
+ "INFO": 20,
+ "WARNING": 30,
+ "ERROR": 40,
+ "CRITICAL": 50,
+ "NONE": 100,
+ }
+
+ # Apply default settings and any config overrides.
+ level_str = "NONE" # Default to no logging at all.
+ self.log_dir = "./pagelogs/default"
+ if config is not None:
+ level_str = config.get("level", level_str)
+ self.log_dir = config.get("path", self.log_dir)
+ self.level = self.levels[level_str]
+ self.log_dir = os.path.expanduser(self.log_dir)
+
+ # If the logging level is set to NONE or higher, don't log anything.
+ if self.level >= self.levels["NONE"]:
+ return
+
+ self.page_stack = PageStack()
+ self.pages: List[Page] = []
+ self.last_page_id = 0
+ self.name = "0 Call Tree"
+ self._create_run_dir()
+ self.flush()
+
+    def __del__(self) -> None:
+        # Writes a hash of the log directory to a file for change detection.
+
+        # Do nothing if logging is disabled, since the log directory was never created.
+        if not hasattr(self, "page_stack"):
+            return
+
+        # Do nothing if the app is being forced to exit early.
+        if self.page_stack.size() > 0:
+            return
+
+ # Compute the hash.
+ hash_str, num_files, num_subdirs = hash_directory(self.log_dir)
+ filename = "00 hash-{}.txt".format(hash_str[-5:])
+ hash_path = os.path.join(self.log_dir, filename)
+
+ # Write the hash to a file.
+ with open(hash_path, "w") as f:
+ f.write(hash_str)
+ f.write("\n")
+ f.write("{} files\n".format(num_files))
+ f.write("{} subdirectories\n".format(num_subdirs))
+
+ self.flush(finished=True)
+
+ @staticmethod
+ def _decorate_text(text: str, color: str, weight: str = "bold", demarcate: bool = False) -> str:
+ """
+ Returns a string of text with HTML styling for weight and color.
+ """
+ if demarcate:
+ text = f"<<<<< {text} >>>>>"
+        return f'<span style="color: {color}; font-weight: {weight};">{text}</span>'
+
+ @staticmethod
+ def _link_to_image(image_path: str, description: str) -> str:
+ """
+ Returns an HTML string defining a thumbnail link to an image.
+ """
+        # To avoid a bug in html rendering that displays underscores to the left of thumbnails,
+        # the full link string is defined on a single line.
+        link = f'<a href="{image_path}"><img src="{image_path}" alt="{description}" style="width: 300px; height: auto;"></a>'
+        return link
+
+ def _get_next_page_id(self) -> int:
+ """Returns the next page id and increments the counter."""
+ self.last_page_id += 1
+ return self.last_page_id
+
+ def _create_run_dir(self) -> None:
+ """Creates a fresh log directory."""
+ if os.path.exists(self.log_dir):
+ shutil.rmtree(self.log_dir)
+ os.makedirs(self.log_dir)
+
+ def _add_page(self, summary: str, show_in_call_tree: bool = True, finished: bool = True) -> "Page":
+ """
+ Adds a new page to the log.
+ """
+ page = Page(
+ page_logger=self,
+ index=self._get_next_page_id(),
+ summary=summary,
+ indent_level=len(self.page_stack.stack),
+ show_in_call_tree=show_in_call_tree,
+ finished=finished,
+ )
+ self.pages.append(page)
+ self.flush()
+ if len(self.page_stack.stack) > 0:
+ # Insert a link to the new page into the calling page.
+ self.info("\n" + page.full_link)
+ return page
+
+ def _log_text(self, text: str) -> None:
+ """
+ Adds text to the current page.
+ """
+ page = self.page_stack.top()
+ if page is not None:
+ page.add_lines(text, flush=True)
+
+ def debug(self, line: str) -> None:
+ """
+ Adds DEBUG text to the current page if debugging level <= DEBUG.
+ """
+ if self.level <= self.levels["DEBUG"]:
+ self._log_text(line)
+
+ def info(self, line: str) -> None:
+ """
+ Adds INFO text to the current page if debugging level <= INFO.
+ """
+ if self.level <= self.levels["INFO"]:
+ self._log_text(line)
+
+ def warning(self, line: str) -> None:
+ """
+ Adds WARNING text to the current page if debugging level <= WARNING.
+ """
+ if self.level <= self.levels["WARNING"]:
+ self._log_text(line)
+
+ def error(self, line: str) -> None:
+ """
+ Adds ERROR text to the current page if debugging level <= ERROR.
+ """
+ if self.level <= self.levels["ERROR"]:
+ self._log_text(line)
+
+ def critical(self, line: str) -> None:
+ """
+ Adds CRITICAL text to the current page if debugging level <= CRITICAL.
+ """
+ if self.level <= self.levels["CRITICAL"]:
+ self._log_text(line)
+
+ def _message_source(self, message: LLMMessage) -> str:
+ """
+ Returns a decorated string indicating the source of a message.
+ """
+ source = "UNKNOWN"
+ color = "black"
+ if isinstance(message, SystemMessage):
+ source = "SYSTEM"
+ color = "purple"
+ elif isinstance(message, UserMessage):
+ source = "USER"
+ color = "blue"
+ elif isinstance(message, AssistantMessage):
+ source = "ASSISTANT"
+ color = "green"
+ elif isinstance(message, FunctionExecutionResultMessage):
+ source = "FUNCTION"
+ color = "red"
+ return self._decorate_text(source, color, demarcate=True)
+
+ def _format_message_content(self, message_content: MessageContent) -> str:
+ """
+ Formats the message content for logging.
+ """
+ # Start by converting the message content to a list of strings.
+ content_list: List[str] = []
+ content = message_content
+ if isinstance(content, str):
+ content_list.append(content)
+ elif isinstance(content, list):
+ for item in content:
+ if isinstance(item, str):
+ content_list.append(item.rstrip())
+ elif isinstance(item, Image):
+ # Save the image to disk.
+ image_filename = str(self._get_next_page_id()) + " image.jpg"
+ image_path = os.path.join(self.log_dir, image_filename)
+ item.image.save(image_path)
+ # Add a link to the image.
+ content_list.append(self._link_to_image(image_filename, "message_image"))
+ elif isinstance(item, Dict):
+ # Add a dictionary to the log.
+ json_str = json.dumps(item, indent=4)
+ content_list.append(json_str)
+ else:
+ content_list.append(str(item).rstrip())
+ else:
+ content_list.append("")
+
+ # Convert the list of strings to a single string containing newline separators.
+ output = ""
+ for item in content_list:
+ output += f"\n{item}\n"
+ return output
+
+ def log_message_content(self, message_content: MessageContent, summary: str) -> None:
+ """
+ Adds a page containing the message's content, including any images.
+ """
+ if self.level > self.levels["INFO"]:
+ return None
+ page = self._add_page(summary=summary, show_in_call_tree=False)
+ self.page_stack.write_stack_to_page(page)
+ page.add_lines(self._format_message_content(message_content=message_content))
+ page.flush()
+
+ def _log_model_messages(
+ self, summary: str, input_messages: List[LLMMessage], response_str: str, usage: RequestUsage | None
+ ) -> Optional["Page"]:
+ """
+ Adds a page containing the messages to a model (including any input images) and its response.
+ """
+ page = self._add_page(summary=summary, show_in_call_tree=False)
+ self.page_stack.write_stack_to_page(page)
+
+ if usage is not None:
+ page.add_lines("{} prompt tokens".format(usage.prompt_tokens))
+ page.add_lines("{} completion tokens".format(usage.completion_tokens))
+ for m in input_messages:
+ page.add_lines("\n" + self._message_source(m))
+ page.add_lines(self._format_message_content(message_content=m.content))
+ page.add_lines("\n" + self._decorate_text("ASSISTANT RESPONSE", "green", demarcate=True))
+ page.add_lines("\n" + response_str + "\n")
+ page.flush()
+ return page
+
+ def log_model_call(
+ self, summary: str, input_messages: List[LLMMessage], response: CreateResult
+ ) -> Optional["Page"]:
+ """
+ Logs messages sent to a model and the TaskResult response to a new page.
+ """
+ if self.level > self.levels["INFO"]:
+ return None
+
+ response_str = response.content
+ if not isinstance(response_str, str):
+ response_str = "??"
+
+ page = self._log_model_messages(summary, input_messages, response_str, response.usage)
+ return page
+
+ def log_model_task(
+ self, summary: str, input_messages: List[LLMMessage], task_result: TaskResult
+ ) -> Optional["Page"]:
+ """
+ Logs messages sent to a model and the TaskResult response to a new page.
+ """
+ if self.level > self.levels["INFO"]:
+ return None
+
+ messages: Sequence[AgentEvent | ChatMessage] = task_result.messages
+ message = messages[-1]
+ response_str = message.content
+ if not isinstance(response_str, str):
+ response_str = "??"
+
+ if hasattr(message, "models_usage"):
+ usage: RequestUsage | None = message.models_usage
+ else:
+ usage = RequestUsage(prompt_tokens=0, completion_tokens=0)
+
+ page = self._log_model_messages(summary, input_messages, response_str, usage)
+ return page
+
+ def log_link_to_local_file(self, file_path: str) -> str:
+ """
+ Returns a link to a local file in the log.
+ """
+ file_name = os.path.basename(file_path)
+        link = f'<a href="{file_path}">{file_name}</a>'
+ return link
+
+ def add_link_to_image(self, description: str, source_image_path: str) -> None:
+ """
+ Inserts a thumbnail link to an image to the page.
+ """
+ # Remove every character from the string 'description' that is not alphanumeric or a space.
+ description = "".join(e for e in description if e.isalnum() or e.isspace())
+ target_image_filename = str(self._get_next_page_id()) + " - " + description
+ # Copy the image to the log directory.
+ local_image_path = os.path.join(self.log_dir, target_image_filename)
+ shutil.copyfile(source_image_path, local_image_path)
+ self._log_text("\n" + description)
+ self._log_text(self._link_to_image(target_image_filename, description))
+
+ def flush(self, finished: bool = False) -> None:
+ """
+ Writes the current state of the log to disk.
+ """
+ if self.level > self.levels["INFO"]:
+ return
+ # Create a call tree of the log.
+ call_tree_path = os.path.join(self.log_dir, self.name + ".html")
+ with open(call_tree_path, "w") as f:
+ f.write(_html_opening("0 Call Tree", finished=finished))
+ f.write(f"{self.name}
")
+ f.write("\n")
+ for page in self.pages:
+ if page.show_in_call_tree:
+ f.write(page.line_text + "\n")
+ f.write("\n")
+ f.write(_html_closing())
+
+ def enter_function(self) -> Optional["Page"]:
+ """
+ Adds a new page corresponding to the current function call.
+ """
+ if self.level > self.levels["INFO"]:
+ return None
+
+ page = None
+ frame_type = inspect.currentframe()
+ if frame_type is not None:
+ frame = frame_type.f_back # Get the calling frame
+ if frame is not None:
+ # Check if it's a method by looking for 'self' or 'cls' in f_locals
+ if "self" in frame.f_locals:
+ class_name = type(frame.f_locals["self"]).__name__
+ elif "cls" in frame.f_locals:
+ class_name = frame.f_locals["cls"].__name__
+ else:
+ class_name = None # Not part of a class
+
+ if class_name is None: # Not part of a class
+ caller_name = frame.f_code.co_name
+ else:
+ caller_name = class_name + "." + frame.f_code.co_name
+
+ # Create a new page for this function.
+ page = self._add_page(summary=caller_name, show_in_call_tree=True, finished=False)
+ self.page_stack.push(page)
+ self.page_stack.write_stack_to_page(page)
+
+ page.add_lines("\nENTER {}".format(caller_name), flush=True)
+ return page
+
+ def leave_function(self) -> None:
+ """
+ Finishes the page corresponding to the current function call.
+ """
+ if self.level > self.levels["INFO"]:
+ return None
+ page = self.page_stack.top()
+ if page is not None:
+ page.finished = True
+ page.add_lines("\nLEAVE {}".format(page.summary), flush=True)
+ self.page_stack.pop()
+
+
+class Page:
+ """
+ Represents a single HTML page in the logger output.
+
+ Args:
+ page_logger: The PageLogger object that created this page.
+ index: The index of the page.
+ summary: A brief summary of the page's contents for display.
+ indent_level: The level of indentation in the call tree.
+ show_in_call_tree: Whether to display the page in the call tree.
+ finished: Whether the page is complete.
+ """
+
+ def __init__(
+ self,
+ page_logger: PageLogger,
+ index: int,
+ summary: str,
+ indent_level: int,
+ show_in_call_tree: bool = True,
+ finished: bool = True,
+ ):
+ """
+ Initializes and writes to a new HTML page.
+ """
+ self.page_logger = page_logger
+ self.index_str = str(index)
+ self.summary = summary
+ self.indent_level = indent_level
+ self.show_in_call_tree = show_in_call_tree
+ self.finished = finished
+ self.file_title = self.index_str + " " + self.summary
+ self.indentation_text = "| " * self.indent_level
+        self.full_link = f'<a href="{self.index_str}.html">{self.file_title}</a>'
+ self.line_text = self.indentation_text + self.full_link
+ self.lines: List[str] = []
+ self.flush()
+
+ def add_lines(self, lines: str, flush: bool = False) -> None:
+ """
+ Adds one or more lines to the page.
+ """
+ lines_to_add: List[str] = []
+ if "\n" in lines:
+ lines_to_add = lines.split("\n")
+ else:
+ lines_to_add.append(lines)
+ self.lines.extend(lines_to_add)
+ if flush:
+ self.flush()
+
+ def flush(self) -> None:
+ """
+ Writes the HTML page to disk.
+ """
+ page_path = os.path.join(self.page_logger.log_dir, self.index_str + ".html")
+ with open(page_path, "w") as f:
+ f.write(_html_opening(self.file_title, finished=self.finished))
+ f.write(f"{self.file_title}
\n")
+ for line in self.lines:
+ try:
+ f.write(f"{line}\n")
+ except UnicodeEncodeError:
+ f.write("UnicodeEncodeError in this line.\n")
+ f.write(_html_closing())
+ f.flush()
+
+
+class PageStack:
+ """
+ A call stack containing a list of currently active function pages in the order they called each other.
+ """
+
+ def __init__(self) -> None:
+ self.stack: List[Page] = []
+
+ def push(self, page: Page) -> None:
+ """Adds a page to the top of the stack."""
+ self.stack.append(page)
+
+ def pop(self) -> Page:
+ """Removes and returns the top page from the stack"""
+ return self.stack.pop()
+
+ def size(self) -> int:
+ """Returns the number of pages in the stack."""
+ return len(self.stack)
+
+ def top(self) -> Page | None:
+ """Returns the top page from the stack without removing it"""
+ if self.size() == 0:
+ return None
+ return self.stack[-1]
+
+ def write_stack_to_page(self, page: Page) -> None:
+ # Logs a properly indented string displaying the current call stack.
+ page.add_lines("\nCALL STACK")
+ for stack_page in self.stack:
+ page.add_lines(stack_page.line_text)
+ page.add_lines("")
+ page.add_lines("")
+ page.flush()
diff --git a/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/teachability.py b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/teachability.py
new file mode 100644
index 000000000000..41dd46912b3b
--- /dev/null
+++ b/python/packages/autogen-ext/src/autogen_ext/task_centric_memory/utils/teachability.py
@@ -0,0 +1,126 @@
+from typing import TYPE_CHECKING, Any
+
+from autogen_core import CancellationToken, Image
+from autogen_core.memory import Memory, MemoryContent, MemoryMimeType, MemoryQueryResult, UpdateContextResult
+from autogen_core.model_context import ChatCompletionContext
+from autogen_core.models import UserMessage
+
+if TYPE_CHECKING:
+ from autogen_ext.task_centric_memory import TaskCentricMemoryController
+
+
+class Teachability(Memory):
+ """
+ Gives an AssistantAgent the ability to learn quickly from user teachings, hints, and advice.
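+
+    Example:
+
+        A minimal sketch, mirroring the sample script in ``python/samples/task_centric_memory``:
+
+        .. code-block:: python
+
+            from autogen_agentchat.agents import AssistantAgent
+            from autogen_ext.models.openai import OpenAIChatCompletionClient
+            from autogen_ext.task_centric_memory import TaskCentricMemoryController
+            from autogen_ext.task_centric_memory.utils import Teachability
+
+            client = OpenAIChatCompletionClient(model="gpt-4o-2024-08-06")
+            memory_controller = TaskCentricMemoryController(reset=False, client=client)
+            teachability = Teachability(memory_controller=memory_controller)
+            assistant_agent = AssistantAgent(
+                name="teachable_agent",
+                model_client=client,
+                memory=[teachability],
+            )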
+ """
+
+ def __init__(self, memory_controller: "TaskCentricMemoryController", name: str | None = None) -> None:
+ """Initialize Teachability."""
+ self._memory_controller = memory_controller
+ self._logger = memory_controller.logger
+ self._name = name or "teachability"
+
+ @property
+ def name(self) -> str:
+ """Get the memory instance identifier."""
+ return self._name
+
+ def _extract_text(self, content_item: str | MemoryContent) -> str:
+ """Extract searchable text from content."""
+ if isinstance(content_item, str):
+ return content_item
+
+ content = content_item.content
+ mime_type = content_item.mime_type
+
+ if mime_type in [MemoryMimeType.TEXT, MemoryMimeType.MARKDOWN]:
+ return str(content)
+ elif mime_type == MemoryMimeType.JSON:
+ if isinstance(content, dict):
+ # Store original JSON string representation
+ return str(content).lower()
+ raise ValueError("JSON content must be a dict")
+ elif isinstance(content, Image):
+ raise ValueError("Image content cannot be converted to text")
+ else:
+ raise ValueError(f"Unsupported content type: {mime_type}")
+
+ async def update_context(
+ self,
+ model_context: ChatCompletionContext,
+ ) -> UpdateContextResult:
+ """
+ Extracts any advice from the last user turn to be stored in memory,
+ and adds any relevant memories to the model context.
+ """
+ self._logger.enter_function()
+
+ # Extract text from the user's last message
+ messages = await model_context.get_messages()
+ if not messages:
+ self._logger.leave_function()
+ return UpdateContextResult(memories=MemoryQueryResult(results=[]))
+ last_message = messages[-1]
+ last_user_text = last_message.content if isinstance(last_message.content, str) else str(last_message)
+
+ # Add any relevant memories to the chat history
+ query_results = await self.query(last_user_text)
+ if query_results.results:
+ memory_strings = [f"{i}. {str(memory.content)}" for i, memory in enumerate(query_results.results, 1)]
+ memory_context = "\nPotentially relevant memories:\n" + "\n".join(memory_strings)
+ await model_context.add_message(UserMessage(content=memory_context, source="user"))
+
+ # Add any user advice to memory
+ await self._memory_controller.consider_memo_storage(last_user_text)
+
+ self._logger.leave_function()
+ return UpdateContextResult(memories=query_results)
+
+ async def add(self, content: MemoryContent, cancellation_token: CancellationToken | None = None) -> None:
+ """
+ Tries to extract any advice from the passed content and add it to memory.
+ """
+ self._logger.enter_function()
+
+ # Extract text from the incoming content
+ text = self._extract_text(content)
+
+ # Check for advice to add to memory for later turns.
+ await self._memory_controller.consider_memo_storage(text)
+
+ self._logger.leave_function()
+
+ async def query(
+ self,
+ query: str | MemoryContent,
+ cancellation_token: CancellationToken | None = None,
+ **kwargs: Any,
+ ) -> MemoryQueryResult:
+ """
+ Returns any memories that seem relevant to the query.
+ """
+ self._logger.enter_function()
+
+ task = self._extract_text(query)
+ memory_results: list[MemoryContent] = []
+ filtered_memos = await self._memory_controller.retrieve_relevant_memos(task=task)
+ filtered_insights = [memo.insight for memo in filtered_memos]
+ for insight in filtered_insights:
+ self._logger.info(f"Insight: {insight}")
+ memory_content = MemoryContent(
+ content=insight,
+ mime_type="MemoryMimeType.TEXT",
+ metadata={},
+ )
+ memory_results.append(memory_content)
+
+ self._logger.leave_function()
+ return MemoryQueryResult(results=memory_results)
+
+ async def clear(self) -> None:
+ """Clear all entries from memory."""
+ self._memory_controller.reset_memory()
+
+ async def close(self) -> None:
+ """Clean up memory resources."""
+ pass # No cleanup needed for this memory implementation
diff --git a/python/samples/task_centric_memory/README.md b/python/samples/task_centric_memory/README.md
new file mode 100644
index 000000000000..f3b8add8e86a
--- /dev/null
+++ b/python/samples/task_centric_memory/README.md
@@ -0,0 +1,136 @@
+# Task-Centric Memory Code Samples
+_(EXPERIMENTAL, RESEARCH IN PROGRESS)_
+
+<img align="right" src="../../packages/autogen-ext/imgs/task_centric_memory.png" alt="Block diagram of task-centric memory" width="300">
+
+This directory contains code samples that illustrate the following forms of fast, memory-based learning:
+* Direct memory storage and retrieval
+* Learning from user advice and corrections
+* Learning from user demonstrations
+* Learning from the agent's own experience
+
+Each sample connects task-centric memory to a selectable agent with no changes to that agent's code.
+See the block diagram to the right for an overview of the components and their interactions.
+
+Each sample is contained in a separate Python script, with its data and settings stored in YAML files for easy modification.
+Note that since agent behavior is non-deterministic, results will vary between runs.
+
+To watch operations live in a browser and see how task-centric memory works,
+open the HTML page at the location specified at the top of the config file,
+such as: `~/pagelogs/teachability/0 Call Tree.html`
+
+The config files specify an _AssistantAgent_ by default, which uses a fixed, multi-step system prompt.
+To use _MagenticOneGroupChat_ instead, specify that in the yaml file where indicated.
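+
+For instance, in the Apprentice-based configs below, the switch is a single line under the `Apprentice` section:
+
+```yaml
+Apprentice:
+  name_of_agent_or_team: MagenticOneGroupChat  # AssistantAgent or MagenticOneGroupChat
+```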
+
+
+## Installation
+
+Install AutoGen and its extension package as follows:
+
+```bash
+pip install -U "autogen-agentchat" "autogen-ext[openai]" "autogen-ext[task-centric-memory]"
+```
+
+Assign your OpenAI key to the environment variable OPENAI_API_KEY,
+or else modify `utils/client.py` as appropriate for the model you choose.
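+
+For example, on Linux or macOS (the key value shown is a placeholder):
+
+```bash
+export OPENAI_API_KEY="sk-..."
+```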
+
+
+## Running the Samples
+
+The following samples are listed in order of increasing complexity.
+Execute the corresponding commands from this directory (`python/samples/task_centric_memory`).
+
+
+### Making AssistantAgent teachable
+
+This short, interactive code sample shows how to make an AssistantAgent teachable.
+The following transcript shows the agent retaining a user teaching from one chat session to the next,
+starting from an empty memory bank.
+
+```bash
+rm -r memory_bank
+python chat_with_teachable_agent.py
+Now chatting with a teachable agent. Please enter your first message. Type 'exit' or 'quit' to quit.
+
+You: How many items should be put in research summaries?
+---------- user ----------
+How many items should be put in research summaries?
+---------- teachable_agent ----------
+
+
+You: Whenever asked to prepare a research summary, try to cover just the 5 top items.
+---------- user ----------
+Whenever asked to prepare a research summary, try to cover just the 5 top items.
+---------- teachable_agent ----------
+
+
+You: quit
+
+python chat_with_teachable_agent.py
+Now chatting with a teachable agent. Please enter your first message. Type 'exit' or 'quit' to quit.
+
+You: How many items should be put in research summaries?
+---------- user ----------
+How many items should be put in research summaries?
+---------- teachable_agent ----------
+[MemoryContent(content='Whenever asked to prepare a research summary, try to cover just the 5 top items.', mime_type='MemoryMimeType.TEXT', metadata={})]
+---------- teachable_agent ----------
+
+```
+
+
+### Direct Memory Storage and Retrieval
+
+This sample shows how an app can access the `TaskCentricMemoryController` directly
+to retrieve previously stored task-insight pairs as potentially useful exemplars when solving some new task.
+A task is any text instruction that the app may give to an agent.
+An insight is any text (like a hint, advice, a demonstration, or a plan) that might help the agent perform such tasks.
+
+A typical app will perform the following steps in some interleaved order:
+1. Call the `TaskCentricMemoryController` repeatedly to store a set of memories (task-insight pairs).
+2. Call the `TaskCentricMemoryController` repeatedly to retrieve any memories related to a new task.
+3. Use the retrieved insights, typically by adding them to the agent's context window. (This step is not illustrated by this code sample; see the sketch below.)
+
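+Hypothetically, steps 1 and 2 might look like the following minimal sketch. It assumes an OpenAI key is set in the environment, and that `add_memo` accepts a task alongside an insight:
+
+```python
+import asyncio
+
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from autogen_ext.task_centric_memory import TaskCentricMemoryController
+
+
+async def main() -> None:
+    client = OpenAIChatCompletionClient(model="gpt-4o-2024-08-06")
+    memory_controller = TaskCentricMemoryController(reset=True, client=client)
+
+    # Step 1: store a task-insight pair in memory.
+    await memory_controller.add_memo(
+        task="How many items should a research summary cover?",
+        insight="Research summaries should cover just the 5 top items.",
+    )
+
+    # Step 2: retrieve any memories related to a new task.
+    memos = await memory_controller.retrieve_relevant_memos(task="Prepare a research summary on agent memory.")
+
+    # Step 3 would typically add the retrieved insights to the agent's context window.
+    for memo in memos:
+        print(memo.insight)
+
+
+asyncio.run(main())
+```
+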
+This sample code adds several task-insight pairs to memory, retrieves memories for a set of new tasks,
+logs the full retrieval results, and reports the retrieval precision and recall.
+
+`python eval_retrieval.py configs/retrieval.yaml`
+
+Precision and recall for this sample are usually near 100%.
+
+
+### Agent Learning from User Advice and Corrections
+
+This sample first tests the agent (once) for knowledge it currently lacks.
+Then the agent is given advice to help it solve the task, and the context window is cleared.
+Finally, the agent is tested once again to see if it can retrieve and use the advice successfully.
+
+`python eval_teachability.py configs/teachability.yaml`
+
+With the benefit of memory, the agent usually succeeds on this sample.
+
+
+### Agent Learning from User Demonstrations
+
+This sample asks the agent to perform a reasoning task (ten times) on which it usually fails.
+The agent is then given one demonstration of how to solve a similar but different task, and the context window is cleared.
+Finally, the agent is tested ten more times to see if it can retrieve and apply the demonstration to the original task.
+
+`python eval_learning_from_demonstration.py configs/demonstration.yaml`
+
+The agent's success rate tends to be measurably higher after the demonstration has been stored in memory.
+
+
+### Agent Learning from Its Own Experience
+
+This sample asks the agent to perform a reasoning task on which it usually fails.
+Then using automatic success or failure feedback (for a verifiable task with no side-effects on the environment),
+the agent iterates through a background learning loop to find a solution, which it then stores as an insight in memory.
+Finally, the agent is tested again to see if it can retrieve and apply its insight to the original task,
+as well as to a similar but different task as a test of generalization.
+
+`python eval_self_teaching.py configs/self_teaching.yaml`
+
+Using memory, the agent usually completes both tasks successfully in the second set of trials.
diff --git a/python/samples/task_centric_memory/chat_with_teachable_agent.py b/python/samples/task_centric_memory/chat_with_teachable_agent.py
new file mode 100644
index 000000000000..b8c32eed4053
--- /dev/null
+++ b/python/samples/task_centric_memory/chat_with_teachable_agent.py
@@ -0,0 +1,36 @@
+from autogen_agentchat.agents import AssistantAgent
+from autogen_agentchat.ui import Console
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from autogen_ext.task_centric_memory import TaskCentricMemoryController
+from autogen_ext.task_centric_memory.utils import Teachability
+
+
+async def main():
+ # Create a client
+    client = OpenAIChatCompletionClient(model="gpt-4o-2024-08-06")
+
+ # Create an instance of Task-Centric Memory, passing minimal parameters for this simple example
+ memory_controller = TaskCentricMemoryController(reset=False, client=client)
+
+ # Wrap the memory controller in a Teachability instance
+ teachability = Teachability(memory_controller=memory_controller)
+
+ # Create an AssistantAgent, and attach teachability as its memory
+ assistant_agent = AssistantAgent(
+ name="teachable_agent",
+ system_message = "You are a helpful AI assistant, with the special ability to remember user teachings from prior conversations.",
+ model_client=client,
+ memory=[teachability],
+ )
+
+ # Enter a loop to chat with the teachable agent
+ print("Now chatting with a teachable agent. Please enter your first message. Type 'exit' or 'quit' to quit.")
+ while True:
+ user_input = input("\nYou: ")
+ if user_input.lower() in ["exit", "quit"]:
+ break
+ await Console(assistant_agent.run_stream(task=user_input))
+
+if __name__ == "__main__":
+ import asyncio
+ asyncio.run(main())
diff --git a/python/samples/task_centric_memory/configs/demonstration.yaml b/python/samples/task_centric_memory/configs/demonstration.yaml
new file mode 100644
index 000000000000..302686093be2
--- /dev/null
+++ b/python/samples/task_centric_memory/configs/demonstration.yaml
@@ -0,0 +1,31 @@
+
+PageLogger:
+ level: DEBUG # DEBUG, INFO, WARNING, ERROR, CRITICAL, or NONE.
+ path: ~/pagelogs/demonstration
+
+client:
+ model: gpt-4o-2024-08-06
+ temperature: 0.8
+ max_completion_tokens: 4096
+ presence_penalty: 0.0
+ frequency_penalty: 0.0
+ top_p: 1.0
+ max_retries: 65535
+
+Apprentice:
+ name_of_agent_or_team: AssistantAgent # AssistantAgent or MagenticOneGroupChat
+ disable_prefix_caching: 1 # If true, prepends a small random string to the context, to decorrelate repeated runs.
+ TaskCentricMemoryController:
+ max_train_trials: 10
+ max_test_trials: 3
+ TaskCentricMemoryBank:
+ path: ~/task_centric_memory_bank/temp
+ relevance_conversion_threshold: 1.7
+ n_results: 25
+ distance_threshold: 100
+
+test:
+ main_task_file: data_files/tasks/cell_towers_1.yaml # The task being tested.
+ demo_task_file: data_files/tasks/cell_towers_2.yaml # A similar but different task.
+ demo_solution_file: data_files/insights/cell_towers_2_demo.yaml # A demonstration of solving the second task.
+ num_trials: 10
diff --git a/python/samples/task_centric_memory/configs/retrieval.yaml b/python/samples/task_centric_memory/configs/retrieval.yaml
new file mode 100644
index 000000000000..c94625b4b359
--- /dev/null
+++ b/python/samples/task_centric_memory/configs/retrieval.yaml
@@ -0,0 +1,38 @@
+
+PageLogger:
+ level: DEBUG # DEBUG, INFO, WARNING, ERROR, CRITICAL, or NONE.
+ path: ~/pagelogs/retrieval
+
+client:
+ model: gpt-4o-2024-08-06
+ temperature: 0.8
+ max_completion_tokens: 4096
+ presence_penalty: 0.0
+ frequency_penalty: 0.0
+ top_p: 1.0
+ max_retries: 65535
+
+TaskCentricMemoryController:
+ TaskCentricMemoryBank:
+ path: ~/task_centric_memory_bank/retrieval
+ relevance_conversion_threshold: 1.7
+ n_results: 25
+ distance_threshold: 100
+
+test:
+ tasks:
+ - data_files/tasks/10_liars.yaml
+ - data_files/tasks/100_vampires.yaml
+ - data_files/tasks/autogen_package.yaml
+ - data_files/tasks/cell_towers_1.yaml
+ - data_files/tasks/cell_towers_2.yaml
+ insights:
+ - data_files/insights/add_topic.yaml
+ - data_files/insights/cell_towers_2_demo.yaml
+ - data_files/insights/liar_advice.yaml
+ task_insight_relevance: # Rows and columns represent (respectively) the tasks and insights listed above.
+ - [0, 0, 2] # 2 denotes a mutually relevant task-insight pair, stored in memory.
+ - [0, 0, 1] # 1 denotes a mutually relevant task-insight pair, not stored in memory.
+ - [2, 0, 0] # 0 denotes a mutually irrelevant task-insight pair.
+ - [0, 1, 0]
+ - [0, 2, 0]
diff --git a/python/samples/task_centric_memory/configs/self_teaching.yaml b/python/samples/task_centric_memory/configs/self_teaching.yaml
new file mode 100644
index 000000000000..7ca2b78e5d64
--- /dev/null
+++ b/python/samples/task_centric_memory/configs/self_teaching.yaml
@@ -0,0 +1,31 @@
+
+PageLogger:
+ level: DEBUG # DEBUG, INFO, WARNING, ERROR, CRITICAL, or NONE.
+ path: ~/pagelogs/self-teaching
+
+client:
+ model: gpt-4o-2024-08-06
+ temperature: 0.8
+ max_completion_tokens: 4096
+ presence_penalty: 0.0
+ frequency_penalty: 0.0
+ top_p: 1.0
+ max_retries: 65535
+
+Apprentice:
+ name_of_agent_or_team: AssistantAgent # AssistantAgent or MagenticOneGroupChat
+ disable_prefix_caching: 1 # If true, prepends a small random string to the context, to decorrelate repeated runs.
+ TaskCentricMemoryController:
+ max_train_trials: 10
+ max_test_trials: 3
+ TaskCentricMemoryBank:
+ path: ~/task_centric_memory_bank/temp
+ relevance_conversion_threshold: 1.7
+ n_results: 25
+ distance_threshold: 100
+
+test:
+ task_file_1: data_files/tasks/10_liars.yaml # Train and test on this task.
+ task_file_2: data_files/tasks/100_vampires.yaml # Test generalization on this different, similar task.
+ num_loops: 10
+ num_final_test_trials: 3
diff --git a/python/samples/task_centric_memory/configs/teachability.yaml b/python/samples/task_centric_memory/configs/teachability.yaml
new file mode 100644
index 000000000000..92f612a17765
--- /dev/null
+++ b/python/samples/task_centric_memory/configs/teachability.yaml
@@ -0,0 +1,29 @@
+
+PageLogger:
+ level: DEBUG # DEBUG, INFO, WARNING, ERROR, CRITICAL, or NONE.
+ path: ~/pagelogs/teachability
+
+client:
+ model: gpt-4o-2024-08-06
+ temperature: 0.8
+ max_completion_tokens: 4096
+ presence_penalty: 0.0
+ frequency_penalty: 0.0
+ top_p: 1.0
+ max_retries: 65535
+
+Apprentice:
+ name_of_agent_or_team: AssistantAgent # AssistantAgent or MagenticOneGroupChat
+ disable_prefix_caching: 1 # If true, prepends a small random string to the context, to decorrelate repeated runs.
+ TaskCentricMemoryController:
+ max_train_trials: 10
+ max_test_trials: 3
+ TaskCentricMemoryBank:
+ path: ~/task_centric_memory_bank/temp
+ relevance_conversion_threshold: 1.7
+ n_results: 25
+ distance_threshold: 100
+
+test:
+ task_file: data_files/tasks/autogen_package.yaml # The task being tested.
+ insight_file: data_files/insights/add_topic.yaml # Advice provided to help solve the task.
diff --git a/python/samples/task_centric_memory/data_files/insights/add_topic.yaml b/python/samples/task_centric_memory/data_files/insights/add_topic.yaml
new file mode 100644
index 000000000000..05e334ccbf7e
--- /dev/null
+++ b/python/samples/task_centric_memory/data_files/insights/add_topic.yaml
@@ -0,0 +1,6 @@
+# Advice created manually for solving the autogen_package task.
+
+insight: 'When somebody builds something on autogen and wants to contribute it to autogen,
+ instead of creating a new autogen package, it''s better for them to implement it in a separate github repo,
+ then just add the autogen-extension topic to that repo.
+ That way their contribution will be automatically discoverable through autogen''s page on community extensions: https://microsoft.github.io/autogen/dev/user-guide/extensions-user-guide/index.html'
diff --git a/python/samples/task_centric_memory/data_files/insights/cell_towers_2_demo.yaml b/python/samples/task_centric_memory/data_files/insights/cell_towers_2_demo.yaml
new file mode 100644
index 000000000000..ca00473067ef
--- /dev/null
+++ b/python/samples/task_centric_memory/data_files/insights/cell_towers_2_demo.yaml
@@ -0,0 +1,11 @@
+# Demo created manually for solving the cell_towers_2 task.
+
+insight: 'Sort the houses by location: 3, 6, 10, 11, 12, 17, 19, 20.
+ Then start at one end and place the towers only where absolutely needed.
+ The house at 3 could be served by a tower as far away as mile marker 7, because 3 + 4 = 7, so place a tower at 7.
+ This obviously covers houses up to mile 7.
+ But a coverage radius of 4 miles (in each direction) means a total coverage of 8 miles.
+ So the tower at mile 7 would reach all the way to mile 11, covering the houses at 10 and 11.
+ The next uncovered house would be at mile 12 (not 10), requiring a second tower.
+ It could go at mile 16 (which is 12 + 4) and this tower would reach up to mile 20 (16 + 4),
+ covering the remaining houses. So 2 towers would be enough.'
diff --git a/python/samples/task_centric_memory/data_files/insights/liar_advice.yaml b/python/samples/task_centric_memory/data_files/insights/liar_advice.yaml
new file mode 100644
index 000000000000..502379d37896
--- /dev/null
+++ b/python/samples/task_centric_memory/data_files/insights/liar_advice.yaml
@@ -0,0 +1,6 @@
+# Advice created automatically for solving the 10_liars task.
+
+insight: 'When solving logic puzzles, carefully consider all possible scenarios,
+ including the simplest ones, and remember that if everyone is lying,
+ their statements should naturally align with the known conditions without needing a truth-teller.
+ Always double-check that your conclusions don''t inadvertently introduce contradictions.'
diff --git a/python/samples/task_centric_memory/data_files/tasks/100_vampires.yaml b/python/samples/task_centric_memory/data_files/tasks/100_vampires.yaml
new file mode 100644
index 000000000000..2e2341d91fd1
--- /dev/null
+++ b/python/samples/task_centric_memory/data_files/tasks/100_vampires.yaml
@@ -0,0 +1,22 @@
+# From GAIA L1
+
+task_description: "You are Van Helsing, a renowned vampire hunter. A Count of Moldova, La\u021B\
+ cu IV, son of Costea, has tasked you with investigating the village of \u0218\
+ irnea in neighboring Wallachia. The Count's advisors have reported that a vampire\
+ \ was spotted crossing the border near the village, and would like you to investigate\
+ \ it.\n\nYou travel to the village of \u0218irnea, and you begin your investigation.\
+ \ One night, just before dawn, you catch a glimpse of a man in a long black\
+ \ cape with red lining leaping from roof-top to roof-top with superhuman agility.\
+ \ It's a vampire! You try to chase the creature back to its home, but the creature\
+ \ is too fast. However, because of the remoteness of the village, you know with\
+ \ absolute certainty that the vampire must be a resident of the village. You\
+ \ decide that your best course of action will be to visit all 100 residents\
+ \ of the town during the day. You know something about vampires and humans that\
+ \ will make your investigation possible; humans always tell the truth, but vampires\
+ \ always lie.\n\nIn the afternoon, you go from house to house, speaking with\
+ \ all 100 residents of \u0218irnea. You ask everyone the same question: \"How\
+ \ many vampires are living in \u0218irnea\". Everyone in the village gives the\
+ \ same response, \"At least one of us is a human.\"\n\nHow many residents of\
+ \ \u0218irnea have been turned into vampires?"
+
+expected_answer: '100'
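
Since the expected answer can look surprising, here is an illustrative brute-force sketch (not part of the sample) that enumerates every possible number of vampires and keeps only the counts consistent with the puzzle's rules; the 10_liars task below follows the same pattern:

```python
# Every resident claims "at least one of us is a human";
# humans always tell the truth, vampires always lie.
def consistent_vampire_counts(n: int, vampire_seen: bool = True) -> list[int]:
    counts = []
    for k in range(n + 1):                  # k = number of vampires
        statement_true = k < n              # "at least one of us is a human"
        humans_consistent = (k == n) or statement_true        # no humans, or they speak truly
        vampires_consistent = (k == 0) or not statement_true  # no vampires, or they lie
        if humans_consistent and vampires_consistent and (k >= 1 or not vampire_seen):
            counts.append(k)
    return counts

print(consistent_vampire_counts(100))  # [100] -- only "all vampires" is consistent
```
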
diff --git a/python/samples/task_centric_memory/data_files/tasks/10_liars.yaml b/python/samples/task_centric_memory/data_files/tasks/10_liars.yaml
new file mode 100644
index 000000000000..096e12775935
--- /dev/null
+++ b/python/samples/task_centric_memory/data_files/tasks/10_liars.yaml
@@ -0,0 +1,8 @@
+# Similar to the 100 vampires task, for testing generalization from one to the other.
+
+task_description: 'You ask ten people ''How many of you are liars?''
+ They all answer ''At least one of us is not a liar.''
+ You happen to know that at least one of them IS a liar.
+ How many of them are liars in total?'
+
+expected_answer: All of them are liars.
diff --git a/python/samples/task_centric_memory/data_files/tasks/autogen_package.yaml b/python/samples/task_centric_memory/data_files/tasks/autogen_package.yaml
new file mode 100644
index 000000000000..f80840b30073
--- /dev/null
+++ b/python/samples/task_centric_memory/data_files/tasks/autogen_package.yaml
@@ -0,0 +1,5 @@
+# Test where human advice is needed.
+
+task_description: As a contribution to autogen, can I create a new autogen package for a copilot extension agent that I built on autogen?
+
+expected_answer: It's best to have your agent in its own repo, then add the autogen-extension topic to that repo.
diff --git a/python/samples/task_centric_memory/data_files/tasks/cell_towers_1.yaml b/python/samples/task_centric_memory/data_files/tasks/cell_towers_1.yaml
new file mode 100644
index 000000000000..f86e370db3ee
--- /dev/null
+++ b/python/samples/task_centric_memory/data_files/tasks/cell_towers_1.yaml
@@ -0,0 +1,9 @@
+# File-free version of a GAIA L1 task.
+
+task_description: You are a telecommunications engineer who wants to build cell phone towers on a stretch of road.
+ Houses are located at mile markers 16, 17, 19, 11, 9, 10, 2, 5, 4.
+ Each cell phone tower can cover houses located next to the road within a 4-mile radius.
+ Find the minimum number of cell phone towers needed to cover all houses next to the road.
+ Your answer should be a positive numerical integer value.
+
+expected_answer: '2'
diff --git a/python/samples/task_centric_memory/data_files/tasks/cell_towers_2.yaml b/python/samples/task_centric_memory/data_files/tasks/cell_towers_2.yaml
new file mode 100644
index 000000000000..5ddc046920c9
--- /dev/null
+++ b/python/samples/task_centric_memory/data_files/tasks/cell_towers_2.yaml
@@ -0,0 +1,9 @@
+# Similar to the cell_towers_1 task.
+
+task_description: You are a telecommunications engineer who wants to build cell phone towers on a stretch of road.
+ Houses are located at mile markers 17, 20, 19, 10, 11, 12, 3, 6.
+ Each cell phone tower can cover houses located next to the road within a 4-mile radius.
+ Find the minimum number of cell phone towers needed to cover all houses next to the road.
+ Your answer should be a positive numerical integer value.
+
+expected_answer: '2'
diff --git a/python/samples/task_centric_memory/eval_learning_from_demonstration.py b/python/samples/task_centric_memory/eval_learning_from_demonstration.py
new file mode 100644
index 000000000000..3201964aa99e
--- /dev/null
+++ b/python/samples/task_centric_memory/eval_learning_from_demonstration.py
@@ -0,0 +1,110 @@
+import asyncio
+import sys
+from typing import Any, Dict
+
+from autogen_core.models import (
+ ChatCompletionClient,
+)
+from autogen_ext.task_centric_memory.utils import Apprentice, Grader, PageLogger
+from utils import create_oai_client, load_yaml_file
+
+
+"""
+This code sample connects task-centric memory to a selectable agent with no changes to that agent's code.
+See the block diagram in the README for an overview of the components and their interactions.
+See the config file configs/demonstration.yaml for an overall view of the structure and settings in this sample.
+
+Execute the sample with this command:
+ python eval_learning_from_demonstration.py configs/demonstration.yaml
+
+Here, learning from a demonstration means remembering a previously demonstrated solution to the same or a similar task.
+
+1. The function below asks the agent to perform a reasoning task (ten times) on which it usually fails.
+2. The agent is then given one demonstration of how to solve a similar but different task, and the context window is cleared.
+3. Finally, the agent is tested ten more times to see if it can retrieve and apply the demonstration to the original task.
+
+When adapting this sample code to a new setting, the Apprentice class can be used as-is or completely replaced by other code.
+"""
+
+
+async def eval_learning_from_demonstration(
+ apprentice: Apprentice, client: ChatCompletionClient, logger: PageLogger, config: Dict[str, Any]
+) -> str:
+ """
+ Evaluates the ability to learn quickly from demonstrations.
+ """
+ logger.enter_function()
+
+ num_trials = config["num_trials"]
+ grader = Grader(client, logger)
+
+ # Load the specified data.
+ main_task = load_yaml_file(config["main_task_file"])
+ task_description = main_task["task_description"]
+ expected_answer = main_task["expected_answer"]
+ demo_task = load_yaml_file(config["demo_task_file"])["task_description"]
+ demo_solution = load_yaml_file(config["demo_solution_file"])["insight"]
+
+ # Start by clearing memory then running a baseline test.
+ logger.info("To get a baseline, clear memory, then assign the task.")
+ apprentice.reset_memory()
+ num_successes, num_trials = await grader.test_apprentice(
+ apprentice=apprentice,
+ task_description=task_description,
+ expected_answer=expected_answer,
+ num_trials=num_trials,
+ use_memory=True,
+ client=client,
+ )
+ success_rate = round((num_successes / num_trials) * 100)
+ results_str_1 = "Success rate before demonstration: {}%".format(success_rate)
+ logger.info("\n" + results_str_1)
+
+ # Provide a demonstration for a similar but different task.
+ logger.info("Demonstrate a solution to a similar task.")
+ await apprentice.add_task_solution_pair_to_memory(demo_task, demo_solution)
+
+ # Now test again to see if the demonstration (retrieved from memory) helps.
+ logger.info("Assign the task again to see if the demonstration helps.")
+ num_successes, num_trials = await grader.test_apprentice(
+ apprentice=apprentice,
+ task_description=task_description,
+ expected_answer=expected_answer,
+ num_trials=num_trials,
+ use_memory=True,
+ client=client,
+ )
+ success_rate = round((num_successes / num_trials) * 100)
+ results_str_2 = "Success rate after demonstration: {}%".format(success_rate)
+ logger.info("\n" + results_str_2)
+
+ logger.leave_function()
+ return "\neval_learning_from_demonstration\n" + results_str_1 + "\n" + results_str_2
+
+
+async def run_example(config_filepath: str) -> None:
+ """
+ Runs the code example with the necessary components.
+ """
+ config = load_yaml_file(config_filepath)
+
+ # Create the necessary components.
+ logger = PageLogger(config["PageLogger"])
+ client = create_oai_client(config["client"])
+ apprentice = Apprentice(client, config["Apprentice"], logger)
+
+ # Call the example function.
+ results = await eval_learning_from_demonstration(apprentice, client, logger, config["test"])
+
+ # Finish up.
+ print(results)
+
+
+if __name__ == "__main__":
+ args = sys.argv[1:]
+ if len(args) != 1:
+ # Print usage information.
+ print("Usage: amt.py ")
+ else:
+ # Run the code example.
+ asyncio.run(run_example(config_filepath=args[0]))
diff --git a/python/samples/task_centric_memory/eval_retrieval.py b/python/samples/task_centric_memory/eval_retrieval.py
new file mode 100644
index 000000000000..a16110c3b0ad
--- /dev/null
+++ b/python/samples/task_centric_memory/eval_retrieval.py
@@ -0,0 +1,118 @@
+import asyncio
+import sys
+from typing import Any, Dict, Set
+
+from autogen_core.models import (
+ ChatCompletionClient,
+)
+from autogen_ext.task_centric_memory import TaskCentricMemoryController
+from autogen_ext.task_centric_memory.utils import PageLogger
+from utils import create_oai_client, load_yaml_file
+
+
+"""
+This code sample evaluates memory precision and recall, with no agent involved at all.
+See the config file configs/retrieval.yaml for an overall view of the structure and settings in this sample,
+as well as the data files used for the test.
+
+Execute the sample with this command:
+ python eval_retrieval.py configs/retrieval.yaml
+
+This sample shows how an app can access the `TaskCentricMemoryController` directly
+to retrieve previously stored task-insight pairs as potentially useful exemplars when solving some new task.
+A task is any text instruction that the app may give to an agent.
+An insight is any text (such as a hint, advice, a demonstration, or a plan) that might help the agent perform such tasks.
+"""
+
+
+async def eval_retrieval(
+ memory_controller: TaskCentricMemoryController, client: ChatCompletionClient, logger: PageLogger, config: Dict[str, Any]
+) -> str:
+ """
+ Evaluates precision and recall of task-centric memory retrieval.
+ """
+ logger.enter_function()
+
+ # Load the specified data.
+ task_files = config["tasks"]
+ task_list = [load_yaml_file(task)["task_description"] for task in task_files]
+
+ insight_files = config["insights"]
+ insight_list = [load_yaml_file(insight)["insight"] for insight in insight_files]
+
+ task_insight_relevance = config["task_insight_relevance"]
+
+ # Clear memory, then store the specified task-insight pairs.
+ memory_controller.reset_memory()
+ for ti, task in enumerate(task_list):
+ for ii, insight in enumerate(insight_list):
+ if task_insight_relevance[ti][ii] == 2:
+ await memory_controller.add_memo(task=task, insight=insight)
+
+ # Test memory retrieval.
+ num_retrieved = 0
+ num_relevant = 0
+ num_relevant_and_retrieved = 0
+ for ti, task in enumerate(task_list):
+ # Retrieve insights for this task.
+ memos = await memory_controller.retrieve_relevant_memos(task=task)
+ set_of_retrieved_insights = set(memo.insight for memo in memos)
+
+ # Gather the insights that are relevant to this task according to ground truth.
+ set_of_relevant_insights: Set[str] = set()
+ for ii, insight in enumerate(insight_list):
+ if task_insight_relevance[ti][ii] > 0:
+ set_of_relevant_insights.add(insight)
+
+ # Accumulate the counts.
+ num_retrieved += len(set_of_retrieved_insights)
+ num_relevant += len(set_of_relevant_insights)
+ num_relevant_and_retrieved += len(set_of_relevant_insights & set_of_retrieved_insights)
+ logger.info("\nNum retrieved: {}".format(num_retrieved))
+ logger.info("\nNum relevant: {}".format(num_relevant))
+ logger.info("\nNum relevant and retrieved: {}".format(num_relevant_and_retrieved))
+
+ # Compute precision and recall as percentages.
+ precision = num_relevant_and_retrieved / num_retrieved if num_retrieved > 0 else 0
+ recall = num_relevant_and_retrieved / num_relevant if num_relevant > 0 else 0
+ precision_str = "Precision: {:.3f}%".format(precision * 100)
+ recall_str = "Recall: {:.3f}%".format(recall * 100)
+ logger.info("\n" + precision_str)
+ logger.info("\n" + recall_str)
+
+ logger.leave_function()
+ return "\neval_retrieval\n" + precision_str + "\n" + recall_str
+
+
+async def run_example(config_filepath: str) -> None:
+ """
+ Runs the code example with the necessary components.
+ """
+ config = load_yaml_file(config_filepath)
+
+ # Create the necessary components.
+ logger = PageLogger(config["PageLogger"])
+ client = create_oai_client(config["client"])
+ memory_controller = TaskCentricMemoryController(
+ reset=True,
+ client=client,
+ task_assignment_callback=None,
+ config=config["TaskCentricMemoryController"],
+ logger=logger,
+ )
+
+ # Call the example function.
+ results = await eval_retrieval(memory_controller, client, logger, config["test"])
+
+ # Finish up.
+ print(results)
+
+
+if __name__ == "__main__":
+ args = sys.argv[1:]
+ if len(args) != 1:
+ # Print usage information.
+ print("Usage: amt.py ")
+ else:
+ # Run the code example.
+ asyncio.run(run_example(config_filepath=args[0]))
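
For readers wiring up their own configs/retrieval.yaml, the following hypothetical snippet (values invented for illustration; the labeling scheme is inferred from the code above, where `== 2` triggers storage and `> 0` counts as relevant) shows the shape of the task_insight_relevance matrix and how the precision and recall figures fall out of it:

```python
# Hypothetical relevance matrix: rows are tasks, columns are insights.
# 2 = relevant and stored as a memo, 1 = relevant but never stored
# (retrieval must generalize from a similar stored task), 0 = irrelevant.
task_insight_relevance = [
    [2, 0],  # task 0: insight 0 is stored for it; insight 1 is unrelated
    [1, 2],  # task 1: insight 0 helps but was never stored; insight 1 is stored
]

# Suppose retrieval returns {insight 0} for task 0 and {insight 1} for task 1.
retrieved = [{0}, {1}]
relevant = [{ii for ii, r in enumerate(row) if r > 0} for row in task_insight_relevance]
hits = sum(len(relevant[ti] & retrieved[ti]) for ti in range(2))  # 2
precision = hits / sum(len(s) for s in retrieved)                 # 2 / 2 = 100%
recall = hits / sum(len(s) for s in relevant)                     # 2 / 3 ~ 67%
print(f"precision={precision:.0%} recall={recall:.0%}")
```
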
diff --git a/python/samples/task_centric_memory/eval_self_teaching.py b/python/samples/task_centric_memory/eval_self_teaching.py
new file mode 100644
index 000000000000..9e783aa2dbf8
--- /dev/null
+++ b/python/samples/task_centric_memory/eval_self_teaching.py
@@ -0,0 +1,128 @@
+import asyncio
+import sys
+from typing import Any, Dict
+
+from autogen_core.models import (
+ ChatCompletionClient,
+)
+from autogen_ext.task_centric_memory.utils import Apprentice, Grader, PageLogger
+
+from utils import create_oai_client, load_yaml_file
+
+
+"""
+This code sample connects task-centric memory to a selectable agent with no changes to that agent's code.
+See the block diagram in the README for an overview of the components and their interactions.
+See the config file configs/self_teaching.yaml for an overall view of the structure and settings in this sample.
+
+Execute the sample with this command:
+ python eval_self_teaching.py configs/self_teaching.yaml
+
+We say that an agent is self-teaching if it can learn quickly from its own trial and error with no user input.
+This sample asks the agent to perform a reasoning task on which it usually fails.
+Then, using automatic success or failure feedback (for a verifiable task with no side effects on the environment),
+the agent iterates through a background learning loop to find a solution, which it then stores as an insight in memory.
+Finally, the agent is tested again to see if it can retrieve and apply its insight to the original task,
+as well as to a similar but different task, as a test of generalization.
+
+When adapting this sample code to a new setting, the Apprentice class can be used as-is or completely replaced by other code.
+"""
+
+
+async def eval_self_teaching(
+ apprentice: Apprentice, client: ChatCompletionClient, logger: PageLogger, config: Dict[str, Any]
+) -> str:
+ """
+ Evaluates the ability of an agent to learn quickly from its own trial and error.
+ """
+ logger.enter_function()
+
+ num_loops = config["num_loops"]
+ num_final_test_trials = config["num_final_test_trials"]
+ grader = Grader(client, logger)
+
+ # Load the specified data.
+ task_dict_1 = load_yaml_file(config["task_file_1"])
+ task_description_1 = task_dict_1["task_description"]
+ expected_answer_1 = task_dict_1["expected_answer"]
+
+ # Test generalization on this different, similar task.
+ task_dict_2 = load_yaml_file(config["task_file_2"])
+ task_description_2 = task_dict_2["task_description"]
+ expected_answer_2 = task_dict_2["expected_answer"]
+
+ # Start the test with empty memory.
+ apprentice.reset_memory()
+
+ total_num_successes_1 = 0
+ total_num_successes_2 = 0
+ total_num_trials = 0
+ for _ in range(num_loops):
+ # Train on the first task.
+ await apprentice.train_on_task(task=task_description_1, expected_answer=expected_answer_1)
+
+ # Test on the first task.
+ num_successes, num_trials = await grader.test_apprentice(
+ apprentice=apprentice,
+ task_description=task_description_1,
+ expected_answer=expected_answer_1,
+ num_trials=num_final_test_trials,
+ use_memory=True,
+ client=client,
+ )
+ logger.info("Task 1 success rate: {}%".format(round((num_successes / num_trials) * 100)))
+ total_num_successes_1 += num_successes
+
+ # Test on the second task.
+ num_successes, num_trials = await grader.test_apprentice(
+ apprentice=apprentice,
+ task_description=task_description_2,
+ expected_answer=expected_answer_2,
+ num_trials=num_final_test_trials,
+ use_memory=True,
+ client=client,
+ )
+ logger.info("Task 2 success rate: {}%".format(round((num_successes / num_trials) * 100)))
+ total_num_successes_2 += num_successes
+
+ total_num_trials += num_final_test_trials
+ logger.info("")
+
+ overall_success_rate_1 = round((total_num_successes_1 / total_num_trials) * 100)
+ overall_success_rate_2 = round((total_num_successes_2 / total_num_trials) * 100)
+
+ results_str_1 = "Overall task 1 success rate: {}%".format(overall_success_rate_1)
+ results_str_2 = "Overall task 2 success rate: {}%".format(overall_success_rate_2)
+ logger.info("\n" + results_str_1)
+ logger.info(results_str_2)
+
+ logger.leave_function()
+ return "\neval_self_teaching\n" + results_str_1 + "\n" + results_str_2
+
+
+async def run_example(config_filepath: str) -> None:
+ """
+ Runs the code example with the necessary components.
+ """
+ config = load_yaml_file(config_filepath)
+
+ # Create the necessary components.
+ logger = PageLogger(config["PageLogger"])
+ client = create_oai_client(config["client"])
+ apprentice = Apprentice(client, config["Apprentice"], logger)
+
+ # Call the example function.
+ results = await eval_self_teaching(apprentice, client, logger, config["test"])
+
+ # Finish up.
+ print(results)
+
+
+if __name__ == "__main__":
+ args = sys.argv[1:]
+ if len(args) != 1:
+ # Print usage information.
+ print("Usage: amt.py ")
+ else:
+ # Run the code example.
+ asyncio.run(run_example(config_filepath=args[0]))
diff --git a/python/samples/task_centric_memory/eval_teachability.py b/python/samples/task_centric_memory/eval_teachability.py
new file mode 100644
index 000000000000..76b794970ff9
--- /dev/null
+++ b/python/samples/task_centric_memory/eval_teachability.py
@@ -0,0 +1,112 @@
+import asyncio
+import sys
+from typing import Any, Dict
+
+from autogen_core.models import (
+ ChatCompletionClient,
+)
+from autogen_ext.task_centric_memory.utils import Apprentice, Grader, PageLogger
+
+from utils import create_oai_client, load_yaml_file
+
+
+"""
+This code sample connects task-centric memory to a selectable agent with no changes to that agent's code.
+See the block diagram in the README for an overview of the components and their interactions.
+See the config file configs/eval_teachability.yaml for an overall view of the structure and settings in this sample.
+
+Execute the sample with this command:
+ python eval_teachability.py configs/eval_teachability.yaml
+
+Teachable agents use memory to learn quickly from user teachings, hints, and advice.
+The function below passes user instructions (loaded from a file) to the agent by calling Apprentice.handle_user_message().
+When adapting this sample code to a new setting, the Apprentice class can be used as-is or completely replaced by other code.
+
+1. In the first conversation, the agent is expected to fail because it lacks the necessary knowledge.
+2. In the second conversation (starting with an empty context window), the user provides the missing insight.
+3. In the third conversation, the agent is expected to succeed after retrieving the key insight from memory.
+"""
+
+
+async def eval_teachability(
+ apprentice: Apprentice, client: ChatCompletionClient, logger: PageLogger, config: Dict[str, Any]
+) -> str:
+ """
+ Evaluates the ability to learn quickly from user teachings, hints, and advice.
+ """
+ logger.enter_function()
+
+ # Load the specified data.
+ task_dict = load_yaml_file(config["task_file"])
+ task_description = task_dict["task_description"]
+ expected_answer = task_dict["expected_answer"]
+
+ insight_dict = load_yaml_file(config["insight_file"])
+ insight = insight_dict["insight"]
+
+ # First test without memory.
+ apprentice.reset_memory()
+ logger.info("\nClear memory, then ask the question.")
+ response = await apprentice.handle_user_message(task_description)
+
+ # Check the response.
+ grader = Grader(client, logger)
+ response_is_correct, extracted_answer = await grader.is_response_correct(
+ task_description, response, expected_answer
+ )
+ logger.info("Extracted answer: {}".format(extracted_answer))
+ if response_is_correct:
+ results_str_1 = "Answer before teaching is CORRECT."
+ else:
+ results_str_1 = "Answer before teaching is INCORRECT."
+ logger.info(results_str_1 + "\n")
+
+ # Give advice that should help solve this task.
+ logger.info("Give the advice.")
+ await apprentice.handle_user_message(insight)
+
+ # Now ask the question again to see if the advice helps.
+ logger.info("\nAsk the question again to see if the advice helps.")
+ response = await apprentice.handle_user_message(task_description)
+
+ # Check the response.
+ response_is_correct, extracted_answer = await grader.is_response_correct(
+ task_description, response, expected_answer
+ )
+ logger.info("Extracted answer: {}".format(extracted_answer))
+ if response_is_correct:
+ results_str_2 = "Answer after teaching is CORRECT."
+ else:
+ results_str_2 = "Answer after teaching is INCORRECT."
+ logger.info(results_str_2 + "\n")
+
+ logger.leave_function()
+ return "\neval_teachability\n" + results_str_1 + "\n" + results_str_2
+
+
+async def run_example(config_filepath: str) -> None:
+ """
+ Runs the code example with the necessary components.
+ """
+ config = load_yaml_file(config_filepath)
+
+ # Create the necessary components.
+ logger = PageLogger(config["PageLogger"])
+ client = create_oai_client(config["client"])
+ apprentice = Apprentice(client, config["Apprentice"], logger)
+
+ # Call the example function.
+ results = await eval_teachability(apprentice, client, logger, config["test"])
+
+ # Finish up.
+ print(results)
+
+
+if __name__ == "__main__":
+ args = sys.argv[1:]
+ if len(args) != 1:
+ # Print usage information.
+ print("Usage: amt.py ")
+ else:
+ # Run the code example.
+ asyncio.run(run_example(config_filepath=args[0]))
diff --git a/python/samples/task_centric_memory/utils.py b/python/samples/task_centric_memory/utils.py
new file mode 100644
index 000000000000..366b16c995e1
--- /dev/null
+++ b/python/samples/task_centric_memory/utils.py
@@ -0,0 +1,32 @@
+from typing import Any, Dict
+
+import yaml
+
+from autogen_core.models import (
+ ChatCompletionClient,
+)
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+
+
+def create_oai_client(config: Dict[str, Any]) -> ChatCompletionClient:
+ """
+ Creates an OpenAI chat completion client from the given config settings.
+ """
+ client = OpenAIChatCompletionClient(
+ model=config["model"],
+ max_tokens=config["max_completion_tokens"],
+ max_retries=config["max_retries"],
+ temperature=config["temperature"],
+ presence_penalty=config["presence_penalty"],
+ frequency_penalty=config["frequency_penalty"],
+ top_p=config["top_p"],
+ )
+ return client
+
+
+def load_yaml_file(file_path: str) -> Any:
+ """
+ Opens a YAML file and returns its parsed contents.
+ """
+ with open(file_path, "r") as file:
+ return yaml.load(file, Loader=yaml.FullLoader)
+
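
As a reading aid, here is a minimal sketch (placeholder values only, not the sample's actual settings, which live in the configs/*.yaml files) of the `client` config block that create_oai_client above expects:

```python
# Placeholder values for illustration; the real settings come from the
# "client" section of the sample's YAML config files.
example_client_config = {
    "model": "gpt-4o-2024-08-06",  # any OpenAI chat model name
    "max_completion_tokens": 4096,
    "max_retries": 5,
    "temperature": 0.8,
    "presence_penalty": 0.0,
    "frequency_penalty": 0.0,
    "top_p": 1.0,
}
# client = create_oai_client(example_client_config)
```
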
diff --git a/python/uv.lock b/python/uv.lock
index 78f5f8ae617e..0a057c703bca 100644
--- a/python/uv.lock
+++ b/python/uv.lock
@@ -355,6 +355,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/f8/ed/e97229a566617f2ae958a6b13e7cc0f585470eac730a73e9e82c32a3cdd2/arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80", size = 66419 },
]
+[[package]]
+name = "asgiref"
+version = "3.8.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/29/38/b3395cc9ad1b56d2ddac9970bc8f4141312dbaec28bc7c218b0dfafd0f42/asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590", size = 35186 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/39/e3/893e8757be2612e6c266d9bb58ad2e3651524b5b40cf56761e985a28b13e/asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47", size = 23828 },
+]
+
[[package]]
name = "asttokens"
version = "2.4.1"
@@ -667,6 +679,9 @@ semantic-kernel-onnx = [
semantic-kernel-pandas = [
{ name = "semantic-kernel", extra = ["pandas"] },
]
+task-centric-memory = [
+ { name = "chromadb" },
+]
video-surfer = [
{ name = "autogen-agentchat" },
{ name = "ffmpeg-python" },
@@ -700,6 +715,7 @@ requires-dist = [
{ name = "azure-ai-inference", marker = "extra == 'azure'", specifier = ">=1.0.0b7" },
{ name = "azure-core", marker = "extra == 'azure'" },
{ name = "azure-identity", marker = "extra == 'azure'" },
+ { name = "chromadb", marker = "extra == 'task-centric-memory'", specifier = ">=0.6.3" },
{ name = "diskcache", marker = "extra == 'diskcache'", specifier = ">=5.6.3" },
{ name = "docker", marker = "extra == 'docker'", specifier = "~=7.0" },
{ name = "ffmpeg-python", marker = "extra == 'video-surfer'" },
@@ -953,6 +969,47 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 },
]
+[[package]]
+name = "backoff"
+version = "2.2.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/47/d7/5bbeb12c44d7c4f2fb5b56abce497eb5ed9f34d85701de869acedd602619/backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba", size = 17001 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/df/73/b6e24bd22e6720ca8ee9a85a0c4a2971af8497d8f3193fa05390cbd46e09/backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8", size = 15148 },
+]
+
+[[package]]
+name = "bcrypt"
+version = "4.2.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/56/8c/dd696962612e4cd83c40a9e6b3db77bfe65a830f4b9af44098708584686c/bcrypt-4.2.1.tar.gz", hash = "sha256:6765386e3ab87f569b276988742039baab087b2cdb01e809d74e74503c2faafe", size = 24427 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/bc/ca/e17b08c523adb93d5f07a226b2bd45a7c6e96b359e31c1e99f9db58cb8c3/bcrypt-4.2.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:1340411a0894b7d3ef562fb233e4b6ed58add185228650942bdc885362f32c17", size = 489982 },
+ { url = "https://files.pythonhosted.org/packages/6a/be/e7c6e0fd6087ee8fc6d77d8d9e817e9339d879737509019b9a9012a1d96f/bcrypt-4.2.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1ee315739bc8387aa36ff127afc99120ee452924e0df517a8f3e4c0187a0f5f", size = 273108 },
+ { url = "https://files.pythonhosted.org/packages/d6/53/ac084b7d985aee1a5f2b086d501f550862596dbf73220663b8c17427e7f2/bcrypt-4.2.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dbd0747208912b1e4ce730c6725cb56c07ac734b3629b60d4398f082ea718ad", size = 278733 },
+ { url = "https://files.pythonhosted.org/packages/8e/ab/b8710a3d6231c587e575ead0b1c45bb99f5454f9f579c9d7312c17b069cc/bcrypt-4.2.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:aaa2e285be097050dba798d537b6efd9b698aa88eef52ec98d23dcd6d7cf6fea", size = 273856 },
+ { url = "https://files.pythonhosted.org/packages/9d/e5/2fd1ea6395358ffdfd4afe370d5b52f71408f618f781772a48971ef3b92b/bcrypt-4.2.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:76d3e352b32f4eeb34703370e370997065d28a561e4a18afe4fef07249cb4396", size = 279067 },
+ { url = "https://files.pythonhosted.org/packages/4e/ef/f2cb7a0f7e1ed800a604f8ab256fb0afcf03c1540ad94ff771ce31e794aa/bcrypt-4.2.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:b7703ede632dc945ed1172d6f24e9f30f27b1b1a067f32f68bf169c5f08d0425", size = 306851 },
+ { url = "https://files.pythonhosted.org/packages/de/cb/578b0023c6a5ca16a177b9044ba6bd6032277bd3ef020fb863eccd22e49b/bcrypt-4.2.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:89df2aea2c43be1e1fa066df5f86c8ce822ab70a30e4c210968669565c0f4685", size = 310793 },
+ { url = "https://files.pythonhosted.org/packages/98/bc/9d501ee9d754f63d4b1086b64756c284facc3696de9b556c146279a124a5/bcrypt-4.2.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:04e56e3fe8308a88b77e0afd20bec516f74aecf391cdd6e374f15cbed32783d6", size = 320957 },
+ { url = "https://files.pythonhosted.org/packages/a1/25/2ec4ce5740abc43182bfc064b9acbbf5a493991246985e8b2bfe231ead64/bcrypt-4.2.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cfdf3d7530c790432046c40cda41dfee8c83e29482e6a604f8930b9930e94139", size = 339958 },
+ { url = "https://files.pythonhosted.org/packages/6d/64/fd67788f64817727897d31e9cdeeeba3941eaad8540733c05c7eac4aa998/bcrypt-4.2.1-cp37-abi3-win32.whl", hash = "sha256:adadd36274510a01f33e6dc08f5824b97c9580583bd4487c564fc4617b328005", size = 160912 },
+ { url = "https://files.pythonhosted.org/packages/00/8f/fe834eaa54abbd7cab8607e5020fa3a0557e929555b9e4ca404b4adaab06/bcrypt-4.2.1-cp37-abi3-win_amd64.whl", hash = "sha256:8c458cd103e6c5d1d85cf600e546a639f234964d0228909d8f8dbeebff82d526", size = 152981 },
+ { url = "https://files.pythonhosted.org/packages/4a/57/23b46933206daf5384b5397d9878746d2249fe9d45efaa8e1467c87d3048/bcrypt-4.2.1-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:8ad2f4528cbf0febe80e5a3a57d7a74e6635e41af1ea5675282a33d769fba413", size = 489842 },
+ { url = "https://files.pythonhosted.org/packages/fd/28/3ea8a39ddd4938b6c6b6136816d72ba5e659e2d82b53d843c8c53455ac4d/bcrypt-4.2.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:909faa1027900f2252a9ca5dfebd25fc0ef1417943824783d1c8418dd7d6df4a", size = 272500 },
+ { url = "https://files.pythonhosted.org/packages/77/7f/b43622999f5d4de06237a195ac5501ac83516adf571b907228cd14bac8fe/bcrypt-4.2.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cde78d385d5e93ece5479a0a87f73cd6fa26b171c786a884f955e165032b262c", size = 278368 },
+ { url = "https://files.pythonhosted.org/packages/50/68/f2e3959014b4d8874c747e6e171d46d3e63a3a39aaca8417a8d837eda0a8/bcrypt-4.2.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:533e7f3bcf2f07caee7ad98124fab7499cb3333ba2274f7a36cf1daee7409d99", size = 273335 },
+ { url = "https://files.pythonhosted.org/packages/d6/c3/4b4bad4da852924427c651589d464ad1aa624f94dd904ddda8493b0a35e5/bcrypt-4.2.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:687cf30e6681eeda39548a93ce9bfbb300e48b4d445a43db4298d2474d2a1e54", size = 278614 },
+ { url = "https://files.pythonhosted.org/packages/6e/5a/ee107961e84c41af2ac201d0460f962b6622ff391255ffd46429e9e09dc1/bcrypt-4.2.1-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:041fa0155c9004eb98a232d54da05c0b41d4b8e66b6fc3cb71b4b3f6144ba837", size = 306464 },
+ { url = "https://files.pythonhosted.org/packages/5c/72/916e14fa12d2b1d1fc6c26ea195337419da6dd23d0bf53ac61ef3739e5c5/bcrypt-4.2.1-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f85b1ffa09240c89aa2e1ae9f3b1c687104f7b2b9d2098da4e923f1b7082d331", size = 310674 },
+ { url = "https://files.pythonhosted.org/packages/97/92/3dc76d8bfa23300591eec248e950f85bd78eb608c96bd4747ce4cc06acdb/bcrypt-4.2.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c6f5fa3775966cca251848d4d5393ab016b3afed251163c1436fefdec3b02c84", size = 320577 },
+ { url = "https://files.pythonhosted.org/packages/5d/ab/a6c0da5c2cf86600f74402a72b06dfe365e1a1d30783b1bbeec460fd57d1/bcrypt-4.2.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:807261df60a8b1ccd13e6599c779014a362ae4e795f5c59747f60208daddd96d", size = 339836 },
+ { url = "https://files.pythonhosted.org/packages/b4/b4/e75b6e9a72a030a04362034022ebe317c5b735d04db6ad79237101ae4a5c/bcrypt-4.2.1-cp39-abi3-win32.whl", hash = "sha256:b588af02b89d9fad33e5f98f7838bf590d6d692df7153647724a7f20c186f6bf", size = 160911 },
+ { url = "https://files.pythonhosted.org/packages/76/b9/d51d34e6cd6d887adddb28a8680a1d34235cc45b9d6e238ce39b98199ca0/bcrypt-4.2.1-cp39-abi3-win_amd64.whl", hash = "sha256:e84e0e6f8e40a242b11bce56c313edc2be121cec3e0ec2d76fce01f6af33c07c", size = 153078 },
+ { url = "https://files.pythonhosted.org/packages/4e/6e/7193067042de23af3d71882f898c8c0bd2b18e6ee44a4f76e395dfadb5a8/bcrypt-4.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76132c176a6d9953cdc83c296aeaed65e1a708485fd55abf163e0d9f8f16ce0e", size = 270069 },
+ { url = "https://files.pythonhosted.org/packages/3b/05/2546085c6dc07a45627460a39e6291b82382b434fff2bd0167ff3bc31eb1/bcrypt-4.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e158009a54c4c8bc91d5e0da80920d048f918c61a581f0a63e4e93bb556d362f", size = 274652 },
+]
+
[[package]]
name = "beartype"
version = "0.18.5"
@@ -1032,6 +1089,22 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3e/05/43bae794c8e5f42d79e1c24205bc0c7447b3909a446de46cf231fa6b39dd/botocore-1.36.8-py3-none-any.whl", hash = "sha256:59d3fdfbae6d916b046e973bebcbeb70a102f9e570ca86d5ba512f1854b78fc2", size = 13318382 },
]
+[[package]]
+name = "build"
+version = "1.2.2.post1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" },
+ { name = "importlib-metadata", marker = "python_full_version < '3.10.2'" },
+ { name = "packaging" },
+ { name = "pyproject-hooks" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/7d/46/aeab111f8e06793e4f0e421fcad593d547fb8313b50990f31681ee2fb1ad/build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7", size = 46701 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/84/c2/80633736cd183ee4a62107413def345f7e6e3c01563dbca1417363cf957e/build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5", size = 22950 },
+]
+
[[package]]
name = "cachetools"
version = "5.5.1"
@@ -1201,6 +1274,70 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/52/93/342cc62a70ab727e093ed98e02a725d85b746345f05d2b5e5034649f4ec8/chevron-0.14.0-py3-none-any.whl", hash = "sha256:fbf996a709f8da2e745ef763f482ce2d311aa817d287593a5b990d6d6e4f0443", size = 11595 },
]
+[[package]]
+name = "chroma-hnswlib"
+version = "0.7.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "numpy" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/73/09/10d57569e399ce9cbc5eee2134996581c957f63a9addfa6ca657daf006b8/chroma_hnswlib-0.7.6.tar.gz", hash = "sha256:4dce282543039681160259d29fcde6151cc9106c6461e0485f57cdccd83059b7", size = 32256 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a8/74/b9dde05ea8685d2f8c4681b517e61c7887e974f6272bb24ebc8f2105875b/chroma_hnswlib-0.7.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f35192fbbeadc8c0633f0a69c3d3e9f1a4eab3a46b65458bbcbcabdd9e895c36", size = 195821 },
+ { url = "https://files.pythonhosted.org/packages/fd/58/101bfa6bc41bc6cc55fbb5103c75462a7bf882e1704256eb4934df85b6a8/chroma_hnswlib-0.7.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f007b608c96362b8f0c8b6b2ac94f67f83fcbabd857c378ae82007ec92f4d82", size = 183854 },
+ { url = "https://files.pythonhosted.org/packages/17/ff/95d49bb5ce134f10d6aa08d5f3bec624eaff945f0b17d8c3fce888b9a54a/chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:456fd88fa0d14e6b385358515aef69fc89b3c2191706fd9aee62087b62aad09c", size = 2358774 },
+ { url = "https://files.pythonhosted.org/packages/3a/6d/27826180a54df80dbba8a4f338b022ba21c0c8af96fd08ff8510626dee8f/chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dfaae825499c2beaa3b75a12d7ec713b64226df72a5c4097203e3ed532680da", size = 2392739 },
+ { url = "https://files.pythonhosted.org/packages/d6/63/ee3e8b7a8f931918755faacf783093b61f32f59042769d9db615999c3de0/chroma_hnswlib-0.7.6-cp310-cp310-win_amd64.whl", hash = "sha256:2487201982241fb1581be26524145092c95902cb09fc2646ccfbc407de3328ec", size = 150955 },
+ { url = "https://files.pythonhosted.org/packages/f5/af/d15fdfed2a204c0f9467ad35084fbac894c755820b203e62f5dcba2d41f1/chroma_hnswlib-0.7.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81181d54a2b1e4727369486a631f977ffc53c5533d26e3d366dda243fb0998ca", size = 196911 },
+ { url = "https://files.pythonhosted.org/packages/0d/19/aa6f2139f1ff7ad23a690ebf2a511b2594ab359915d7979f76f3213e46c4/chroma_hnswlib-0.7.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4b4ab4e11f1083dd0a11ee4f0e0b183ca9f0f2ed63ededba1935b13ce2b3606f", size = 185000 },
+ { url = "https://files.pythonhosted.org/packages/79/b1/1b269c750e985ec7d40b9bbe7d66d0a890e420525187786718e7f6b07913/chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53db45cd9173d95b4b0bdccb4dbff4c54a42b51420599c32267f3abbeb795170", size = 2377289 },
+ { url = "https://files.pythonhosted.org/packages/c7/2d/d5663e134436e5933bc63516a20b5edc08b4c1b1588b9680908a5f1afd04/chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c093f07a010b499c00a15bc9376036ee4800d335360570b14f7fe92badcdcf9", size = 2411755 },
+ { url = "https://files.pythonhosted.org/packages/3e/79/1bce519cf186112d6d5ce2985392a89528c6e1e9332d680bf752694a4cdf/chroma_hnswlib-0.7.6-cp311-cp311-win_amd64.whl", hash = "sha256:0540b0ac96e47d0aa39e88ea4714358ae05d64bbe6bf33c52f316c664190a6a3", size = 151888 },
+ { url = "https://files.pythonhosted.org/packages/93/ac/782b8d72de1c57b64fdf5cb94711540db99a92768d93d973174c62d45eb8/chroma_hnswlib-0.7.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e87e9b616c281bfbe748d01705817c71211613c3b063021f7ed5e47173556cb7", size = 197804 },
+ { url = "https://files.pythonhosted.org/packages/32/4e/fd9ce0764228e9a98f6ff46af05e92804090b5557035968c5b4198bc7af9/chroma_hnswlib-0.7.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec5ca25bc7b66d2ecbf14502b5729cde25f70945d22f2aaf523c2d747ea68912", size = 185421 },
+ { url = "https://files.pythonhosted.org/packages/d9/3d/b59a8dedebd82545d873235ef2d06f95be244dfece7ee4a1a6044f080b18/chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:305ae491de9d5f3c51e8bd52d84fdf2545a4a2bc7af49765cda286b7bb30b1d4", size = 2389672 },
+ { url = "https://files.pythonhosted.org/packages/74/1e/80a033ea4466338824974a34f418e7b034a7748bf906f56466f5caa434b0/chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:822ede968d25a2c88823ca078a58f92c9b5c4142e38c7c8b4c48178894a0a3c5", size = 2436986 },
+]
+
+[[package]]
+name = "chromadb"
+version = "0.6.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "bcrypt" },
+ { name = "build" },
+ { name = "chroma-hnswlib" },
+ { name = "fastapi" },
+ { name = "grpcio" },
+ { name = "httpx" },
+ { name = "importlib-resources" },
+ { name = "kubernetes" },
+ { name = "mmh3" },
+ { name = "numpy" },
+ { name = "onnxruntime" },
+ { name = "opentelemetry-api" },
+ { name = "opentelemetry-exporter-otlp-proto-grpc" },
+ { name = "opentelemetry-instrumentation-fastapi" },
+ { name = "opentelemetry-sdk" },
+ { name = "orjson" },
+ { name = "overrides" },
+ { name = "posthog" },
+ { name = "pydantic" },
+ { name = "pypika" },
+ { name = "pyyaml" },
+ { name = "rich" },
+ { name = "tenacity" },
+ { name = "tokenizers" },
+ { name = "tqdm" },
+ { name = "typer" },
+ { name = "typing-extensions" },
+ { name = "uvicorn", extra = ["standard"] },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/39/cd/f0f2de3f466ff514fb6b58271c14f6d22198402bb5b71b8d890231265946/chromadb-0.6.3.tar.gz", hash = "sha256:c8f34c0b704b9108b04491480a36d42e894a960429f87c6516027b5481d59ed3", size = 29297929 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/28/8e/5c186c77bf749b6fe0528385e507e463f1667543328d76fd00a49e1a4e6a/chromadb-0.6.3-py3-none-any.whl", hash = "sha256:4851258489a3612b558488d98d09ae0fe0a28d5cad6bd1ba64b96fdc419dc0e5", size = 611129 },
+]
+
[[package]]
name = "chromedriver-autoinstaller"
version = "0.6.4"
@@ -1645,6 +1782,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 },
]
+[[package]]
+name = "durationpy"
+version = "0.9"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/31/e9/f49c4e7fccb77fa5c43c2480e09a857a78b41e7331a75e128ed5df45c56b/durationpy-0.9.tar.gz", hash = "sha256:fd3feb0a69a0057d582ef643c355c40d2fa1c942191f914d12203b1a01ac722a", size = 3186 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4c/a3/ac312faeceffd2d8f86bc6dcb5c401188ba5a01bc88e69bed97578a0dfcd/durationpy-0.9-py3-none-any.whl", hash = "sha256:e65359a7af5cedad07fb77a2dd3f390f8eb0b74cb845589fa6c057086834dd38", size = 3461 },
+]
+
[[package]]
name = "email-validator"
version = "2.2.0"
@@ -2670,6 +2816,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514 },
]
+[[package]]
+name = "importlib-resources"
+version = "6.5.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/cf/8c/f834fbf984f691b4f7ff60f50b514cc3de5cc08abfc3295564dd89c5e2e7/importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c", size = 44693 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a4/ed/1f1afb2e9e7f38a545d628f864d562a5ae64fe6f7a10e28ffb9b185b4e89/importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec", size = 37461 },
+]
+
[[package]]
name = "iniconfig"
version = "2.0.0"
@@ -3045,6 +3200,28 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3a/1d/50ad811d1c5dae091e4cf046beba925bcae0a610e79ae4c538f996f63ed5/kiwisolver-1.4.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b", size = 71762 },
]
+[[package]]
+name = "kubernetes"
+version = "32.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "durationpy" },
+ { name = "google-auth" },
+ { name = "oauthlib" },
+ { name = "python-dateutil" },
+ { name = "pyyaml" },
+ { name = "requests" },
+ { name = "requests-oauthlib" },
+ { name = "six" },
+ { name = "urllib3" },
+ { name = "websocket-client" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/bc/7f/15bcdf96c91f7a7b74d524c1bd058e0a2ef37eb6128cf16dca5c8b613aa0/kubernetes-32.0.0.tar.gz", hash = "sha256:319fa840345a482001ac5d6062222daeb66ec4d1bcb3087402aed685adf0aecb", size = 945530 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/df/14/a59acfe4b3095f2a4fd8d13b348853a69c8f1ed4bce9af00d1b31351a88e/kubernetes-32.0.0-py2.py3-none-any.whl", hash = "sha256:60fd8c29e8e43d9c553ca4811895a687426717deba9c0a66fb2dcc3f5ef96692", size = 1987229 },
+]
+
[[package]]
name = "lancedb"
version = "0.17.0"
@@ -3952,6 +4129,71 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/58/e7/7147c75c383a975c58c33f8e7ee7dbbb0e7390fbcb1ecd321f63e4c73efd/mistralai-1.5.0-py3-none-any.whl", hash = "sha256:9372537719f87bd6f9feef4747d0bf1f4fbe971f8c02945ca4b4bf3c94571c97", size = 271559 },
]
+[[package]]
+name = "mmh3"
+version = "5.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/47/1b/1fc6888c74cbd8abad1292dde2ddfcf8fc059e114c97dd6bf16d12f36293/mmh3-5.1.0.tar.gz", hash = "sha256:136e1e670500f177f49ec106a4ebf0adf20d18d96990cc36ea492c651d2b406c", size = 33728 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a1/01/9d06468928661765c0fc248a29580c760a4a53a9c6c52cf72528bae3582e/mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec", size = 56095 },
+ { url = "https://files.pythonhosted.org/packages/e4/d7/7b39307fc9db867b2a9a20c58b0de33b778dd6c55e116af8ea031f1433ba/mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a", size = 40512 },
+ { url = "https://files.pythonhosted.org/packages/4f/85/728ca68280d8ccc60c113ad119df70ff1748fbd44c89911fed0501faf0b8/mmh3-5.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d4ba8cac21e1f2d4e436ce03a82a7f87cda80378691f760e9ea55045ec480a3d", size = 40110 },
+ { url = "https://files.pythonhosted.org/packages/e4/96/beaf0e301472ffa00358bbbf771fe2d9c4d709a2fe30b1d929e569f8cbdf/mmh3-5.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d69281c281cb01994f054d862a6bb02a2e7acfe64917795c58934b0872b9ece4", size = 100151 },
+ { url = "https://files.pythonhosted.org/packages/c3/ee/9381f825c4e09ffafeffa213c3865c4bf7d39771640de33ab16f6faeb854/mmh3-5.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d05ed3962312fbda2a1589b97359d2467f677166952f6bd410d8c916a55febf", size = 106312 },
+ { url = "https://files.pythonhosted.org/packages/67/dc/350a54bea5cf397d357534198ab8119cfd0d8e8bad623b520f9c290af985/mmh3-5.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78ae6a03f4cff4aa92ddd690611168856f8c33a141bd3e5a1e0a85521dc21ea0", size = 104232 },
+ { url = "https://files.pythonhosted.org/packages/b2/5d/2c6eb4a4ec2f7293b98a9c07cb8c64668330b46ff2b6511244339e69a7af/mmh3-5.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95f983535b39795d9fb7336438faae117424c6798f763d67c6624f6caf2c4c01", size = 91663 },
+ { url = "https://files.pythonhosted.org/packages/f1/ac/17030d24196f73ecbab8b5033591e5e0e2beca103181a843a135c78f4fee/mmh3-5.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d46fdd80d4c7ecadd9faa6181e92ccc6fe91c50991c9af0e371fdf8b8a7a6150", size = 99166 },
+ { url = "https://files.pythonhosted.org/packages/b9/ed/54ddc56603561a10b33da9b12e95a48a271d126f4a4951841bbd13145ebf/mmh3-5.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0f16e976af7365ea3b5c425124b2a7f0147eed97fdbb36d99857f173c8d8e096", size = 101555 },
+ { url = "https://files.pythonhosted.org/packages/1c/c3/33fb3a940c9b70908a5cc9fcc26534aff8698180f9f63ab6b7cc74da8bcd/mmh3-5.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6fa97f7d1e1f74ad1565127229d510f3fd65d931fdedd707c1e15100bc9e5ebb", size = 94813 },
+ { url = "https://files.pythonhosted.org/packages/61/88/c9ff76a23abe34db8eee1a6fa4e449462a16c7eb547546fc5594b0860a72/mmh3-5.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4052fa4a8561bd62648e9eb993c8f3af3bdedadf3d9687aa4770d10e3709a80c", size = 109611 },
+ { url = "https://files.pythonhosted.org/packages/0b/8e/27d04f40e95554ebe782cac7bddda2d158cf3862387298c9c7b254fa7beb/mmh3-5.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3f0e8ae9f961037f812afe3cce7da57abf734285961fffbeff9a4c011b737732", size = 100515 },
+ { url = "https://files.pythonhosted.org/packages/7b/00/504ca8f462f01048f3c87cd93f2e1f60b93dac2f930cd4ed73532a9337f5/mmh3-5.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99297f207db967814f1f02135bb7fe7628b9eacb046134a34e1015b26b06edce", size = 100177 },
+ { url = "https://files.pythonhosted.org/packages/6f/1d/2efc3525fe6fdf8865972fcbb884bd1f4b0f923c19b80891cecf7e239fa5/mmh3-5.1.0-cp310-cp310-win32.whl", hash = "sha256:2e6c8dc3631a5e22007fbdb55e993b2dbce7985c14b25b572dd78403c2e79182", size = 40815 },
+ { url = "https://files.pythonhosted.org/packages/38/b5/c8fbe707cb0fea77a6d2d58d497bc9b67aff80deb84d20feb34d8fdd8671/mmh3-5.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:e4e8c7ad5a4dddcfde35fd28ef96744c1ee0f9d9570108aa5f7e77cf9cfdf0bf", size = 41479 },
+ { url = "https://files.pythonhosted.org/packages/a1/f1/663e16134f913fccfbcea5b300fb7dc1860d8f63dc71867b013eebc10aec/mmh3-5.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:45da549269883208912868a07d0364e1418d8292c4259ca11699ba1b2475bd26", size = 38883 },
+ { url = "https://files.pythonhosted.org/packages/56/09/fda7af7fe65928262098382e3bf55950cfbf67d30bf9e47731bf862161e9/mmh3-5.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0b529dcda3f951ff363a51d5866bc6d63cf57f1e73e8961f864ae5010647079d", size = 56098 },
+ { url = "https://files.pythonhosted.org/packages/0c/ab/84c7bc3f366d6f3bd8b5d9325a10c367685bc17c26dac4c068e2001a4671/mmh3-5.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db1079b3ace965e562cdfc95847312f9273eb2ad3ebea983435c8423e06acd7", size = 40513 },
+ { url = "https://files.pythonhosted.org/packages/4f/21/25ea58ca4a652bdc83d1528bec31745cce35802381fb4fe3c097905462d2/mmh3-5.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:22d31e3a0ff89b8eb3b826d6fc8e19532998b2aa6b9143698043a1268da413e1", size = 40112 },
+ { url = "https://files.pythonhosted.org/packages/bd/78/4f12f16ae074ddda6f06745254fdb50f8cf3c85b0bbf7eaca58bed84bf58/mmh3-5.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2139bfbd354cd6cb0afed51c4b504f29bcd687a3b1460b7e89498329cc28a894", size = 102632 },
+ { url = "https://files.pythonhosted.org/packages/48/11/8f09dc999cf2a09b6138d8d7fc734efb7b7bfdd9adb9383380941caadff0/mmh3-5.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c8105c6a435bc2cd6ea2ef59558ab1a2976fd4a4437026f562856d08996673a", size = 108884 },
+ { url = "https://files.pythonhosted.org/packages/bd/91/e59a66538a3364176f6c3f7620eee0ab195bfe26f89a95cbcc7a1fb04b28/mmh3-5.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57730067174a7f36fcd6ce012fe359bd5510fdaa5fe067bc94ed03e65dafb769", size = 106835 },
+ { url = "https://files.pythonhosted.org/packages/25/14/b85836e21ab90e5cddb85fe79c494ebd8f81d96a87a664c488cc9277668b/mmh3-5.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bde80eb196d7fdc765a318604ded74a4378f02c5b46c17aa48a27d742edaded2", size = 93688 },
+ { url = "https://files.pythonhosted.org/packages/ac/aa/8bc964067df9262740c95e4cde2d19f149f2224f426654e14199a9e47df6/mmh3-5.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9c8eddcb441abddeb419c16c56fd74b3e2df9e57f7aa2903221996718435c7a", size = 101569 },
+ { url = "https://files.pythonhosted.org/packages/70/b6/1fb163cbf919046a64717466c00edabebece3f95c013853fec76dbf2df92/mmh3-5.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:99e07e4acafbccc7a28c076a847fb060ffc1406036bc2005acb1b2af620e53c3", size = 98483 },
+ { url = "https://files.pythonhosted.org/packages/70/49/ba64c050dd646060f835f1db6b2cd60a6485f3b0ea04976e7a29ace7312e/mmh3-5.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e25ba5b530e9a7d65f41a08d48f4b3fedc1e89c26486361166a5544aa4cad33", size = 96496 },
+ { url = "https://files.pythonhosted.org/packages/9e/07/f2751d6a0b535bb865e1066e9c6b80852571ef8d61bce7eb44c18720fbfc/mmh3-5.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bb9bf7475b4d99156ce2f0cf277c061a17560c8c10199c910a680869a278ddc7", size = 105109 },
+ { url = "https://files.pythonhosted.org/packages/b7/02/30360a5a66f7abba44596d747cc1e6fb53136b168eaa335f63454ab7bb79/mmh3-5.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2a1b0878dd281ea3003368ab53ff6f568e175f1b39f281df1da319e58a19c23a", size = 98231 },
+ { url = "https://files.pythonhosted.org/packages/8c/60/8526b0c750ff4d7ae1266e68b795f14b97758a1d9fcc19f6ecabf9c55656/mmh3-5.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:25f565093ac8b8aefe0f61f8f95c9a9d11dd69e6a9e9832ff0d293511bc36258", size = 97548 },
+ { url = "https://files.pythonhosted.org/packages/6d/4c/26e1222aca65769280d5427a1ce5875ef4213449718c8f03958d0bf91070/mmh3-5.1.0-cp311-cp311-win32.whl", hash = "sha256:1e3554d8792387eac73c99c6eaea0b3f884e7130eb67986e11c403e4f9b6d372", size = 40810 },
+ { url = "https://files.pythonhosted.org/packages/98/d5/424ba95062d1212ea615dc8debc8d57983f2242d5e6b82e458b89a117a1e/mmh3-5.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:8ad777a48197882492af50bf3098085424993ce850bdda406a358b6ab74be759", size = 41476 },
+ { url = "https://files.pythonhosted.org/packages/bd/08/0315ccaf087ba55bb19a6dd3b1e8acd491e74ce7f5f9c4aaa06a90d66441/mmh3-5.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f29dc4efd99bdd29fe85ed6c81915b17b2ef2cf853abf7213a48ac6fb3eaabe1", size = 38880 },
+ { url = "https://files.pythonhosted.org/packages/f4/47/e5f452bdf16028bfd2edb4e2e35d0441e4a4740f30e68ccd4cfd2fb2c57e/mmh3-5.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:45712987367cb9235026e3cbf4334670522a97751abfd00b5bc8bfa022c3311d", size = 56152 },
+ { url = "https://files.pythonhosted.org/packages/60/38/2132d537dc7a7fdd8d2e98df90186c7fcdbd3f14f95502a24ba443c92245/mmh3-5.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b1020735eb35086ab24affbea59bb9082f7f6a0ad517cb89f0fc14f16cea4dae", size = 40564 },
+ { url = "https://files.pythonhosted.org/packages/c0/2a/c52cf000581bfb8d94794f58865658e7accf2fa2e90789269d4ae9560b16/mmh3-5.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:babf2a78ce5513d120c358722a2e3aa7762d6071cd10cede026f8b32452be322", size = 40104 },
+ { url = "https://files.pythonhosted.org/packages/83/33/30d163ce538c54fc98258db5621447e3ab208d133cece5d2577cf913e708/mmh3-5.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4f47f58cd5cbef968c84a7c1ddc192fef0a36b48b0b8a3cb67354531aa33b00", size = 102634 },
+ { url = "https://files.pythonhosted.org/packages/94/5c/5a18acb6ecc6852be2d215c3d811aa61d7e425ab6596be940877355d7f3e/mmh3-5.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2044a601c113c981f2c1e14fa33adc9b826c9017034fe193e9eb49a6882dbb06", size = 108888 },
+ { url = "https://files.pythonhosted.org/packages/1f/f6/11c556324c64a92aa12f28e221a727b6e082e426dc502e81f77056f6fc98/mmh3-5.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c94d999c9f2eb2da44d7c2826d3fbffdbbbbcde8488d353fee7c848ecc42b968", size = 106968 },
+ { url = "https://files.pythonhosted.org/packages/5d/61/ca0c196a685aba7808a5c00246f17b988a9c4f55c594ee0a02c273e404f3/mmh3-5.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a015dcb24fa0c7a78f88e9419ac74f5001c1ed6a92e70fd1803f74afb26a4c83", size = 93771 },
+ { url = "https://files.pythonhosted.org/packages/b4/55/0927c33528710085ee77b808d85bbbafdb91a1db7c8eaa89cac16d6c513e/mmh3-5.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:457da019c491a2d20e2022c7d4ce723675e4c081d9efc3b4d8b9f28a5ea789bd", size = 101726 },
+ { url = "https://files.pythonhosted.org/packages/49/39/a92c60329fa470f41c18614a93c6cd88821412a12ee78c71c3f77e1cfc2d/mmh3-5.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:71408579a570193a4ac9c77344d68ddefa440b00468a0b566dcc2ba282a9c559", size = 98523 },
+ { url = "https://files.pythonhosted.org/packages/81/90/26adb15345af8d9cf433ae1b6adcf12e0a4cad1e692de4fa9f8e8536c5ae/mmh3-5.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:8b3a04bc214a6e16c81f02f855e285c6df274a2084787eeafaa45f2fbdef1b63", size = 96628 },
+ { url = "https://files.pythonhosted.org/packages/8a/4d/340d1e340df972a13fd4ec84c787367f425371720a1044220869c82364e9/mmh3-5.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:832dae26a35514f6d3c1e267fa48e8de3c7b978afdafa0529c808ad72e13ada3", size = 105190 },
+ { url = "https://files.pythonhosted.org/packages/d3/7c/65047d1cccd3782d809936db446430fc7758bda9def5b0979887e08302a2/mmh3-5.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bf658a61fc92ef8a48945ebb1076ef4ad74269e353fffcb642dfa0890b13673b", size = 98439 },
+ { url = "https://files.pythonhosted.org/packages/72/d2/3c259d43097c30f062050f7e861075099404e8886b5d4dd3cebf180d6e02/mmh3-5.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3313577453582b03383731b66447cdcdd28a68f78df28f10d275d7d19010c1df", size = 97780 },
+ { url = "https://files.pythonhosted.org/packages/29/29/831ea8d4abe96cdb3e28b79eab49cac7f04f9c6b6e36bfc686197ddba09d/mmh3-5.1.0-cp312-cp312-win32.whl", hash = "sha256:1d6508504c531ab86c4424b5a5ff07c1132d063863339cf92f6657ff7a580f76", size = 40835 },
+ { url = "https://files.pythonhosted.org/packages/12/dd/7cbc30153b73f08eeac43804c1dbc770538a01979b4094edbe1a4b8eb551/mmh3-5.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:aa75981fcdf3f21759d94f2c81b6a6e04a49dfbcdad88b152ba49b8e20544776", size = 41509 },
+ { url = "https://files.pythonhosted.org/packages/80/9d/627375bab4c90dd066093fc2c9a26b86f87e26d980dbf71667b44cbee3eb/mmh3-5.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:a4c1a76808dfea47f7407a0b07aaff9087447ef6280716fd0783409b3088bb3c", size = 38888 },
+]
+
+[[package]]
+name = "monotonic"
+version = "1.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ea/ca/8e91948b782ddfbd194f323e7e7d9ba12e5877addf04fb2bf8fca38e86ac/monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7", size = 7615 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c", size = 8154 },
+]
+
[[package]]
name = "more-itertools"
version = "10.6.0"
@@ -4473,6 +4715,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 },
]

+[[package]]
+name = "oauthlib"
+version = "3.2.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/6d/fa/fbf4001037904031639e6bfbfc02badfc7e12f137a8afa254df6c4c8a670/oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918", size = 177352 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7e/80/cab10959dc1faead58dc8384a781dfbf93cb4d33d50988f7a69f1b7c9bbe/oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", size = 151688 },
+]
+
[[package]]
name = "ollama"
version = "0.4.7"
@@ -4741,6 +4992,38 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ff/b1/55a77152a83ec8998e520a3a575f44af1020cfe4bdc000b7538583293b85/opentelemetry_instrumentation-0.50b0-py3-none-any.whl", hash = "sha256:b8f9fc8812de36e1c6dffa5bfc6224df258841fb387b6dfe5df15099daa10630", size = 30728 },
]

+[[package]]
+name = "opentelemetry-instrumentation-asgi"
+version = "0.50b0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "asgiref" },
+ { name = "opentelemetry-api" },
+ { name = "opentelemetry-instrumentation" },
+ { name = "opentelemetry-semantic-conventions" },
+ { name = "opentelemetry-util-http" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/49/cc/a7b2fd243c6d2621803092eba62e450071b6752dfe4f64f530bbfd91a328/opentelemetry_instrumentation_asgi-0.50b0.tar.gz", hash = "sha256:3ca4cb5616ae6a3e8ce86e7d5c360a8d8cc8ed722cf3dc8a5e44300774e87d49", size = 24105 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d2/81/0899c6b56b1023835f266d909250d439174afa0c34ed5944c5021d3da263/opentelemetry_instrumentation_asgi-0.50b0-py3-none-any.whl", hash = "sha256:2ba1297f746e55dec5a17fe825689da0613662fb25c004c3965a6c54b1d5be22", size = 16304 },
+]
+
+[[package]]
+name = "opentelemetry-instrumentation-fastapi"
+version = "0.50b0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "opentelemetry-api" },
+ { name = "opentelemetry-instrumentation" },
+ { name = "opentelemetry-instrumentation-asgi" },
+ { name = "opentelemetry-semantic-conventions" },
+ { name = "opentelemetry-util-http" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/8d/f8/1917b0b3e414e23c7d71c9a33f0ce020f94bc47d22a30f54ace704e07588/opentelemetry_instrumentation_fastapi-0.50b0.tar.gz", hash = "sha256:16b9181682136da210295def2bb304a32fb9bdee9a935cdc9da43567f7c1149e", size = 19214 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cb/d6/37784bb30b213e2dd6838b9f96c2940907022c1b75ef1ff18a99afe42433/opentelemetry_instrumentation_fastapi-0.50b0-py3-none-any.whl", hash = "sha256:8f03b738495e4705fbae51a2826389c7369629dace89d0f291c06ffefdff5e52", size = 12079 },
+]
+
[[package]]
name = "opentelemetry-proto"
version = "1.29.0"
@@ -4780,6 +5063,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/da/fb/dc15fad105450a015e913cfa4f5c27b6a5f1bea8fb649f8cae11e699c8af/opentelemetry_semantic_conventions-0.50b0-py3-none-any.whl", hash = "sha256:e87efba8fdb67fb38113efea6a349531e75ed7ffc01562f65b802fcecb5e115e", size = 166602 },
]

+[[package]]
+name = "opentelemetry-util-http"
+version = "0.50b0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/69/10/ce3f0d1157cedbd819194f0b27a6bbb7c19a8bceb3941e4a4775014076cf/opentelemetry_util_http-0.50b0.tar.gz", hash = "sha256:dc4606027e1bc02aabb9533cc330dd43f874fca492e4175c31d7154f341754af", size = 7859 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/64/8a/9e1b54f50d1fddebbeac9a9b0632f8db6ece7add904fb593ee2e268ee4de/opentelemetry_util_http-0.50b0-py3-none-any.whl", hash = "sha256:21f8aedac861ffa3b850f8c0a6c373026189eb8630ac6e14a2bf8c55695cc090", size = 6942 },
+]
+
[[package]]
name = "orjson"
version = "3.10.15"
@@ -5129,6 +5421,22 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/9b/fb/a70a4214956182e0d7a9099ab17d50bfcba1056188e9b14f35b9e2b62a0d/portalocker-2.10.1-py3-none-any.whl", hash = "sha256:53a5984ebc86a025552264b459b46a2086e269b21823cb572f8f28ee759e45bf", size = 18423 },
]

+[[package]]
+name = "posthog"
+version = "3.11.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "backoff" },
+ { name = "monotonic" },
+ { name = "python-dateutil" },
+ { name = "requests" },
+ { name = "six" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a1/f9/ffb682dfcfe43ff38c501791b8b4c01ba25f772c5d16bdb8c0f992f099fd/posthog-3.11.0.tar.gz", hash = "sha256:42a1f88cbcddeceaf6e8900a528db62d84fc56f6e5809f3d6dfb40e6f743091e", size = 61344 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e6/21/a7975b832603fed31930860108e12f7680ad829d74ce05eab2df1a17ae2d/posthog-3.11.0-py2.py3-none-any.whl", hash = "sha256:8cbd52c26bcdfbe65c4ea84a8090cfa2e046879d6b6d71da68e279a5b4aedb46", size = 72005 },
+]
+
[[package]]
name = "pot"
version = "0.9.5"
@@ -5632,6 +5940,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3e/6e/9aa158121eb5a6af5537af0bde9e38092a97c40a5a0ecaec7cc9688b2c2e/pypdf-5.2.0-py3-none-any.whl", hash = "sha256:d107962ec45e65e3bd10c1d9242bdbbedaa38193c9e3a6617bd6d996e5747b19", size = 298686 },
]

+[[package]]
+name = "pypika"
+version = "0.48.9"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/c7/2c/94ed7b91db81d61d7096ac8f2d325ec562fc75e35f3baea8749c85b28784/PyPika-0.48.9.tar.gz", hash = "sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378", size = 67259 }
+
+[[package]]
+name = "pyproject-hooks"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e7/82/28175b2414effca1cdac8dc99f76d660e7a4fb0ceefa4b4ab8f5f6742925/pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8", size = 19228 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913", size = 10216 },
+]
+
[[package]]
name = "pyreadline3"
version = "3.5.4"
@@ -6032,6 +6355,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d7/25/dd878a121fcfdf38f52850f11c512e13ec87c2ea72385933818e5b6c15ce/requests_file-2.1.0-py2.py3-none-any.whl", hash = "sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c", size = 4244 },
]

+[[package]]
+name = "requests-oauthlib"
+version = "2.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "oauthlib" },
+ { name = "requests" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179 },
+]
+
[[package]]
name = "requests-toolbelt"
version = "1.0.0"