diff --git a/mesa/experimental/llm_layer/__init__.py b/mesa/experimental/llm_layer/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/mesa/experimental/llm_layer/agents.py b/mesa/experimental/llm_layer/agents.py
new file mode 100644
index 00000000000..2be6c9635bb
--- /dev/null
+++ b/mesa/experimental/llm_layer/agents.py
@@ -0,0 +1,33 @@
+from mesa import Agent
+
+
+class BasicAgent(Agent):
+    """Rule-based agent that randomly collects or waits each step."""
+
+    def __init__(self, model):
+        # Pass the parameters to the parent class.
+        super().__init__(model)
+
+    def step(self):
+        # 50/50 coin flip via the model-seeded RNG so runs are reproducible.
+        if self.random.random() > 0.5:
+            print(f"[Basic {self.unique_id}] Collecting nearby resource.")
+        else:
+            print(f"[Basic {self.unique_id}] Waiting...")
+
+
+class CognitiveAgent(Agent):
+    """Agent whose action each step is chosen by an LLM-style orchestrator."""
+
+    def __init__(self, model, orchestrator):
+        super().__init__(model)
+        self.orchestrator = orchestrator
+        self.memory = []  # history of actions returned by the orchestrator
+
+    def step(self):
+        # Run the reasoning graph from the "plan" node and remember the
+        # chosen action so future decisions can depend on past ones.
+        context = {"goal": "collect", "memory": self.memory}
+        action = self.orchestrator.execute_graph("plan", self, context)
+        self.memory.append(action)
+        print(f"[Cognitive {self.unique_id}] {action}")
diff --git a/mesa/experimental/llm_layer/models.py b/mesa/experimental/llm_layer/models.py
new file mode 100644
index 00000000000..f42664c16c2
--- /dev/null
+++ b/mesa/experimental/llm_layer/models.py
@@ -0,0 +1,45 @@
+from mesa import Model
+
+# Absolute import so this module resolves both when imported as part of the
+# installed ``mesa`` package and when the example is run from this directory.
+from mesa.experimental.llm_layer.agents import BasicAgent, CognitiveAgent
+
+
+class HybridModel(Model):
+    """Model mixing rule-based agents with orchestrator-driven ones.
+
+    Args:
+        num_agents: total number of agents to create (at most two of them
+            are cognitive; the rest are basic).
+        orchestrator: reasoning graph shared by all cognitive agents.
+    """
+
+    def __init__(self, num_agents, orchestrator):
+        super().__init__()
+        self.num_agents = num_agents
+        self.orchestrator = orchestrator
+
+        # Honor num_agents instead of hard-coding 3 + 2; with the example's
+        # num_agents=5 this still yields 3 basic and 2 cognitive agents.
+        n_cognitive = min(2, num_agents)
+        n_basic = max(0, num_agents - n_cognitive)
+        if n_basic:
+            BasicAgent.create_agents(model=self, n=n_basic)
+        if n_cognitive:
+            CognitiveAgent.create_agents(
+                model=self, n=n_cognitive, orchestrator=orchestrator
+            )
+
+    def step(self):
+        # Activate every agent once, in a freshly shuffled order.
+        self.agents.shuffle_do("step")
+
+
+def llm_collect(agent, state):
+    """Graph node: pretend-LLM reasoning that always chooses to collect."""
+    return f"[LLM] Reasoned to collect based on memory length {len(state['memory'])}"
+
+
+def wait_tool(agent, state):
+    """Graph node: deterministic rule that chooses to wait."""
+    return "[RULE] Wait due to uncertainty or cooldown"
diff --git a/mesa/experimental/llm_layer/orchestrator.py b/mesa/experimental/llm_layer/orchestrator.py
new file mode 100644
index 00000000000..1038f16bc29
--- /dev/null
+++ b/mesa/experimental/llm_layer/orchestrator.py
@@ -0,0 +1,56 @@
+# LangGraph-inspired orchestrator (future-proof structure)
+class Orchestrator:
+    """Minimal directed-graph executor for agent reasoning steps.
+
+    Nodes are callables ``func(agent, state) -> result``; every edge is a
+    ``(condition_fn, target)`` pair evaluated against the shared state.
+    """
+
+    def __init__(self):
+        self.nodes = {}  # name -> node callable
+        self.edges = {}  # name -> list of (condition_fn, target) pairs
+
+    def add_node(self, name, func):
+        """Register a node callable under ``name``."""
+        self.nodes[name] = func
+
+    def add_edge(self, from_node, to_node):
+        """Add an unconditional edge from ``from_node`` to ``to_node``.
+
+        Stored as an always-true conditional pair so plain and conditional
+        edges share one representation; appending the bare target string
+        would crash ``_resolve_next_node`` when it unpacks the pair.
+        """
+        self.edges.setdefault(from_node, []).append((lambda state: True, to_node))
+
+    def add_conditional_edges(self, from_node, condition_fn):
+        """Route ``from_node`` to every other node, gated by ``condition_fn``."""
+        self.edges[from_node] = [
+            (condition_fn, target) for target in self.nodes if target != from_node
+        ]
+
+    def execute_graph(self, start_node, agent, state):
+        """Run nodes from ``start_node`` until no edge condition matches.
+
+        Records each node's result in ``state["last_output"]`` and returns
+        the result of the last node executed (``None`` if ``start_node``
+        is falsy and the loop never runs).
+        """
+        result = None  # defined even if the loop body never executes
+        current_node = start_node
+        while current_node:
+            result = self.nodes[current_node](agent, state)
+            state["last_output"] = result
+            next_node = self._resolve_next_node(current_node, state)
+            if not next_node:
+                break
+            current_node = next_node
+        return result
+
+    def _resolve_next_node(self, current_node, state):
+        """Return the first target whose condition is truthy, else None."""
+        if current_node not in self.edges:
+            return None
+        for condition_fn, target in self.edges[current_node]:
+            if condition_fn(state):
+                return target
+        return None
diff --git a/mesa/experimental/llm_layer/run.py b/mesa/experimental/llm_layer/run.py
new file mode 100644
index 00000000000..06671986e94
--- /dev/null
+++ b/mesa/experimental/llm_layer/run.py
@@ -0,0 +1,27 @@
+"""Example entry point wiring the orchestrator graph into a HybridModel."""
+
+from mesa.experimental.llm_layer.models import HybridModel, llm_collect, wait_tool
+from mesa.experimental.llm_layer.orchestrator import Orchestrator
+
+
+def main():
+    # Set up the orchestrator and graph
+    orchestrator = Orchestrator()
+    orchestrator.add_node("plan", llm_collect)
+    orchestrator.add_node("wait", wait_tool)
+
+    # Add conditional edge: alternate between collecting and waiting
+    orchestrator.add_conditional_edges(
+        "plan", lambda state: "wait" if len(state["memory"]) % 2 == 0 else None
+    )
+
+    # Run the model
+    model = HybridModel(num_agents=5, orchestrator=orchestrator)
+    for step in range(3):
+        print(f"\n--- Step {step + 1} ---")
+        model.step()
+
+
+if __name__ == "__main__":
+    # Guard so importing this module never triggers a simulation run.
+    main()