diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..733a182
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+/appservice/.env
diff --git a/appservice/__pycache__/llm1_bot.cpython-39.pyc b/appservice/__pycache__/llm1_bot.cpython-39.pyc
new file mode 100644
index 0000000..3f6d539
Binary files /dev/null and b/appservice/__pycache__/llm1_bot.cpython-39.pyc differ
diff --git a/appservice/__pycache__/llm2_bot.cpython-39.pyc b/appservice/__pycache__/llm2_bot.cpython-39.pyc
new file mode 100644
index 0000000..91bfd91
Binary files /dev/null and b/appservice/__pycache__/llm2_bot.cpython-39.pyc differ
diff --git a/appservice/__pycache__/message_router.cpython-39.pyc b/appservice/__pycache__/message_router.cpython-39.pyc
new file mode 100644
index 0000000..89d82fd
Binary files /dev/null and b/appservice/__pycache__/message_router.cpython-39.pyc differ
diff --git a/appservice/agent_manager.py b/appservice/agent_manager.py
deleted file mode 100644
index 7e612a9..0000000
--- a/appservice/agent_manager.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from mautrix.util.async_db import Database
-from mautrix.bridge import BaseBridge
-
-class AgentManager:
-    def __init__(self, bridge: BaseBridge):
-        self.bridge = bridge
-        self.intent_cache = {}
-
-    def get_agent_user_id(self, agent_id: str) -> str:
-        return f"@agent_{agent_id}:{self.bridge.config['homeserver.domain']}"
-
-    def get_intent(self, agent_id: str):
-        mxid = self.get_agent_user_id(agent_id)
-        if mxid not in self.intent_cache:
-            self.intent_cache[mxid] = self.bridge.get_intent(mxid)
-        return self.intent_cache[mxid]
diff --git a/appservice/base.py b/appservice/base.py
deleted file mode 100644
index 89f7241..0000000
--- a/appservice/base.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from mautrix.appservice import AppService
-from .agent_manager import AgentManager
-from .router import MessageRouter
-
-class AutonomousSphereBridge(AppService):
-    async def start(self):
-        self.agent_manager = AgentManager(self)
-        self.router = MessageRouter(self, self.agent_manager)
-
-        self.register_event_handler("m.room.message", self.router.handle_message)
-
-        await super().start()
diff --git a/appservice/bridge_app.py b/appservice/bridge_app.py
new file mode 100644
index 0000000..c6a2baa
--- /dev/null
+++ b/appservice/bridge_app.py
@@ -0,0 +1,84 @@
+import asyncio
+import os
+from typing import Tuple
+
+from mautrix.appservice import AppService
+from mautrix.client import Client
+from mautrix.types import Event, MessageEvent, MessageType, RoomID, UserID
+
+from llm1.llm1_bot import get_gemini_response
+from llm2.llm2_bot import get_ollama_response
+
+
+def _choose_model_and_strip(text: str, last_model: str) -> Tuple[str, str]:
+    if not isinstance(text, str):
+        return ("gemini", "")
+    lowered = text.lstrip()
+    if lowered.startswith("/gemini"):
+        return ("gemini", lowered.split(" ", 1)[1] if " " in lowered else "")
+    if lowered.startswith("/ollama") or lowered.startswith("/llama"):
+        return ("ollama", lowered.split(" ", 1)[1] if " " in lowered else "")
+    next_model = "ollama" if last_model == "gemini" else "gemini"
+    return (next_model, text)
+
+
+class LLMBridge:
+    def __init__(self) -> None:
+        hs_address = os.getenv("SYNAPSE_ADDRESS", "http://localhost:8008")
+        domain = os.getenv("SYNAPSE_DOMAIN", "localhost")
+        as_id = os.getenv("AS_ID", "autonomoussphere")
+        as_token = os.getenv("AS_TOKEN", "YOUR_AS_TOKEN")
+        hs_token = os.getenv("HS_TOKEN", "YOUR_HS_TOKEN")
+        bot_localpart = os.getenv("AS_BOT", "_as_master")
+        bind_addr = os.getenv("AS_BIND_ADDR", "0.0.0.0")
+        bind_port = int(os.getenv("AS_PORT", "29333"))
+
+        self.appservice = AppService(
+            id=as_id,
+            as_token=as_token,
+            hs_token=hs_token,
+            server=hs_address,
+            appservice_host=bind_addr,
+            appservice_port=bind_port,
+        )
+        self.bot = self.appservice.intent
+        self.bot.user_id = UserID(f"@{bot_localpart}:{domain}")
+        self._last_model = "ollama"
+
+        @self.appservice.on(Event)
+        async def on_event(evt: Event) -> None:
+            if not isinstance(evt, MessageEvent):
+                return
+            if evt.content.msgtype != MessageType.TEXT:
+                return
+            if evt.sender == self.bot.user_id:
+                return
+
+            room_id: RoomID = evt.room_id
+            body: str = evt.content.body or ""
+            model, prompt = _choose_model_and_strip(body, self._last_model)
+            self._last_model = model
+
+            try:
+                if model == "gemini":
+                    reply = get_gemini_response(prompt)
+                else:
+                    reply = get_ollama_response(prompt)
+            except Exception as exc:
+                reply = f"Error from {model}: {exc}"
+
+            await self.bot.send_text(room_id, reply)
+
+    async def run(self) -> None:
+        await self.appservice.start()
+
+
+def run_bridge_blocking() -> None:
+    bridge = LLMBridge()
+    asyncio.get_event_loop().run_until_complete(bridge.run())
+
+
+if __name__ == "__main__":
+    run_bridge_blocking()
+
+
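+# The AppService above is only half of the setup: Synapse also needs an appservice
+# registration file. A minimal sketch, assuming the env defaults used in __init__
+# (the tokens, port and user namespace are placeholders -- adjust per deployment):
+#
+#   id: autonomoussphere
+#   url: http://localhost:29333
+#   as_token: YOUR_AS_TOKEN
+#   hs_token: YOUR_HS_TOKEN
+#   sender_localpart: _as_master
+#   rate_limited: false
+#   namespaces:
+#     users:
+#       - exclusive: true
+#         regex: "@_as_.*:localhost"
+#     aliases: []
+#     rooms: []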
diff --git a/appservice/data/synapse b/appservice/data/synapse
new file mode 160000
index 0000000..be65a8e
--- /dev/null
+++ b/appservice/data/synapse
@@ -0,0 +1 @@
+Subproject commit be65a8ec0195955c15fdb179c9158b187638e39a
diff --git a/appservice/llm1/__pycache__/llm1_bot.cpython-311.pyc b/appservice/llm1/__pycache__/llm1_bot.cpython-311.pyc
new file mode 100644
index 0000000..1d36532
Binary files /dev/null and b/appservice/llm1/__pycache__/llm1_bot.cpython-311.pyc differ
diff --git a/appservice/llm1/__pycache__/llm1_bot.cpython-39.pyc b/appservice/llm1/__pycache__/llm1_bot.cpython-39.pyc
new file mode 100644
index 0000000..ee042a1
Binary files /dev/null and b/appservice/llm1/__pycache__/llm1_bot.cpython-39.pyc differ
diff --git a/appservice/a2a.py b/appservice/llm1/dockerfile
similarity index 100%
rename from appservice/a2a.py
rename to appservice/llm1/dockerfile
diff --git a/appservice/llm1/llm1_bot.py b/appservice/llm1/llm1_bot.py
new file mode 100644
index 0000000..f89749d
--- /dev/null
+++ b/appservice/llm1/llm1_bot.py
@@ -0,0 +1,26 @@
+from google import genai
+import dotenv
+
+dotenv.load_dotenv()
+
+# Read the API key (`GEMINI_API_KEY`) from the .env file and pass it to the client.
+api_key = dotenv.get_key(dotenv.find_dotenv(), "GEMINI_API_KEY")
+client = genai.Client(api_key=api_key)
+
+def get_gemini_response(prompt: str) -> str:
+    stream = client.models.generate_content_stream(
+        model="gemini-2.5-flash",
+        contents=prompt
+    )
+
+    full_response = ""
+    for chunk in stream:
+        # Skip chunks without text (chunk.text can be None for metadata-only chunks).
+        if getattr(chunk, "text", None):
+            #print(chunk.text, end="", flush=True)  # Stream to console
+            full_response += chunk.text
+    #print()  # Newline after streaming
+    return full_response
+
+# For standalone testing
+if __name__ == "__main__":
+    print(get_gemini_response("Explain how AI works in a few words"))
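+
+# Example /appservice/.env layout (assumed; the file is gitignored in this change and
+# only GEMINI_API_KEY is read here directly -- the bridge settings below are read from
+# the process environment by bridge_app.py/main.py, which load_dotenv() also populates):
+#
+#   GEMINI_API_KEY=your-gemini-api-key
+#   SYNAPSE_ADDRESS=http://localhost:8008
+#   SYNAPSE_DOMAIN=localhost
+#   AS_ID=autonomoussphere
+#   AS_TOKEN=YOUR_AS_TOKEN
+#   HS_TOKEN=YOUR_HS_TOKEN
+#   AS_BOT=_as_master
+#   AS_BIND_ADDR=0.0.0.0
+#   AS_PORT=29333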
diff --git a/appservice/llm1/requirements.txt b/appservice/llm1/requirements.txt
new file mode 100644
index 0000000..fc7baef
--- /dev/null
+++ b/appservice/llm1/requirements.txt
@@ -0,0 +1,2 @@
+google-genai>=0.3.0
+python-dotenv>=1.0.1
diff --git a/appservice/llm2/__pycache__/llm2_bot.cpython-311.pyc b/appservice/llm2/__pycache__/llm2_bot.cpython-311.pyc
new file mode 100644
index 0000000..07fe45c
Binary files /dev/null and b/appservice/llm2/__pycache__/llm2_bot.cpython-311.pyc differ
diff --git a/appservice/llm2/__pycache__/llm2_bot.cpython-39.pyc b/appservice/llm2/__pycache__/llm2_bot.cpython-39.pyc
new file mode 100644
index 0000000..2d20a77
Binary files /dev/null and b/appservice/llm2/__pycache__/llm2_bot.cpython-39.pyc differ
diff --git a/appservice/acp.py b/appservice/llm2/dockerfile
similarity index 100%
rename from appservice/acp.py
rename to appservice/llm2/dockerfile
diff --git a/appservice/llm2/llm2_bot.py b/appservice/llm2/llm2_bot.py
new file mode 100644
index 0000000..0d4dbbc
--- /dev/null
+++ b/appservice/llm2/llm2_bot.py
@@ -0,0 +1,52 @@
+import subprocess
+import sys
+try:
+    import ollama
+except Exception:
+    # Best-effort install, then retry the import once.
+    subprocess.run([sys.executable, "-m", "pip", "install", "ollama"], check=False)
+    try:
+        import ollama
+    except Exception:
+        ollama = None
+_OLLAMA_AVAILABLE = ollama is not None
+
+model_name = 'llama2'
+
+class _DummyMessage:
+    def __init__(self, content):
+        self.content = content
+
+class _DummyResponse:
+    def __init__(self, text):
+        self.message = _DummyMessage(text)
+
+def chat(model, messages):
+    if _OLLAMA_AVAILABLE and hasattr(ollama, 'chat'):
+        return ollama.chat(model=model, messages=messages, stream=True)
+    install_msg = (
+        "ollama is not available.\n"
+        "Install the Ollama runtime and Python client, then ensure the 'ollama' CLI is on your PATH.\n"
+        "Quick steps:\n"
+        " 1) Install the Ollama app / runtime: https://ollama.ai\n"
+        " 2) Install the Python client: python -m pip install ollama\n"
+        " 3) Verify you can run 'ollama' from your shell and import ollama in Python.\n"
+    )
+    return _DummyResponse(install_msg)
+
+def get_ollama_response(prompt: str) -> str:
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": prompt}
+    ]
+
+    # Go through the chat() wrapper so the fallback message is returned when ollama is missing.
+    stream = chat(model_name, messages)
+    if isinstance(stream, _DummyResponse):
+        return stream.message.content
+
+    full_response = ""
+    for chunk in stream:
+        content = chunk.get("message", {}).get("content", "")
+        #print(content, end="", flush=True)  # Stream to console
+        full_response += content
+    #print()  # Newline after streaming
+
+    return full_response
+
+
+# For standalone testing
+if __name__ == "__main__":
+    print(get_ollama_response("Hello!"))
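+
+# Assumed local setup for this module: the Ollama runtime listening on its default
+# address (http://127.0.0.1:11434) with the model already pulled, e.g.:
+#
+#   ollama pull llama2
+#
+# If the Python client is missing, chat() returns the install-instructions fallback;
+# if the runtime or the model is missing, ollama.chat() raises an error instead.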
diff --git a/appservice/mcp.py b/appservice/llm2/requirements.txt
similarity index 100%
rename from appservice/mcp.py
rename to appservice/llm2/requirements.txt
diff --git a/appservice/main.py b/appservice/main.py
new file mode 100644
index 0000000..2e5ac38
--- /dev/null
+++ b/appservice/main.py
@@ -0,0 +1,29 @@
+import sys
+import os
+import subprocess
+
+def _ensure_dependencies() -> None:
+    try:
+        from google import genai  # noqa: F401
+    except Exception:
+        subprocess.run([sys.executable, "-m", "pip", "install", "google-genai>=0.3.0"], check=False)
+    try:
+        import dotenv  # noqa: F401
+    except Exception:
+        subprocess.run([sys.executable, "-m", "pip", "install", "python-dotenv>=1.0.1"], check=False)
+
+sys.path.append(os.path.dirname(__file__))
+
+if __name__ == "__main__":
+    _ensure_dependencies()
+    if os.getenv("RUN_BRIDGE", "0") == "1":
+        # Lazy install mautrix when running bridge
+        try:
+            import mautrix  # noqa: F401
+        except Exception:
+            subprocess.run([sys.executable, "-m", "pip", "install", "mautrix"], check=False)
+        from bridge_app import run_bridge_blocking
+        run_bridge_blocking()
+    else:
+        from router.message_router import start_conversation
+        start_conversation("Hi there!")
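+
+# Usage sketch (assumed to be run from the appservice/ directory):
+#   python main.py                 # local round-robin chat between Gemini and Ollama
+#   RUN_BRIDGE=1 python main.py    # start the Matrix appservice bridge on AS_PORT (default 29333)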
diff --git a/appservice/router.py b/appservice/router.py
deleted file mode 100644
index 1cdb7aa..0000000
--- a/appservice/router.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from appservice.a2a import handle_a2a
-from appservice.acp import handle_acp
-from appservice.mcp import handle_mcp
-
-class MessageRouter:
-    def __init__(self, bridge, agent_manager):
-        self.bridge = bridge
-        self.agent_manager = agent_manager
-
-    async def handle_message(self, evt):
-        content = evt.content.get("body", "")
-        sender = evt.sender
-
-        if content.startswith("a2a:"):
-            await handle_a2a(self.bridge, evt, self.agent_manager)
-        elif content.startswith("mcp:"):
-            await handle_mcp(self.bridge, evt, self.agent_manager)
-        elif content.startswith("acp:"):
-            await handle_acp(self.bridge, evt, self.agent_manager)
-        else:
-            print(f"Ignoring: {content}")
diff --git a/appservice/router/__pycache__/message_router.cpython-311.pyc b/appservice/router/__pycache__/message_router.cpython-311.pyc
new file mode 100644
index 0000000..b95a365
Binary files /dev/null and b/appservice/router/__pycache__/message_router.cpython-311.pyc differ
diff --git a/appservice/router/__pycache__/message_router.cpython-39.pyc b/appservice/router/__pycache__/message_router.cpython-39.pyc
new file mode 100644
index 0000000..649a788
Binary files /dev/null and b/appservice/router/__pycache__/message_router.cpython-39.pyc differ
diff --git a/appservice/router/message_router.py b/appservice/router/message_router.py
new file mode 100644
index 0000000..19e363a
--- /dev/null
+++ b/appservice/router/message_router.py
@@ -0,0 +1,46 @@
+from llm1.llm1_bot import get_gemini_response
+from llm2.llm2_bot import get_ollama_response
+from typing import Tuple
+import sys
+import io
+
+# Ensure UTF-8 output on Windows consoles that default to cp1252
+try:
+    sys.stdout.reconfigure(encoding="utf-8")
+except Exception:
+    try:
+        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
+    except Exception:
+        pass
+
+def _choose_model_and_strip_command(text: str, last_model: str) -> Tuple[str, str]:
+    if not isinstance(text, str):
+        return ("gemini", "")
+    lowered = text.lstrip()
+    if lowered.startswith("/gemini"):
+        return ("gemini", lowered.split(" ", 1)[1] if " " in lowered else "")
+    if lowered.startswith("/ollama") or lowered.startswith("/llama"):
+        return ("ollama", lowered.split(" ", 1)[1] if " " in lowered else "")
+    # Round-robin default when no explicit command
+    next_model = "ollama" if last_model == "gemini" else "gemini"
+    return (next_model, text)
+
+def start_conversation(initial_prompt="Hello!"):
+    turn = 0
+    message = initial_prompt
+    last_model = "ollama"  # so the first implicit turn goes to Gemini
+
+    while True:
+        model, user_text = _choose_model_and_strip_command(message, last_model)
+        if model == "gemini":
+            print("\n🧠 Gemini says:")
+            message = get_gemini_response(user_text)
+        else:
+            print("\n🦙 Ollama says:")
+            message = get_ollama_response(user_text)
+        last_model = model
+
+        print(message)
+        turn += 1
+
+        if turn > 3 or (isinstance(message, str) and "stop" in message.lower()):
+            break