1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
/appservice/.env
Binary file added appservice/__pycache__/llm1_bot.cpython-39.pyc
Binary file not shown.
Binary file added appservice/__pycache__/llm2_bot.cpython-39.pyc
Binary file not shown.
Binary file added appservice/__pycache__/message_router.cpython-39.pyc
Binary file not shown.
16 changes: 0 additions & 16 deletions appservice/agent_manager.py

This file was deleted.

12 changes: 0 additions & 12 deletions appservice/base.py

This file was deleted.

84 changes: 84 additions & 0 deletions appservice/bridge_app.py
@@ -0,0 +1,84 @@
import asyncio
import os
from typing import Tuple

from mautrix.appservice import AppService
from mautrix.types import Event, MessageEvent, MessageType, RoomID, UserID

from llm1.llm1_bot import get_gemini_response
from llm2.llm2_bot import get_ollama_response


def _choose_model_and_strip(text: str, last_model: str) -> Tuple[str, str]:
if not isinstance(text, str):
return ("gemini", "")
lowered = text.lstrip()
if lowered.startswith("/gemini"):
return ("gemini", lowered.split(" ", 1)[1] if " " in lowered else "")
if lowered.startswith("/ollama") or lowered.startswith("/llama"):
return ("ollama", lowered.split(" ", 1)[1] if " " in lowered else "")
next_model = "ollama" if last_model == "gemini" else "gemini"
return (next_model, text)


class LLMBridge:
def __init__(self) -> None:
hs_address = os.getenv("SYNAPSE_ADDRESS", "http://localhost:8008")
domain = os.getenv("SYNAPSE_DOMAIN", "localhost")
as_id = os.getenv("AS_ID", "autonomoussphere")
as_token = os.getenv("AS_TOKEN", "YOUR_AS_TOKEN")
hs_token = os.getenv("HS_TOKEN", "YOUR_HS_TOKEN")
bot_localpart = os.getenv("AS_BOT", "_as_master")
bind_addr = os.getenv("AS_BIND_ADDR", "0.0.0.0")
bind_port = int(os.getenv("AS_PORT", "29333"))

self.appservice = AppService(
id=as_id,
as_token=as_token,
hs_token=hs_token,
server=hs_address,
appservice_host=bind_addr,
appservice_port=bind_port,
)
self.bot = self.appservice.intent
self.bot.user_id = UserID(f"@{bot_localpart}:{domain}")
self._last_model = "ollama"

@self.appservice.on(Event)
async def on_event(evt: Event) -> None:
if not isinstance(evt, MessageEvent):
return
if evt.content.msgtype != MessageType.TEXT:
return
if evt.sender == self.bot.user_id:
return

room_id: RoomID = evt.room_id
body: str = evt.content.body or ""
model, prompt = _choose_model_and_strip(body, self._last_model)
self._last_model = model

try:
if model == "gemini":
reply = get_gemini_response(prompt)
else:
reply = get_ollama_response(prompt)
except Exception as exc:
reply = f"Error from {model}: {exc}"

await self.bot.send_text(room_id, reply)

async def run(self) -> None:
await self.appservice.start()


def run_bridge_blocking() -> None:
bridge = LLMBridge()
asyncio.get_event_loop().run_until_complete(bridge.run())


if __name__ == "__main__":
run_bridge_blocking()
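
The slash-command routing above is easiest to see with concrete inputs. Below is a minimal sketch of the expected behaviour of `_choose_model_and_strip` (a rough check under the assumption that the appservice dependencies are installed so `bridge_app` imports cleanly, run from the `appservice/` directory):

from bridge_app import _choose_model_and_strip

# Explicit commands always win and the command prefix is stripped from the prompt.
assert _choose_model_and_strip("/gemini explain RAG", "ollama") == ("gemini", "explain RAG")
assert _choose_model_and_strip("/ollama hi", "gemini") == ("ollama", "hi")

# Without a command, the models alternate based on whichever answered last.
assert _choose_model_and_strip("hello", "gemini") == ("ollama", "hello")
assert _choose_model_and_strip("hello", "ollama") == ("gemini", "hello")
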


1 change: 1 addition & 0 deletions appservice/data/synapse
Submodule synapse added at be65a8
Binary file added appservice/llm1/__pycache__/llm1_bot.cpython-39.pyc
Binary file not shown.
File renamed without changes.
26 changes: 26 additions & 0 deletions appservice/llm1/llm1_bot.py
@@ -0,0 +1,26 @@
from google import genai
import dotenv

dotenv.load_dotenv()

# Read the Gemini API key (GEMINI_API_KEY) from the nearest .env file.
api_key = dotenv.get_key(dotenv.find_dotenv(), "GEMINI_API_KEY")
client = genai.Client(api_key=api_key)

def get_gemini_response(prompt: str) -> str:
stream = client.models.generate_content_stream(
model="gemini-2.5-flash",
contents=prompt
)

    full_response = ""
    for chunk in stream:
        text = getattr(chunk, "text", None)
        if text:  # chunk.text can be None for non-text chunks
            #print(text, end="", flush=True)  # Stream to console
            full_response += text
    #print()  # Newline after streaming
    return full_response

# For standalone testing
if __name__ == "__main__":
print(get_gemini_response("Explain how AI works in a few words"))
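
`llm1_bot.py` resolves `GEMINI_API_KEY` from a `.env` file rather than the process environment, which is why the new `.gitignore` entry excludes `/appservice/.env`. A hypothetical sanity check, using the same `python-dotenv` calls as the module above:

import dotenv

# Mirrors llm1_bot.py: locate the nearest .env and read GEMINI_API_KEY from it.
env_path = dotenv.find_dotenv()
key = dotenv.get_key(env_path, "GEMINI_API_KEY")
print(f"Using .env at {env_path!r}; GEMINI_API_KEY present: {bool(key)}")
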
2 changes: 2 additions & 0 deletions appservice/llm1/requirements.txt
@@ -0,0 +1,2 @@
google-genai>=0.3.0
python-dotenv>=1.0.1
File renamed without changes.
52 changes: 52 additions & 0 deletions appservice/llm2/llm2_bot.py
@@ -0,0 +1,52 @@
import subprocess
import sys

# Best-effort import: try to install the ollama client once if it is missing.
try:
    import ollama
    _OLLAMA_AVAILABLE = True
except Exception:
    subprocess.run([sys.executable, "-m", "pip", "install", "ollama"], check=False)
    try:
        import ollama
        _OLLAMA_AVAILABLE = True
    except Exception:
        ollama = None
        _OLLAMA_AVAILABLE = False

model_name = 'llama2'

class _DummyMessage:
    def __init__(self, content):
        self.content = content

class _DummyResponse:
    def __init__(self, text):
        self.message = _DummyMessage(text)

def chat(model, messages):
    # Delegate to the real client when it is importable; otherwise return a stub
    # response that explains how to install the Ollama runtime and client.
    if _OLLAMA_AVAILABLE and hasattr(ollama, 'chat'):
        return ollama.chat(model=model, messages=messages, stream=True)
    install_msg = (
        "ollama is not available.\n"
        "Install the Ollama runtime and Python client, then ensure the 'ollama' CLI is on your PATH.\n"
        "Quick steps:\n"
        "  1) Install the Ollama app / runtime: https://ollama.ai\n"
        "  2) Install the Python client: python -m pip install ollama\n"
        "  3) Verify you can run 'ollama' from your shell and import ollama in Python.\n"
    )
    return _DummyResponse(install_msg)

def get_ollama_response(prompt: str) -> str:
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]

    # Go through the chat() wrapper so the install-instructions fallback is used
    # when ollama is missing, instead of raising a NameError.
    stream = chat(model_name, messages)
    if isinstance(stream, _DummyResponse):
        return stream.message.content

    full_response = ""
    for chunk in stream:
        content = chunk.get("message", {}).get("content", "")
        #print(content, end="", flush=True)  # Stream to console
        full_response += content
    #print()  # Newline after streaming

    return full_response


# For standalone testing
if __name__ == "__main__":
print(get_ollama_response("Hello!"))
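
Since `chat()` returns either a streaming iterator or a `_DummyResponse` stub, callers can branch on the type to surface the install instructions instead of crashing. A small hypothetical caller, mirroring what `get_ollama_response` does:

from llm2.llm2_bot import chat, _DummyResponse, model_name

result = chat(model_name, [{"role": "user", "content": "ping"}])
if isinstance(result, _DummyResponse):
    print(result.message.content)  # install instructions when ollama is missing
else:
    # Real path: concatenate the streamed chunks, as get_ollama_response does.
    print("".join(chunk.get("message", {}).get("content", "") for chunk in result))
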
File renamed without changes.
29 changes: 29 additions & 0 deletions appservice/main.py
@@ -0,0 +1,29 @@
import sys
import os
import subprocess

def _ensure_dependencies() -> None:
try:
from google import genai # noqa: F401
except Exception:
subprocess.run([sys.executable, "-m", "pip", "install", "google-genai>=0.3.0"], check=False)
try:
import dotenv # noqa: F401
except Exception:
subprocess.run([sys.executable, "-m", "pip", "install", "python-dotenv>=1.0.1"], check=False)

sys.path.append(os.path.dirname(__file__))

if __name__ == "__main__":
_ensure_dependencies()
if os.getenv("RUN_BRIDGE", "0") == "1":
# Lazy install mautrix when running bridge
try:
import mautrix # noqa: F401
except Exception:
subprocess.run([sys.executable, "-m", "pip", "install", "mautrix"], check=False)
from bridge_app import run_bridge_blocking
run_bridge_blocking()
else:
from router.message_router import start_conversation
start_conversation("Hi there!")
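
`main.py` switches between the Matrix bridge and the local console loop via the `RUN_BRIDGE` environment variable. A hypothetical launcher showing both modes (paths assume it is run from the `appservice/` directory):

import os
import subprocess
import sys

# Start the Matrix appservice bridge (needs mautrix plus the AS_*/HS_* env vars).
subprocess.run([sys.executable, "main.py"], env={**os.environ, "RUN_BRIDGE": "1"}, check=False)

# Or run the default Gemini <-> Ollama console conversation.
subprocess.run([sys.executable, "main.py"], env={**os.environ, "RUN_BRIDGE": "0"}, check=False)
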
21 changes: 0 additions & 21 deletions appservice/router.py

This file was deleted.

46 changes: 46 additions & 0 deletions appservice/router/message_router.py
@@ -0,0 +1,46 @@
from llm1.llm1_bot import get_gemini_response
from llm2.llm2_bot import get_ollama_response
import sys
import io
from typing import Tuple

# Ensure UTF-8 output on Windows consoles that default to cp1252
try:
sys.stdout.reconfigure(encoding="utf-8")
except Exception:
try:
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
except Exception:
pass

def _choose_model_and_strip_command(text: str, last_model: str) -> Tuple[str, str]:
if not isinstance(text, str):
return ("gemini", "")
lowered = text.lstrip()
if lowered.startswith("/gemini"):
return ("gemini", lowered.split(" ", 1)[1] if " " in lowered else "")
if lowered.startswith("/ollama") or lowered.startswith("/llama"):
return ("ollama", lowered.split(" ", 1)[1] if " " in lowered else "")
# Round-robin default when no explicit command
next_model = "ollama" if last_model == "gemini" else "gemini"
return (next_model, text)

def start_conversation(initial_prompt="Hello!"):
turn = 0
message = initial_prompt
last_model = "ollama" # so first implicit turn goes to gemini

while True:
model, user_text = _choose_model_and_strip_command(message, last_model)
if model == "gemini":
print("\n🧠 Gemini says:")
message = get_gemini_response(user_text)
else:
print("\n🦙 Ollama says:")
message = get_ollama_response(user_text)
last_model = model

print(message)
turn += 1

if turn > 3 or (isinstance(message, str) and "stop" in message.lower()):
break
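
The loop above hands each model's reply to the other model for at most four turns, or stops early when a reply contains "stop". A minimal usage sketch (assuming `GEMINI_API_KEY` is configured and a local Ollama model is pulled):

from router.message_router import start_conversation

# Gemini answers the first implicit turn (last_model starts as "ollama"),
# then the two models alternate, each receiving the previous reply as its prompt.
start_conversation("Debate: are type hints worth it in small scripts?")
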